// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>
#include <string.h>

#include <hypconstants.h>

#include <atomic.h>
#include <bitmap.h>
#include <compiler.h>
#include <cpulocal.h>
#include <hyp_aspace.h>
#include <panic.h>
#include <partition.h>
#include <platform_mem.h>
#include <thread.h>
#include <trace.h>
#include <util.h>

#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/prefetch.h>
#include <asm/timestamp.h>

#include "event_handlers.h"
#include "trace_helpers.h"

static_assert((uintmax_t)PLATFORM_MAX_CORES <
                      ((uintmax_t)1 << TRACE_INFO_CPU_ID_BITS),
              "CPU-ID does not fit in info");
static_assert((uintmax_t)ENUM_TRACE_ID_MAX_VALUE <
                      ((uintmax_t)1 << TRACE_TAG_TRACE_ID_BITS),
              "Trace ID does not fit in tag");
static_assert(TRACE_BUFFER_ENTRY_SIZE == TRACE_BUFFER_HEADER_SIZE,
              "Trace header should be the same size as an entry");

trace_control_t hyp_trace = { .magic = TRACE_MAGIC, .version = TRACE_VERSION };
register_t trace_public_class_flags;

extern trace_buffer_header_t trace_boot_buffer;

CPULOCAL_DECLARE_STATIC(trace_buffer_header_t *, trace_buffer);
static trace_buffer_header_t *trace_buffer_global;
// Tracing API
//
// A set of functions for logging trace events. The TRACE() macro can be used
// to construct the correct arguments for calling this API.
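//
// For example (an illustrative sketch only; the class and event ID names
// here are hypothetical, and the macro's exact argument list is defined in
// the trace headers):
//
//      TRACE(DEBUG, VCPU_BLOCKED, "vcpu {:d} blocked", (register_t)vcpu_id);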

static void
trace_init_common(partition_t *partition, void *base, size_t size,
                  count_t buffer_count, trace_buffer_header_t *tbuffers[])
{
        count_t global_entries, local_entries;

        assert(size != 0U);
        assert(base != NULL);
        assert(buffer_count != 0U);

        if (buffer_count == 1U) {
                // Allocate all the area to the global buffer
                global_entries =
                        (count_t)(size / (size_t)TRACE_BUFFER_ENTRY_SIZE);
                local_entries = 0;
        } else {
                // Ensure the count is one global buffer plus one per CPU
                assert(buffer_count == TRACE_BUFFER_NUM);
                // Ensure the size left for the global buffer is at least
                // equal to the size reserved for each local buffer
                assert(size >= (PER_CPU_TRACE_ENTRIES *
                                TRACE_BUFFER_ENTRY_SIZE * TRACE_BUFFER_NUM));
                global_entries =
                        (count_t)((size / (size_t)TRACE_BUFFER_ENTRY_SIZE) -
                                  ((size_t)PER_CPU_TRACE_ENTRIES *
                                   PLATFORM_MAX_CORES));
                local_entries = PER_CPU_TRACE_ENTRIES;
        }

        hyp_trace.header = (trace_buffer_header_t *)base;
        hyp_trace.header_phys =
                partition_virt_to_phys(partition, (uintptr_t)base);

        count_t entries;
        trace_buffer_header_t *ptr = (trace_buffer_header_t *)base;
        for (count_t i = 0U; i < buffer_count; i++) {
                if (i == 0U) {
                        entries = global_entries;
                } else {
                        entries = local_entries;
                }
                trace_buffer_header_t *tb = ptr;
                ptr += entries;

                *tb = (trace_buffer_header_t){ 0U };
                tb->buf_magic = TRACE_MAGIC_BUFFER;
                tb->entries = entries - 1U;
                tb->not_wrapped = true;

                atomic_init(&tb->head, 0);

                tbuffers[i] = tb;
        }

        hyp_trace.num_bufs = buffer_count;
        // Total size of the trace buffer, in units of 64 bytes.
        hyp_trace.area_size_64 = (uint32_t)(size / 64U);
}
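
// Layout produced by trace_init_common() in the multi-buffer case (a sketch
// derived from the code above; the actual sizes are platform-configured).
// Each buffer's header occupies one entry-sized slot, which is why
// tb->entries is one less than the number of slots assigned to the buffer:
//
//      base -> [global header][global_entries - 1 entries]
//              [cpu 0 header] [PER_CPU_TRACE_ENTRIES - 1 entries]
//              ...
//              [cpu N-1 header][PER_CPU_TRACE_ENTRIES - 1 entries]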

void
trace_boot_init(void)
{
        register_t flags = 0U;

        hyp_trace.flags = trace_control_flags_default();
        trace_control_flags_set_format(&hyp_trace.flags,
                                       (trace_format_t)TRACE_FORMAT);

        // By default, enable the trace buffer and error traces
        TRACE_SET_CLASS(flags, ERROR);
#if !defined(NDEBUG)
        TRACE_SET_CLASS(flags, INFO);
#if !defined(UNITTESTS) || !UNITTESTS
        TRACE_SET_CLASS(flags, USER);
#endif
#endif
#if defined(VERBOSE_TRACE) && VERBOSE_TRACE
        TRACE_SET_CLASS(flags, DEBUG);
#endif
        atomic_init(&hyp_trace.enabled_class_flags, flags);

        // Set up internal flags that cannot be changed by hypercalls
        trace_public_class_flags = ~(register_t)0;
        TRACE_CLEAR_CLASS(trace_public_class_flags, ERROR);
        TRACE_CLEAR_CLASS(trace_public_class_flags, LOG_BUFFER);

        trace_init_common(
                partition_get_private(), &trace_boot_buffer,
                ((size_t)TRACE_BOOT_ENTRIES * (size_t)TRACE_BUFFER_ENTRY_SIZE),
                1U, &trace_buffer_global);
}

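// Switch from the static boot buffer to the dynamically allocated trace
// area: carve it into one global buffer plus one buffer per CPU, then copy
// the entries already logged in the boot buffer into the new local buffer
// of the boot CPU.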
static void
trace_buffer_init(partition_t *partition, void *base, size_t size)
        REQUIRE_PREEMPT_DISABLED
{
        assert(size != 0);
        assert(base != NULL);

        trace_buffer_header_t *tbs[TRACE_BUFFER_NUM];
        trace_init_common(partition, base, size, TRACE_BUFFER_NUM, tbs);
        // The global buffer will be the first, followed by the local buffers
        trace_buffer_global = tbs[0];
        for (cpu_index_t i = 0U; i < PLATFORM_MAX_CORES; i++) {
                bitmap_set(tbs[i + 1U]->cpu_mask, i);
                // The global buffer is first, hence the increment by 1
                CPULOCAL_BY_INDEX(trace_buffer, i) = tbs[i + 1];
        }

        // Copy the log entries from the boot trace into the newly allocated
        // trace buffer of the boot CPU, which is the current CPU
        cpu_index_t cpu_id = cpulocal_get_index();
        trace_buffer_header_t *trace_buffer =
                CPULOCAL_BY_INDEX(trace_buffer, cpu_id);
        assert(trace_boot_buffer.entries < trace_buffer->entries);

        trace_buffer_header_t *tb = &trace_boot_buffer;
        index_t head = atomic_load_explicit(&tb->head, memory_order_relaxed);
        size_t cpy_size = head * sizeof(trace_buffer_entry_t);

        if (cpy_size != 0U) {
                // The log entries follow on immediately after the header
                char *src_buf = (char *)(tb + 1);
                char *dst_buf = (char *)(trace_buffer + 1);

                (void)memcpy(dst_buf, src_buf, cpy_size);

                CACHE_CLEAN_INVALIDATE_RANGE(dst_buf, cpy_size);
        }

        atomic_store_release(&trace_buffer->head, head);
}

#if defined(PLATFORM_TRACE_STANDALONE_REGION)
void
trace_single_region_init(partition_t *partition, paddr_t base, size_t size)
{
        assert(size != 0);
        assert(base != 0);

        // Call to initialize the trace buffer
        trace_buffer_init(partition, (void *)base, size);
}
#else
void
trace_init(partition_t *partition, size_t size)
{
        assert(size != 0);

        void_ptr_result_t alloc_ret = partition_alloc(
                partition, size, alignof(trace_buffer_header_t));
        if (alloc_ret.e != OK) {
                panic("Error allocating trace buffer");
        }

        // Call to initialize the trace buffer
        trace_buffer_init(partition, alloc_ret.r, size);
}
#endif

// Log a trace event with the specified ID and action.
//
// id: ID of this trace event.
// arg0..arg4: information to store for this trace.
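//
// This handler is normally reached via the TRACE macros rather than called
// directly; an equivalent direct call might look like the following
// (illustrative only; the trace ID and argument values are hypothetical):
//
//      trace_standard_handle_trace_log(TRACE_ID_VCPU_BLOCKED,
//                                      TRACE_ACTION_TRACE_LOCAL,
//                                      "vcpu {:d} blocked", vcpu_id,
//                                      0U, 0U, 0U, 0U);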
void
trace_standard_handle_trace_log(trace_id_t id, trace_action_t action,
                                const char *fmt, register_t arg0,
                                register_t arg1, register_t arg2,
                                register_t arg3, register_t arg4)
{
        trace_buffer_header_t *tb;
        trace_info_t trace_info;
        trace_tag_t trace_tag;
        index_t head, entries;

        cpu_index_t cpu_id;
        uint64_t timestamp;

        // Add the data to the trace buffer only if:
        // - The requested action is tracing, and tracing is enabled, or
        // - The requested action is logging, and putting log messages in the
        //   trace buffer is enabled.
        bool trace_action = ((action == TRACE_ACTION_TRACE) ||
                             (action == TRACE_ACTION_TRACE_LOCAL) ||
                             (action == TRACE_ACTION_TRACE_AND_LOG));
        bool log_action = ((action == TRACE_ACTION_LOG) ||
                           (action == TRACE_ACTION_TRACE_AND_LOG));
        register_t class_flags = trace_get_class_flags();
        if (compiler_unexpected(
                    !trace_action &&
                    (!log_action ||
                     ((class_flags & TRACE_CLASS_BITS(TRACE_LOG_BUFFER)) ==
                      0U)))) {
                goto out;
        }

        cpu_id = cpulocal_get_index_unsafe();
        timestamp = arch_get_timestamp();

        trace_info_init(&trace_info);
        trace_info_set_cpu_id(&trace_info, cpu_id);
        trace_info_set_timestamp(&trace_info, timestamp);

        trace_tag_init(&trace_tag);
        trace_tag_set_trace_id(&trace_tag, id);
#if TRACE_FORMAT == 1
        thread_t *thread = thread_get_self();
        trace_tag_set_trace_ids(&trace_tag, trace_ids_raw(thread->trace_ids));
#else
#error unsupported format
#endif

        // Use the local buffer if the requested action is TRACE_LOCAL and we
        // are not still using the boot trace
        trace_buffer_header_t *cpu_tb = CPULOCAL_BY_INDEX(trace_buffer, cpu_id);
        if ((action == TRACE_ACTION_TRACE_LOCAL) && (cpu_tb != NULL)) {
                tb = cpu_tb;
        } else {
                tb = trace_buffer_global;
        }

        entries = tb->entries;

        // Atomically grab the next entry in the buffer
        head = atomic_fetch_add_explicit(&tb->head, 1, memory_order_consume);
        if (compiler_unexpected(head >= entries)) {
                index_t new_head = head + 1U;

                tb->not_wrapped = false;
                head -= entries;

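                // Try to move the shared head back into range: if no other
                // writer has raced with us, it still contains head + 1 (the
                // pre-wrap value saved in new_head), and we replace it with
                // the wrapped index plus one. If the CAS fails, another
                // writer has already advanced or rewound the head, and the
                // failure is harmless; our local wrapped head remains valid
                // for this entry.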
                (void)atomic_compare_exchange_strong_explicit(
                        &tb->head, &new_head, head + 1U, memory_order_relaxed,
                        memory_order_relaxed);
        }

        trace_buffer_entry_t *buffers =
                (trace_buffer_entry_t *)((uintptr_t)tb +
                                         (uintptr_t)TRACE_BUFFER_HEADER_SIZE);

#if defined(ARCH_ARM) && defined(ARCH_IS_64BIT) && ARCH_IS_64BIT
        // Store using non-temporal store instructions. Also, if the entry
        // fits within a DC ZVA block (typically one cache line), then zero
        // the entry first so the CPU doesn't waste time filling the cache;
        // and if the entry covers an entire cache line, flush it immediately
        // so it doesn't hang around if stnp is ineffective (as the manuals
        // suggest is the case for Cortex-A7x).
        __asm__ volatile(
#if ((1 << CPU_DCZVA_BITS) <= TRACE_BUFFER_ENTRY_SIZE) && \
        ((1 << CPU_DCZVA_BITS) <= TRACE_BUFFER_ENTRY_ALIGN)
                "dc zva, %[entry_addr];"
#endif
                "stnp %[info], %[tag], [%[entry_addr], 0];"
                "stnp %[fmt], %[arg0], [%[entry_addr], 16];"
                "stnp %[arg1], %[arg2], [%[entry_addr], 32];"
                "stnp %[arg3], %[arg4], [%[entry_addr], 48];"
#if ((1 << CPU_L1D_LINE_BITS) <= TRACE_BUFFER_ENTRY_SIZE) && \
        ((1 << CPU_L1D_LINE_BITS) <= TRACE_BUFFER_ENTRY_ALIGN)
                "dc civac, %[entry_addr];"
#endif
                : [entry] "=m"(buffers[head])
                : [entry_addr] "r"(&buffers[head]),
                  [info] "r"(trace_info_raw(trace_info)),
                  [tag] "r"(trace_tag_raw(trace_tag)), [fmt] "r"(fmt),
                  [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2),
                  [arg3] "r"(arg3), [arg4] "r"(arg4));
#else
        prefetch_store_stream(&buffers[head]);

        buffers[head].info = trace_info;
        buffers[head].tag = trace_tag;
        buffers[head].fmt = fmt;
        buffers[head].args[0] = arg0;
        buffers[head].args[1] = arg1;
        buffers[head].args[2] = arg2;
        buffers[head].args[3] = arg3;
        buffers[head].args[4] = arg4;
#endif

out:
        return;
}

void
trace_set_class_flags(register_t flags)
{
        (void)atomic_fetch_or_explicit(&hyp_trace.enabled_class_flags, flags,
                                       memory_order_relaxed);
}

void
trace_clear_class_flags(register_t flags)
{
        (void)atomic_fetch_and_explicit(&hyp_trace.enabled_class_flags, ~flags,
                                        memory_order_relaxed);
}

void
trace_update_class_flags(register_t set_flags, register_t clear_flags)
{
        register_t flags = atomic_load_explicit(&hyp_trace.enabled_class_flags,
                                                memory_order_relaxed);

        register_t new_flags;
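        // Standard C11 compare-exchange retry loop: on failure,
        // atomic_compare_exchange_strong_explicit() reloads the current
        // value into 'flags', and the new value is recomputed from it.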
        do {
                new_flags = flags & ~clear_flags;
                new_flags |= set_flags;
        } while (!atomic_compare_exchange_strong_explicit(
                &hyp_trace.enabled_class_flags, &flags, new_flags,
                memory_order_relaxed, memory_order_relaxed));
}

register_t
trace_get_class_flags(void)
{
        return atomic_load_explicit(&hyp_trace.enabled_class_flags,
                                    memory_order_relaxed);
}