// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2012 The Chromium OS Authors.
 */

#include <common.h>
#include <mapmem.h>
#include <time.h>
#include <trace.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/sections.h>

DECLARE_GLOBAL_DATA_PTR;

/* Whether function entry/exit records are currently being collected */
static char trace_enabled __section(".data");
/* Whether trace_init() has set up the trace buffer */
static char trace_inited __section(".data");

/* The header block at the start of the trace memory area */
struct trace_hdr {
	int func_count;		/* Total number of function call sites */
	u64 call_count;		/* Total number of tracked function calls */
	u64 untracked_count;	/* Total number of untracked function calls */
	int funcs_used;		/* Total number of functions used */

	/*
	 * Call count for each function. This is indexed by the word offset
	 * of the function from gd->relocaddr
	 */
	uintptr_t *call_accum;

	/* Function trace list */
	struct trace_call *ftrace;	/* The function call records */
	ulong ftrace_size;	/* Num. of ftrace records we have space for */
	ulong ftrace_count;	/* Num. of ftrace records written */
	ulong ftrace_too_deep_count;	/* Functions that were too deep */

	int depth;		/* Depth of function calls */
	int depth_limit;	/* Depth limit to trace to */
	int max_depth;		/* Maximum depth seen so far */
	int min_depth;		/* Minimum depth seen so far */
	bool trace_locked;	/* Used to detect recursive tracing */
};

/* Pointer to start of trace buffer */
static struct trace_hdr *hdr __section(".data");

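/**
 * func_ptr_to_num() - convert a function pointer into a function-site index
 *
 * The address is made relative to the start of the traced image (sandbox's
 * _init symbol, or the relocation/text base otherwise) and divided by
 * FUNC_SITE_SIZE, giving the index used for hdr->call_accum.
 *
 * @func_ptr:	pointer to the function
 * Return:	function-site index for @func_ptr
 */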
static inline uintptr_t __attribute__((no_instrument_function))
		func_ptr_to_num(void *func_ptr)
{
	uintptr_t offset = (uintptr_t)func_ptr;

#ifdef CONFIG_SANDBOX
	offset -= (uintptr_t)&_init;
#else
	if (gd->flags & GD_FLG_RELOC)
		offset -= gd->relocaddr;
	else
		offset -= CONFIG_TEXT_BASE;
#endif
	return offset / FUNC_SITE_SIZE;
}

#if defined(CONFIG_EFI_LOADER) && (defined(CONFIG_ARM) || defined(CONFIG_RISCV))

/**
 * trace_gd - the value of the gd register
 */
static volatile gd_t *trace_gd;

/**
 * trace_save_gd() - save the value of the gd register
 */
static void notrace trace_save_gd(void)
{
	trace_gd = gd;
}

/**
 * trace_swap_gd() - swap between U-Boot and application gd register value
 *
 * A UEFI application may change the value of the register that gd lives in.
 * But some of our functions like get_ticks() access this register. So we
 * have to set the gd register to the U-Boot value when entering a trace
 * point and set it back to the application value when exiting the trace point.
 */
static void notrace trace_swap_gd(void)
{
	volatile gd_t *temp_gd = trace_gd;

	trace_gd = gd;
	set_gd(temp_gd);
}

#else

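/* No-op stubs: gd does not need to be saved or swapped in this configuration */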
static void notrace trace_save_gd(void)
{
}

static void notrace trace_swap_gd(void)
{
}

#endif

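/**
 * add_ftrace() - append a function-call record to the trace buffer
 *
 * Calls deeper than the depth limit are not recorded and are tallied in
 * ftrace_too_deep_count instead. Once the buffer is full, records are
 * dropped but ftrace_count keeps counting so the overflow can be reported.
 *
 * @func_ptr:	pointer to the function being traced
 * @caller:	pointer to the function which called it
 * @flags:	FUNCF_ENTRY or FUNCF_EXIT, combined with the timestamp
 */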
static void notrace add_ftrace(void *func_ptr, void *caller, ulong flags)
{
	if (hdr->depth > hdr->depth_limit) {
		hdr->ftrace_too_deep_count++;
		return;
	}
	if (hdr->ftrace_count < hdr->ftrace_size) {
		struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];

		rec->func = func_ptr_to_num(func_ptr);
		rec->caller = func_ptr_to_num(caller);
		rec->flags = flags | (timer_get_us() & FUNCF_TIMESTAMP_MASK);
	}
	hdr->ftrace_count++;
}

/**
 * __cyg_profile_func_enter() - record function entry
 *
 * We add to our tally for this function and add to the list of called
 * functions.
 *
 * @func_ptr:	pointer to function being entered
 * @caller:	pointer to function which called this function
 */
void notrace __cyg_profile_func_enter(void *func_ptr, void *caller)
{
	if (trace_enabled) {
		int func;

		if (hdr->trace_locked) {
			trace_enabled = 0;
			puts("trace: recursion detected, disabling\n");
			hdr->trace_locked = false;
			return;
		}

		hdr->trace_locked = true;
		trace_swap_gd();
		add_ftrace(func_ptr, caller, FUNCF_ENTRY);
		func = func_ptr_to_num(func_ptr);
		if (func < hdr->func_count) {
			hdr->call_accum[func]++;
			hdr->call_count++;
		} else {
			hdr->untracked_count++;
		}
		hdr->depth++;
		if (hdr->depth > hdr->max_depth)
			hdr->max_depth = hdr->depth;
		trace_swap_gd();
		hdr->trace_locked = false;
	}
}

/**
 * __cyg_profile_func_exit() - record function exit
 *
 * @func_ptr:	pointer to function being exited
 * @caller:	pointer to function which called this function
 */
void notrace __cyg_profile_func_exit(void *func_ptr, void *caller)
{
	if (trace_enabled) {
		trace_swap_gd();
		hdr->depth--;
		add_ftrace(func_ptr, caller, FUNCF_EXIT);
		if (hdr->depth < hdr->min_depth)
			hdr->min_depth = hdr->depth;
		trace_swap_gd();
	}
}

/**
 * trace_list_functions() - produce a list of called functions
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of function records.
 *
 * @buff:	buffer to place list into
 * @buff_size:	size of buffer
 * @needed:	returns size of buffer needed, which may be
 *		greater than buff_size if we ran out of space.
 * Return:	0 if ok, -ENOSPC if space was exhausted
 */
int trace_list_functions(void *buff, size_t buff_size, size_t *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	size_t func;
	size_t upto;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each function */
	for (func = upto = 0; func < hdr->func_count; func++) {
		size_t calls = hdr->call_accum[func];

		if (!calls)
			continue;

		if (ptr + sizeof(struct trace_output_func) < end) {
			struct trace_output_func *stats = ptr;

			stats->offset = func * FUNC_SITE_SIZE;
			stats->call_count = calls;
			upto++;
		}
		ptr += sizeof(struct trace_output_func);
	}

	/* Update the header */
	if (output_hdr) {
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_FUNCS;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -ENOSPC;

	return 0;
}

/**
 * trace_list_calls() - produce a list of function calls
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of call records.
 *
 * @buff:	buffer to place list into
 * @buff_size:	size of buffer
 * @needed:	returns size of buffer needed, which may be
 *		greater than buff_size if we ran out of space.
 * Return:	0 if ok, -ENOSPC if space was exhausted
 */
int trace_list_calls(void *buff, size_t buff_size, size_t *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	size_t rec, upto;
	size_t count;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each call */
	count = hdr->ftrace_count;
	if (count > hdr->ftrace_size)
		count = hdr->ftrace_size;
	for (rec = upto = 0; rec < count; rec++) {
		if (ptr + sizeof(struct trace_call) < end) {
			struct trace_call *call = &hdr->ftrace[rec];
			struct trace_call *out = ptr;

			out->func = call->func * FUNC_SITE_SIZE;
			out->caller = call->caller * FUNC_SITE_SIZE;
			out->flags = call->flags;
			upto++;
		}
		ptr += sizeof(struct trace_call);
	}

	/* Update the header */
	if (output_hdr) {
		memset(output_hdr, '\0', sizeof(*output_hdr));
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_CALLS;
		output_hdr->version = TRACE_VERSION;
		output_hdr->text_base = CONFIG_TEXT_BASE;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -ENOSPC;

	return 0;
}

/**
 * trace_print_stats() - print basic information about tracing
 */
void trace_print_stats(void)
{
	ulong count;

#ifndef FTRACE
	puts("Warning: make U-Boot with FTRACE to enable function instrumenting.\n");
	puts("You will likely get zeroed data here\n");
#endif
	if (!trace_inited) {
		printf("Trace is disabled\n");
		return;
	}
	print_grouped_ull(hdr->func_count, 10);
	puts(" function sites\n");
	print_grouped_ull(hdr->call_count, 10);
	puts(" function calls\n");
	print_grouped_ull(hdr->untracked_count, 10);
	puts(" untracked function calls\n");
	count = min(hdr->ftrace_count, hdr->ftrace_size);
	print_grouped_ull(count, 10);
	puts(" traced function calls");
	if (hdr->ftrace_count > hdr->ftrace_size) {
		printf(" (%lu dropped due to overflow)",
		       hdr->ftrace_count - hdr->ftrace_size);
	}

	/* Add in minimum depth since the trace did not start at top level */
	printf("\n%15d maximum observed call depth\n",
	       hdr->max_depth - hdr->min_depth);
	printf("%15d call depth limit\n", hdr->depth_limit);
	print_grouped_ull(hdr->ftrace_too_deep_count, 10);
	puts(" calls not traced due to depth\n");
	print_grouped_ull(hdr->ftrace_size, 10);
	puts(" max function calls\n");
	printf("\ntrace buffer %lx call records %lx\n",
	       (ulong)map_to_sysmem(hdr), (ulong)map_to_sysmem(hdr->ftrace));
}

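/**
 * trace_set_enabled() - turn call tracing on or off at runtime
 *
 * @enabled:	non-zero to enable tracing, 0 to disable it
 */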
void notrace trace_set_enabled(int enabled)
{
	trace_enabled = enabled != 0;
}

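/**
 * get_func_count() - get the number of function call sites in the image
 *
 * Return:	gd->mon_len / FUNC_SITE_SIZE, or -ENOTSUPP if tracing is not
 *		supported on this board
 */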
static int get_func_count(void)
{
	/* A missing mon_len means that tracing cannot work on this board */
	if (IS_ENABLED(CONFIG_SANDBOX) && !gd->mon_len) {
		puts("Tracing is not supported on this board\n");
		return -ENOTSUPP;
	}

	return gd->mon_len / FUNC_SITE_SIZE;
}

/**
 * trace_init() - initialize the tracing system and enable it
 *
 * @buff:	Pointer to trace buffer
 * @buff_size:	Size of trace buffer
 * Return:	0 if ok, or a negative error code on failure
 */
int notrace trace_init(void *buff, size_t buff_size)
{
	int func_count = get_func_count();
	size_t needed;
	int was_disabled = !trace_enabled;

	if (func_count < 0)
		return func_count;
	trace_save_gd();

	if (!was_disabled) {
#ifdef CONFIG_TRACE_EARLY
		ulong used, count;
		char *end;

		/*
		 * Copy over the early trace data if we have it. Disable
		 * tracing while we are doing this.
		 */
		trace_enabled = 0;
		hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR,
				 CONFIG_TRACE_EARLY_SIZE);
		count = min(hdr->ftrace_count, hdr->ftrace_size);
		end = (char *)&hdr->ftrace[count];
		used = end - (char *)hdr;
		printf("trace: copying %08lx bytes of early data from %x to %08lx\n",
		       used, CONFIG_TRACE_EARLY_ADDR,
		       (ulong)map_to_sysmem(buff));
		printf("%lu traced function calls", count);
		if (hdr->ftrace_count > hdr->ftrace_size) {
			printf(" (%lu dropped due to overflow)",
			       hdr->ftrace_count - hdr->ftrace_size);
			hdr->ftrace_count = hdr->ftrace_size;
		}
		puts("\n");
		memcpy(buff, hdr, used);
#else
		puts("trace: already enabled\n");
		return -EALREADY;
#endif
	}
	hdr = (struct trace_hdr *)buff;
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size %zx bytes: at least %zx needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	if (was_disabled) {
		memset(hdr, '\0', needed);
		hdr->min_depth = INT_MAX;
	}
	hdr->func_count = func_count;
	hdr->call_accum = (uintptr_t *)(hdr + 1);

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)(buff + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	hdr->depth_limit = CONFIG_TRACE_CALL_DEPTH_LIMIT;

	puts("trace: enabled\n");
	trace_enabled = 1;
	trace_inited = 1;

	return 0;
}

#ifdef CONFIG_TRACE_EARLY
/**
 * trace_early_init() - initialize the tracing system for early tracing
 *
 * Return:	0 if ok, -ENOSPC if not enough memory is available
 */
int notrace trace_early_init(void)
{
	int func_count = get_func_count();
	size_t buff_size = CONFIG_TRACE_EARLY_SIZE;
	size_t needed;

	if (func_count < 0)
		return func_count;
	/* We can ignore additional calls to this function */
	if (trace_enabled)
		return 0;

	hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR, CONFIG_TRACE_EARLY_SIZE);
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size is %zx bytes, at least %zx needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	memset(hdr, '\0', needed);
	hdr->call_accum = (uintptr_t *)(hdr + 1);
	hdr->func_count = func_count;
	hdr->min_depth = INT_MAX;

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)((char *)hdr + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	hdr->depth_limit = CONFIG_TRACE_EARLY_CALL_DEPTH_LIMIT;
	printf("trace: early enable at %08x\n", CONFIG_TRACE_EARLY_ADDR);

	trace_enabled = 1;

	return 0;
}
#endif