1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (c) 2012 The Chromium OS Authors.
4  */
5 
6 #include <mapmem.h>
7 #include <time.h>
8 #include <trace.h>
9 #include <linux/errno.h>
10 #include <asm/global_data.h>
11 #include <asm/io.h>
12 #include <asm/sections.h>
13 
14 DECLARE_GLOBAL_DATA_PTR;
15 
/* Non-zero while tracing is recording calls; kept in .data so it has a
 * defined value even before BSS is cleared */
static char trace_enabled __section(".data");
/* Set once trace_init_() has completed and 'hdr' below is valid */
static char trace_inited __section(".data");

/* The header block at the start of the trace memory area */
struct trace_hdr {
	int func_count;		/* Total number of function call sites */
	u64 call_count;		/* Total number of tracked function calls */
	u64 untracked_count;	/* Total number of untracked function calls */
	int funcs_used;		/* Total number of functions used */

	/*
	 * Call count for each function. This is indexed by the word offset
	 * of the function from gd->relocaddr
	 */
	uintptr_t *call_accum;

	/* Function trace list */
	struct trace_call *ftrace;	/* The function call records */
	ulong ftrace_size;	/* Num. of ftrace records we have space for */
	ulong ftrace_count;	/* Num. of ftrace records written */
	ulong ftrace_too_deep_count;	/* Functions that were too deep */

	int depth;		/* Depth of function calls */
	int depth_limit;	/* Depth limit to trace to */
	int max_depth;		/* Maximum depth seen so far */
	int min_depth;		/* Minimum depth seen so far */
	bool trace_locked;	/* Used to detect recursive tracing */
};

/* Pointer to start of trace buffer */
static struct trace_hdr *hdr __section(".data");
47 
48 static inline uintptr_t __attribute__((no_instrument_function))
func_ptr_to_num(void * func_ptr)49 		func_ptr_to_num(void *func_ptr)
50 {
51 	uintptr_t offset = (uintptr_t)func_ptr;
52 
53 #ifdef CONFIG_SANDBOX
54 	offset -= (uintptr_t)_init;
55 #else
56 	if (gd->flags & GD_FLG_RELOC)
57 		offset -= gd->relocaddr;
58 	else
59 		offset -= CONFIG_TEXT_BASE;
60 #endif
61 	return offset / FUNC_SITE_SIZE;
62 }
63 
64 #if defined(CONFIG_EFI_LOADER) && (defined(CONFIG_ARM) || defined(CONFIG_RISCV))
65 
66 /**
67  * trace_gd - the value of the gd register
68  */
69 static gd_t *trace_gd;
70 
71 /**
72  * trace_save_gd() - save the value of the gd register
73  */
trace_save_gd(void)74 static void notrace trace_save_gd(void)
75 {
76 	trace_gd = gd;
77 }
78 
79 /**
80  * trace_swap_gd() - swap between U-Boot and application gd register value
81  *
82  * An UEFI application may change the value of the register that gd lives in.
83  * But some of our functions like get_ticks() access this register. So we
84  * have to set the gd register to the U-Boot value when entering a trace
85  * point and set it back to the application value when exiting the trace point.
86  */
trace_swap_gd(void)87 static void notrace trace_swap_gd(void)
88 {
89 	gd_t *temp_gd = trace_gd;
90 
91 	trace_gd = gd;
92 	set_gd(temp_gd);
93 }
94 
95 #else
96 
trace_save_gd(void)97 static void notrace trace_save_gd(void)
98 {
99 }
100 
trace_swap_gd(void)101 static void notrace trace_swap_gd(void)
102 {
103 }
104 
105 #endif
106 
add_ftrace(void * func_ptr,void * caller,ulong flags)107 static void notrace add_ftrace(void *func_ptr, void *caller, ulong flags)
108 {
109 	if (hdr->depth > hdr->depth_limit) {
110 		hdr->ftrace_too_deep_count++;
111 		return;
112 	}
113 	if (hdr->ftrace_count < hdr->ftrace_size) {
114 		struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];
115 
116 		rec->func = func_ptr_to_num(func_ptr);
117 		rec->caller = func_ptr_to_num(caller);
118 		rec->flags = flags | (timer_get_us() & FUNCF_TIMESTAMP_MASK);
119 	}
120 	hdr->ftrace_count++;
121 }
122 
123 /**
124  * __cyg_profile_func_enter() - record function entry
125  *
126  * We add to our tally for this function and add to the list of called
127  * functions.
128  *
129  * @func_ptr:	pointer to function being entered
130  * @caller:	pointer to function which called this function
131  */
__cyg_profile_func_enter(void * func_ptr,void * caller)132 void notrace __cyg_profile_func_enter(void *func_ptr, void *caller)
133 {
134 	if (trace_enabled) {
135 		int func;
136 
137 		if (hdr->trace_locked) {
138 			trace_enabled = 0;
139 			puts("trace: recursion detected, disabling\n");
140 			hdr->trace_locked = false;
141 			return;
142 		}
143 
144 		hdr->trace_locked = true;
145 		trace_swap_gd();
146 		add_ftrace(func_ptr, caller, FUNCF_ENTRY);
147 		func = func_ptr_to_num(func_ptr);
148 		if (func < hdr->func_count) {
149 			hdr->call_accum[func]++;
150 			hdr->call_count++;
151 		} else {
152 			hdr->untracked_count++;
153 		}
154 		hdr->depth++;
155 		if (hdr->depth > hdr->max_depth)
156 			hdr->max_depth = hdr->depth;
157 		trace_swap_gd();
158 		hdr->trace_locked = false;
159 	}
160 }
161 
162 /**
163  * __cyg_profile_func_exit() - record function exit
164  *
165  * @func_ptr:	pointer to function being entered
166  * @caller:	pointer to function which called this function
167  */
__cyg_profile_func_exit(void * func_ptr,void * caller)168 void notrace __cyg_profile_func_exit(void *func_ptr, void *caller)
169 {
170 	if (trace_enabled) {
171 		trace_swap_gd();
172 		hdr->depth--;
173 		add_ftrace(func_ptr, caller, FUNCF_EXIT);
174 		if (hdr->depth < hdr->min_depth)
175 			hdr->min_depth = hdr->depth;
176 		trace_swap_gd();
177 	}
178 }
179 
180 /**
181  * trace_list_functions() - produce a list of called functions
182  *
183  * The information is written into the supplied buffer - a header followed
184  * by a list of function records.
185  *
186  * @buff:	buffer to place list into
187  * @buff_size:	size of buffer
188  * @needed:	returns size of buffer needed, which may be
189  *		greater than buff_size if we ran out of space.
190  * Return:	0 if ok, -ENOSPC if space was exhausted
191  */
trace_list_functions(void * buff,size_t buff_size,size_t * needed)192 int trace_list_functions(void *buff, size_t buff_size, size_t *needed)
193 {
194 	struct trace_output_hdr *output_hdr = NULL;
195 	void *end, *ptr = buff;
196 	size_t func;
197 	size_t upto;
198 
199 	end = buff ? buff + buff_size : NULL;
200 
201 	/* Place some header information */
202 	if (ptr + sizeof(struct trace_output_hdr) < end)
203 		output_hdr = ptr;
204 	ptr += sizeof(struct trace_output_hdr);
205 
206 	/* Add information about each function */
207 	for (func = upto = 0; func < hdr->func_count; func++) {
208 		size_t calls = hdr->call_accum[func];
209 
210 		if (!calls)
211 			continue;
212 
213 		if (ptr + sizeof(struct trace_output_func) < end) {
214 			struct trace_output_func *stats = ptr;
215 
216 			stats->offset = func * FUNC_SITE_SIZE;
217 			stats->call_count = calls;
218 			upto++;
219 		}
220 		ptr += sizeof(struct trace_output_func);
221 	}
222 
223 	/* Update the header */
224 	if (output_hdr) {
225 		output_hdr->rec_count = upto;
226 		output_hdr->type = TRACE_CHUNK_FUNCS;
227 	}
228 
229 	/* Work out how must of the buffer we used */
230 	*needed = ptr - buff;
231 	if (ptr > end)
232 		return -ENOSPC;
233 
234 	return 0;
235 }
236 
237 /**
238  * trace_list_functions() - produce a list of function calls
239  *
240  * The information is written into the supplied buffer - a header followed
241  * by a list of function records.
242  *
243  * @buff:	buffer to place list into
244  * @buff_size:	size of buffer
245  * @needed:	returns size of buffer needed, which may be
246  *		greater than buff_size if we ran out of space.
247  * Return:	0 if ok, -ENOSPC if space was exhausted
248  */
trace_list_calls(void * buff,size_t buff_size,size_t * needed)249 int trace_list_calls(void *buff, size_t buff_size, size_t *needed)
250 {
251 	struct trace_output_hdr *output_hdr = NULL;
252 	void *end, *ptr = buff;
253 	size_t rec, upto;
254 	size_t count;
255 
256 	end = buff ? buff + buff_size : NULL;
257 
258 	/* Place some header information */
259 	if (ptr + sizeof(struct trace_output_hdr) < end)
260 		output_hdr = ptr;
261 	ptr += sizeof(struct trace_output_hdr);
262 
263 	/* Add information about each call */
264 	count = hdr->ftrace_count;
265 	if (count > hdr->ftrace_size)
266 		count = hdr->ftrace_size;
267 	for (rec = upto = 0; rec < count; rec++) {
268 		if (ptr + sizeof(struct trace_call) < end) {
269 			struct trace_call *call = &hdr->ftrace[rec];
270 			struct trace_call *out = ptr;
271 
272 			out->func = call->func * FUNC_SITE_SIZE;
273 			out->caller = call->caller * FUNC_SITE_SIZE;
274 			out->flags = call->flags;
275 			upto++;
276 		}
277 		ptr += sizeof(struct trace_call);
278 	}
279 
280 	/* Update the header */
281 	if (output_hdr) {
282 		memset(output_hdr, '\0', sizeof(*output_hdr));
283 		output_hdr->rec_count = upto;
284 		output_hdr->type = TRACE_CHUNK_CALLS;
285 		output_hdr->version = TRACE_VERSION;
286 		output_hdr->text_base = CONFIG_TEXT_BASE;
287 	}
288 
289 	/* Work out how must of the buffer we used */
290 	*needed = ptr - buff;
291 	if (ptr > end)
292 		return -ENOSPC;
293 
294 	return 0;
295 }
296 
297 /**
298  * trace_print_stats() - print basic information about tracing
299  */
trace_print_stats(void)300 void trace_print_stats(void)
301 {
302 	ulong count;
303 
304 #ifndef FTRACE
305 	puts("Warning: make U-Boot with FTRACE to enable function instrumenting.\n");
306 	puts("You will likely get zeroed data here\n");
307 #endif
308 	if (!trace_inited) {
309 		printf("Trace is disabled\n");
310 		return;
311 	}
312 	print_grouped_ull(hdr->func_count, 10);
313 	puts(" function sites\n");
314 	print_grouped_ull(hdr->call_count, 10);
315 	puts(" function calls\n");
316 	print_grouped_ull(hdr->untracked_count, 10);
317 	puts(" untracked function calls\n");
318 	count = min(hdr->ftrace_count, hdr->ftrace_size);
319 	print_grouped_ull(count, 10);
320 	puts(" traced function calls");
321 	if (hdr->ftrace_count > hdr->ftrace_size) {
322 		printf(" (%lu dropped due to overflow)",
323 		       hdr->ftrace_count - hdr->ftrace_size);
324 	}
325 
326 	/* Add in minimum depth since the trace did not start at top level */
327 	printf("\n%15d maximum observed call depth\n",
328 	       hdr->max_depth - hdr->min_depth);
329 	printf("%15d call depth limit\n", hdr->depth_limit);
330 	print_grouped_ull(hdr->ftrace_too_deep_count, 10);
331 	puts(" calls not traced due to depth\n");
332 	print_grouped_ull(hdr->ftrace_size, 10);
333 	puts(" max function calls\n");
334 	printf("\ntrace buffer %lx call records %lx\n",
335 	       (ulong)map_to_sysmem(hdr), (ulong)map_to_sysmem(hdr->ftrace));
336 }
337 
trace_set_enabled(int enabled)338 void notrace trace_set_enabled(int enabled)
339 {
340 	trace_enabled = enabled != 0;
341 }
342 
get_func_count(void)343 static int get_func_count(void)
344 {
345 	/* Detect no support for mon_len since this means tracing cannot work */
346 	if (IS_ENABLED(CONFIG_SANDBOX) && !gd->mon_len) {
347 		puts("Tracing is not supported on this board\n");
348 		return -ENOTSUPP;
349 	}
350 
351 	return gd->mon_len / FUNC_SITE_SIZE;
352 }
353 
trace_init_(void * buff,size_t buff_size,bool copy_early,bool enable)354 static int notrace trace_init_(void *buff, size_t buff_size, bool copy_early,
355 			       bool enable)
356 {
357 	int func_count = get_func_count();
358 	size_t needed;
359 	int was_disabled = !trace_enabled;
360 
361 	if (func_count < 0)
362 		return func_count;
363 	trace_save_gd();
364 
365 	if (copy_early) {
366 #ifdef CONFIG_TRACE_EARLY
367 		ulong used, count;
368 		char *end;
369 
370 		/*
371 		 * Copy over the early trace data if we have it. Disable
372 		 * tracing while we are doing this.
373 		 */
374 		trace_enabled = 0;
375 		hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR,
376 				 CONFIG_TRACE_EARLY_SIZE);
377 		count = min(hdr->ftrace_count, hdr->ftrace_size);
378 		end = (char *)&hdr->ftrace[count];
379 		used = end - (char *)hdr;
380 		printf("trace: copying %08lx bytes of early data from %x to %08lx\n",
381 		       used, CONFIG_TRACE_EARLY_ADDR,
382 		       (ulong)map_to_sysmem(buff));
383 		printf("%lu traced function calls", count);
384 		if (hdr->ftrace_count > hdr->ftrace_size) {
385 			printf(" (%lu dropped due to overflow)",
386 			       hdr->ftrace_count - hdr->ftrace_size);
387 			hdr->ftrace_count = hdr->ftrace_size;
388 		}
389 		puts("\n");
390 		memcpy(buff, hdr, used);
391 #endif
392 	}
393 	hdr = (struct trace_hdr *)buff;
394 	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
395 	if (needed > buff_size) {
396 		printf("trace: buffer size %zx bytes: at least %zx needed\n",
397 		       buff_size, needed);
398 		return -ENOSPC;
399 	}
400 
401 	if (was_disabled) {
402 		memset(hdr, '\0', needed);
403 		hdr->min_depth = INT_MAX;
404 	}
405 	hdr->func_count = func_count;
406 	hdr->call_accum = (uintptr_t *)(hdr + 1);
407 
408 	/* Use any remaining space for the timed function trace */
409 	hdr->ftrace = (struct trace_call *)(buff + needed);
410 	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
411 	hdr->depth_limit = CONFIG_TRACE_CALL_DEPTH_LIMIT;
412 
413 	printf("trace: initialized, %senabled\n", enable ? "" : "not ");
414 	trace_enabled = enable;
415 	trace_inited = 1;
416 
417 	return 0;
418 }
419 
420 /**
421  * trace_init() - initialize the tracing system and enable it
422  *
423  * @buff:	Pointer to trace buffer
424  * @buff_size:	Size of trace buffer
425  * Return:	0 if ok
426  */
trace_init(void * buff,size_t buff_size)427 int notrace trace_init(void *buff, size_t buff_size)
428 {
429 	/* If traces are enabled already, we may have early traces to copy */
430 	return trace_init_(buff, buff_size, trace_enabled, true);
431 }
432 
433 /**
434  * trace_wipe() - clear accumulated traced data
435  *
436  * May be called with tracing enabled or disabled.
437  */
trace_wipe(void)438 int notrace trace_wipe(void)
439 {
440 	bool was_enabled = trace_enabled;
441 
442 	if (trace_enabled)
443 		trace_enabled = 0;
444 	return trace_init_(gd->trace_buff, CONFIG_TRACE_BUFFER_SIZE,
445 			   false, was_enabled);
446 }
447 
#ifdef CONFIG_TRACE_EARLY
/**
 * trace_early_init() - initialize the tracing system for early tracing
 *
 * Uses the fixed buffer at CONFIG_TRACE_EARLY_ADDR so that calls can be
 * recorded before the main trace buffer is set up.
 *
 * Return:	0 if ok, -ENOSPC if not enough memory is available
 */
int notrace trace_early_init(void)
{
	size_t buff_size = CONFIG_TRACE_EARLY_SIZE;
	int func_count = get_func_count();
	size_t needed;

	if (func_count < 0)
		return func_count;
	/* We can ignore additional calls to this function */
	if (trace_enabled)
		return 0;

	hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR, CONFIG_TRACE_EARLY_SIZE);
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size is %zx bytes, at least %zx needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	memset(hdr, '\0', needed);
	hdr->call_accum = (uintptr_t *)(hdr + 1);
	hdr->func_count = func_count;
	hdr->min_depth = INT_MAX;

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)((char *)hdr + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	hdr->depth_limit = CONFIG_TRACE_EARLY_CALL_DEPTH_LIMIT;
	printf("trace: early enable at %08x\n", CONFIG_TRACE_EARLY_ADDR);

	trace_enabled = 1;

	return 0;
}
#endif
490