// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (C) 2019, Linaro Limited
 */

/*
 * APIs defined in this file must use the __noprof attribute to avoid
 * circular dependencies while profiling. As a consequence they must not
 * call standard library APIs, since those may be profiled too.
 */
#include <assert.h>
#include <types_ext.h>
#include <user_ta_header.h>
#if defined(__KERNEL__)
#if defined(ARM32) || defined(ARM64)
#include <arm.h>
#elif defined(RV32) || defined(RV64)
#include <riscv.h>
#endif
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <mm/core_mmu.h>
#else
#if defined(ARM32) || defined(ARM64)
#include <arm_user_sysreg.h>
#elif defined(RV32) || defined(RV64)
#include <riscv_user_sysreg.h>
#endif
#include <setjmp.h>
#include <utee_syscalls.h>
#endif
#include "ftrace.h"

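/*
 * Return the ftrace buffer to record into, or NULL if tracing cannot
 * proceed. In the kernel this is the buffer of the current TA session,
 * provided a thread is active, the user mapping is in place, the session
 * context matches the thread's context, the TA hasn't panicked and
 * syscall tracing is enabled and not suspended. In user mode it is simply
 * the TA's own buffer, __ftrace_buf_start.
 */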
static __noprof struct ftrace_buf *get_fbuf(void)
{
#if defined(__KERNEL__)
	short int ct = thread_get_id_may_fail();
	struct ts_session *s = NULL;
	struct thread_specific_data *tsd = NULL;

	if (ct == -1)
		return NULL;

	if (!(core_mmu_user_va_range_is_defined() &&
	      core_mmu_user_mapping_is_active()))
		return NULL;

	tsd = thread_get_tsd();
	s = TAILQ_FIRST(&tsd->sess_stack);

	if (!s || tsd->ctx != s->ctx)
		return NULL;

	if (!is_ta_ctx(s->ctx) || to_ta_ctx(s->ctx)->panicked)
		return NULL;

	if (s->fbuf && s->fbuf->syscall_trace_enabled &&
	    !s->fbuf->syscall_trace_suspended)
		return s->fbuf;
	else
		return NULL;
#else
	return &__ftrace_buf_start;
#endif
}

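/*
 * Append one element to the trace buffer: the call level goes in the
 * topmost byte and @val (a program counter or a duration) in the lower
 * 56 bits. The index wraps around when the buffer is full and the
 * overflow flag is set.
 */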
static void __noprof add_elem(struct ftrace_buf *fbuf, uint8_t level,
			      uint64_t val)
{
	uint64_t *elem = NULL;
	size_t idx = fbuf->curr_idx;

	/* Make sure the topmost byte doesn't contain useful information */
	assert(!(val >> 56));

	elem = (uint64_t *)((vaddr_t)fbuf + fbuf->buf_off) + idx;
	*elem = SHIFT_U64(level, 56) | val;

	idx++;
	if ((idx + 1) * sizeof(*elem) > fbuf->max_size) {
		idx = 0;
		fbuf->overflow = true;
	}

	fbuf->curr_idx = idx;
}

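/*
 * Called on function entry by the profiling hooks: record the entered
 * function's address and entry time, save the real return address on
 * ret_stack[] and redirect the caller's link register to
 * __ftrace_return so that ftrace_return() runs when the function
 * returns.
 */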
void __noprof ftrace_enter(unsigned long pc, unsigned long *lr)
{
	uint64_t now = barrier_read_counter_timer();
	struct ftrace_buf *fbuf = get_fbuf();

	if (!fbuf || !fbuf->buf_off || !fbuf->max_size)
		return;

	add_elem(fbuf, fbuf->ret_idx + 1, pc);

	if (fbuf->ret_idx < FTRACE_RETFUNC_DEPTH) {
		fbuf->ret_stack[fbuf->ret_idx] = *lr;
		fbuf->begin_time[fbuf->ret_idx] = now;
		fbuf->ret_idx++;
	} else {
		/*
		 * This scenario isn't expected as the function call depth
		 * shouldn't exceed FTRACE_RETFUNC_DEPTH.
		 */
#if defined(__KERNEL__)
		panic();
#else
		_utee_panic(0);
#endif
	}

	*lr = (unsigned long)&__ftrace_return;
}

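/*
 * Called via __ftrace_return on function exit: record the elapsed time
 * in nanoseconds for the completed call and hand back the original
 * return address saved by ftrace_enter().
 */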
unsigned long __noprof ftrace_return(void)
{
	uint64_t now = barrier_read_counter_timer();
	struct ftrace_buf *fbuf = get_fbuf();
	uint64_t start = 0;
	uint64_t elapsed = 0;

	/* Check for valid return index */
	if (!fbuf || !fbuf->ret_idx || fbuf->ret_idx > FTRACE_RETFUNC_DEPTH)
		return 0;

	fbuf->ret_idx--;
	start = fbuf->begin_time[fbuf->ret_idx];
	elapsed = (now - start) * 1000000000 / read_cntfrq();
	add_elem(fbuf, 0, elapsed);

	return fbuf->ret_stack[fbuf->ret_idx];
}

#if !defined(__KERNEL__)
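/*
 * longjmp() skips the returns of the functions called between setjmp()
 * and longjmp(), so unwind the saved return stack down to the recorded
 * index to keep the trace consistent.
 */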
void __noprof ftrace_longjmp(unsigned int *ret_idx)
{
	while (__ftrace_buf_start.ret_idx > *ret_idx)
		ftrace_return();
}

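/* Record the current return-stack depth so longjmp() can unwind to it */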
void __noprof ftrace_setjmp(unsigned int *ret_idx)
{
	*ret_idx = __ftrace_buf_start.ret_idx;
}
#endif