/*
 * Copyright (c) 2024 Meta Platforms
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/debug/symtab.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/linker/linker-defs.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf);

typedef bool (*riscv_stacktrace_cb)(void *cookie, unsigned long addr, unsigned long sfp);

#define MAX_STACK_FRAMES CONFIG_ARCH_STACKWALK_MAX_FRAMES

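/*
 * Frame record saved at the top of each stack frame when the compiler emits
 * frame pointers: the previous frame pointer followed by the saved return
 * address. Since `fp` points just past this pair, the record of the current
 * frame is found at `(struct stackframe *)fp - 1` (see walk_stackframe()
 * below).
 */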
struct stackframe {
	uintptr_t fp;
	uintptr_t ra;
};

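/*
 * Verifier invoked on every candidate frame address: returns true if `addr`
 * lies within the stack currently being unwound. The bounds depend on the
 * context (kernel thread, user thread, IRQ, or fatal path).
 */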
typedef bool (*stack_verify_fn)(uintptr_t, const struct k_thread *const, const struct arch_esf *);

static inline bool in_irq_stack_bound(uintptr_t addr, uint8_t cpu_id)
{
	uintptr_t start, end;

	start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]);
	end = start + CONFIG_ISR_STACK_SIZE;

	return (addr >= start) && (addr < end);
}

static inline bool in_kernel_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread)
{
#ifdef CONFIG_THREAD_STACK_INFO
	uintptr_t start, end;

	start = thread->stack_info.start;
	end = Z_STACK_PTR_ALIGN(thread->stack_info.start + thread->stack_info.size);

	return (addr >= start) && (addr < end);
#else
	ARG_UNUSED(addr);
	ARG_UNUSED(thread);
	/* Return false as we can't check if the addr is in the thread stack without stack info */
	return false;
#endif
}

#ifdef CONFIG_USERSPACE
static inline bool in_user_thread_stack_bound(uintptr_t addr, const struct k_thread *const thread)
{
	uintptr_t start, end;

	/* See: zephyr/include/zephyr/arch/riscv/arch.h */
	if (IS_ENABLED(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)) {
		start = thread->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE;
	} else {
		start = thread->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
	}
	end = Z_STACK_PTR_ALIGN(thread->arch.priv_stack_start + K_KERNEL_STACK_RESERVED +
				CONFIG_PRIVILEGED_STACK_SIZE);

	return (addr >= start) && (addr < end);
}
#endif /* CONFIG_USERSPACE */

static bool in_stack_bound(uintptr_t addr, const struct k_thread *const thread,
			   const struct arch_esf *esf)
{
	ARG_UNUSED(esf);

	if (!IS_ALIGNED(addr, sizeof(uintptr_t))) {
		return false;
	}

#ifdef CONFIG_USERSPACE
	if ((thread->base.user_options & K_USER) != 0) {
		return in_user_thread_stack_bound(addr, thread);
	}
#endif /* CONFIG_USERSPACE */

	return in_kernel_thread_stack_bound(addr, thread);
}

static inline bool in_text_region(uintptr_t addr)
{
	return (addr >= (uintptr_t)__text_region_start) && (addr < (uintptr_t)__text_region_end);
}

#ifdef CONFIG_FRAME_POINTER
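/*
 * Unwind by chasing the frame-pointer chain: each iteration loads the
 * previous {fp, ra} pair saved at the top of the current frame. The walk
 * stops when the frame pointer fails verification, stops increasing
 * (frames must sit at strictly higher addresses on a descending stack),
 * or MAX_STACK_FRAMES is reached.
 */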
static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k_thread *thread,
			    const struct arch_esf *esf, stack_verify_fn vrfy,
			    const _callee_saved_t *csf)
{
	uintptr_t fp, last_fp = 0;
	uintptr_t ra;
	struct stackframe *frame;

	if (esf != NULL) {
		/* Unwind the provided exception stack frame */
		fp = esf->s0;
		ra = esf->mepc;
	} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
		/* Unwind current thread (default case when nothing is provided) */
		fp = (uintptr_t)__builtin_frame_address(0);
		ra = (uintptr_t)walk_stackframe;
	} else {
		/* Unwind the provided thread */
		fp = csf->s0;
		ra = csf->ra;
	}

	for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy(fp, thread, esf) && (fp > last_fp); i++) {
		if (in_text_region(ra) && !cb(cookie, ra, fp)) {
			break;
		}
		last_fp = fp;

		/* Unwind to the previous frame */
		frame = (struct stackframe *)fp - 1;

		if ((i == 0) && (esf != NULL)) {
			/* Print `esf->ra` if we are at the top of the stack */
			if (in_text_region(esf->ra) && !cb(cookie, esf->ra, fp)) {
				break;
			}
			/**
			 * For the first stack frame, the `ra` is not stored in the frame
			 * if the preempted function doesn't call any other function, so
			 * we can observe:
			 *
			 *                      .-------------.
			 *   frame[0]->fp ---> | frame[0] fp |
			 *                      :-------------:
			 *   frame[0]->ra ---> | frame[1] fp |
			 *                      | frame[1] ra |
			 *                      :~~~~~~~~~~~~~:
			 *                      | frame[N] fp |
			 *
			 * Instead of:
			 *
			 *                      .-------------.
			 *   frame[0]->fp ---> | frame[0] fp |
			 *   frame[0]->ra ---> | frame[1] ra |
			 *                      :-------------:
			 *                      | frame[1] fp |
			 *                      | frame[1] ra |
			 *                      :~~~~~~~~~~~~~:
			 *                      | frame[N] fp |
			 *
			 * Check if `frame->ra` actually points to a `fp`, and adjust
			 * accordingly.
			 */
			if (vrfy(frame->ra, thread, esf)) {
				fp = frame->ra;
				frame = (struct stackframe *)fp;
			}
		}

		fp = frame->fp;
		ra = frame->ra;
	}
}
#else /* !CONFIG_FRAME_POINTER */
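/*
 * Without frame pointers there is no frame chain to follow. Instead, slide a
 * cursor word-by-word up the stack (starting from the `sp` register, bound
 * below) and report every value that looks like a saved return address, i.e.
 * every word that falls within the text region. This is a heuristic and may
 * report stale or spurious addresses, but it requires no extra compiler
 * support.
 */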
register uintptr_t current_stack_pointer __asm__("sp");
static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k_thread *thread,
			    const struct arch_esf *esf, stack_verify_fn vrfy,
			    const _callee_saved_t *csf)
{
	uintptr_t sp;
	uintptr_t ra;
	uintptr_t *ksp, last_ksp = 0;

	if (esf != NULL) {
		/* Unwind the provided exception stack frame */
		sp = z_riscv_get_sp_before_exc(esf);
		ra = esf->mepc;
	} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
		/* Unwind current thread (default case when nothing is provided) */
		sp = current_stack_pointer;
		ra = (uintptr_t)walk_stackframe;
	} else {
		/* Unwind the provided thread */
		sp = csf->sp;
		ra = csf->ra;
	}

	ksp = (uintptr_t *)sp;
	for (int i = 0; (i < MAX_STACK_FRAMES) && vrfy((uintptr_t)ksp, thread, esf) &&
			((uintptr_t)ksp > last_ksp);) {
		if (in_text_region(ra)) {
			if (!cb(cookie, ra, POINTER_TO_UINT(ksp))) {
				break;
			}
			/*
			 * Increment the iterator only when `ra` is within the text
			 * region, so that stack words that can't be return addresses
			 * don't consume the MAX_STACK_FRAMES budget
			 */
			i++;
		}
		last_ksp = (uintptr_t)ksp;
		/* Unwind to the previous frame */
		ra = ((struct arch_esf *)ksp++)->ra;
	}
}
#endif /* CONFIG_FRAME_POINTER */

void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
		     const struct k_thread *thread, const struct arch_esf *esf)
{
	if (thread == NULL) {
		/* If `thread` is NULL, default to `_current` and try to unwind that */
		thread = _current;
	}

	walk_stackframe((riscv_stacktrace_cb)callback_fn, cookie, thread, esf, in_stack_bound,
			&thread->callee_saved);
}

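/*
 * Example usage (illustrative sketch only, not part of this file): a callback
 * that prints every return address of the current thread, walked until it
 * returns false:
 *
 *	static bool dump_cb(void *cookie, unsigned long addr)
 *	{
 *		printk("ra: %lx\n", addr);
 *		return true;
 *	}
 *
 *	arch_stack_walk(dump_cb, NULL, NULL, NULL);
 */
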
#ifdef CONFIG_EXCEPTION_STACK_TRACE
static bool in_fatal_stack_bound(uintptr_t addr, const struct k_thread *const thread,
				 const struct arch_esf *esf)
{
	if (!IS_ALIGNED(addr, sizeof(uintptr_t))) {
		return false;
	}

	if ((thread == NULL) || arch_is_in_isr()) {
		/* We were servicing an interrupt */
		uint8_t cpu_id = IS_ENABLED(CONFIG_SMP) ? arch_curr_cpu()->id : 0U;

		return in_irq_stack_bound(addr, cpu_id);
	}

	return in_stack_bound(addr, thread, esf);
}

#if __riscv_xlen == 32
#define PR_REG "%08" PRIxPTR
#elif __riscv_xlen == 64
#define PR_REG "%016" PRIxPTR
#endif

#ifdef CONFIG_FRAME_POINTER
#define SFP "fp"
#else
#define SFP "sp"
#endif /* CONFIG_FRAME_POINTER */

#ifdef CONFIG_SYMTAB
#define LOG_STACK_TRACE(idx, sfp, ra, name, offset)                                \
	EXCEPTION_DUMP(" %2d: " SFP ": " PR_REG " ra: " PR_REG " [%s+0x%x]",       \
		       idx, sfp, ra, name, offset)
#else
#define LOG_STACK_TRACE(idx, sfp, ra, name, offset)                                \
	EXCEPTION_DUMP(" %2d: " SFP ": " PR_REG " ra: " PR_REG, idx, sfp, ra)
#endif /* CONFIG_SYMTAB */

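/*
 * With CONFIG_SYMTAB on RV64, each line printed by print_trace_address()
 * below looks like, e.g. (illustrative values and symbol name):
 *
 *	 0: fp: 000000008000f6a0 ra: 0000000080001c34 [k_sleep+0x1c]
 */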
static bool print_trace_address(void *arg, unsigned long ra, unsigned long sfp)
{
	int *i = arg;
#ifdef CONFIG_SYMTAB
	uint32_t offset = 0;
	const char *name = symtab_find_symbol_name(ra, &offset);
#endif /* CONFIG_SYMTAB */

	LOG_STACK_TRACE((*i)++, sfp, ra, name, offset);

	return true;
}

void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf)
{
	int i = 0;

	EXCEPTION_DUMP("call trace:");
	walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf);
	EXCEPTION_DUMP("");
}
#endif /* CONFIG_EXCEPTION_STACK_TRACE */