/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <kernel_internal.h>
#include <zephyr/toolchain.h>
#include <zephyr/debug/coredump.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>

#include "coredump_internal.h"
#if defined(CONFIG_DEBUG_COREDUMP_BACKEND_LOGGING)
extern struct coredump_backend_api coredump_backend_logging;
static struct coredump_backend_api
	*backend_api = &coredump_backend_logging;
#elif defined(CONFIG_DEBUG_COREDUMP_BACKEND_FLASH_PARTITION)
extern struct coredump_backend_api coredump_backend_flash_partition;
static struct coredump_backend_api
	*backend_api = &coredump_backend_flash_partition;
#elif defined(CONFIG_DEBUG_COREDUMP_BACKEND_INTEL_ADSP_MEM_WINDOW)
extern struct coredump_backend_api coredump_backend_intel_adsp_mem_window;
static struct coredump_backend_api
	*backend_api = &coredump_backend_intel_adsp_mem_window;
#elif defined(CONFIG_DEBUG_COREDUMP_BACKEND_IN_MEMORY)
extern struct coredump_backend_api coredump_backend_in_memory;
static struct coredump_backend_api
	*backend_api = &coredump_backend_in_memory;
#elif defined(CONFIG_DEBUG_COREDUMP_BACKEND_OTHER)
extern struct coredump_backend_api coredump_backend_other;
static struct coredump_backend_api
	*backend_api = &coredump_backend_other;
#else
#error "Need to select a coredump backend"
#endif

#if defined(CONFIG_COREDUMP_DEVICE)
#include <zephyr/drivers/coredump.h>
#define DT_DRV_COMPAT zephyr_coredump
#endif

#if defined(CONFIG_DEBUG_COREDUMP_THREAD_STACK_TOP_LIMIT) && \
	CONFIG_DEBUG_COREDUMP_THREAD_STACK_TOP_LIMIT > 0
#define STACK_TOP_LIMIT ((size_t)CONFIG_DEBUG_COREDUMP_THREAD_STACK_TOP_LIMIT)
#else
#define STACK_TOP_LIMIT SIZE_MAX
#endif

#if defined(CONFIG_DEBUG_COREDUMP_DUMP_THREAD_PRIV_STACK)
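/*
 * Weak fallback so the build links even when an architecture does not
 * provide its own privileged stack dump routine; arch code may override it.
 */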
__weak void arch_coredump_priv_stack_dump(struct k_thread *thread)
{
	/* Stub if architecture has not implemented this. */
	ARG_UNUSED(thread);
}
#endif /* CONFIG_DEBUG_COREDUMP_DUMP_THREAD_PRIV_STACK */

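/*
 * Write the top-level coredump header: the 'ZE' magic, header version,
 * fault reason, pointer width (stored as log2 of the width in bits,
 * e.g. 5 for 32-bit) and the architecture target code.
 */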
static void dump_header(unsigned int reason)
{
	struct coredump_hdr_t hdr = {
		.id = {'Z', 'E'},
		.hdr_version = COREDUMP_HDR_VER,
		.reason = sys_cpu_to_le16(reason),
	};

	if (sizeof(uintptr_t) == 8) {
		hdr.ptr_size_bits = 6; /* 2^6 = 64 */
	} else if (sizeof(uintptr_t) == 4) {
		hdr.ptr_size_bits = 5; /* 2^5 = 32 */
	} else {
		hdr.ptr_size_bits = 0; /* Unknown */
	}

	hdr.tgt_code = sys_cpu_to_le16(arch_coredump_tgt_code_get());

	backend_api->buffer_output((uint8_t *)&hdr, sizeof(hdr));
}

#if defined(CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_MIN) || \
	defined(CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_THREADS)

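/*
 * Compute the [start, end) region of a thread's stack to dump. When
 * CONFIG_DEBUG_COREDUMP_THREAD_STACK_TOP is enabled, only the used
 * portion starting at the thread's saved stack pointer is selected,
 * capped at STACK_TOP_LIMIT bytes.
 */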
static inline void select_stack_region(const struct k_thread *thread, uintptr_t *start,
				       uintptr_t *end)
{
	uintptr_t sp;

	*start = thread->stack_info.start;
	*end = thread->stack_info.start + thread->stack_info.size;

	if (!IS_ENABLED(CONFIG_DEBUG_COREDUMP_THREAD_STACK_TOP)) {
		return;
	}

	sp = arch_coredump_stack_ptr_get(thread);

	if (IN_RANGE(sp, *start, *end)) {
		/* Skip ahead to the stack pointer. */
		*start = sp;
	}

	/* Make sure no more than STACK_TOP_LIMIT bytes of the stack are dumped. */
	*end = *start + MIN((size_t)(*end - *start), STACK_TOP_LIMIT);
}

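/*
 * Dump a single thread: first the struct k_thread itself, then the
 * selected stack region, and finally the privileged stack for user
 * threads when CONFIG_DEBUG_COREDUMP_DUMP_THREAD_PRIV_STACK is enabled.
 */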
static void dump_thread(struct k_thread *thread)
{
	uintptr_t start_addr;
	uintptr_t end_addr;

	/*
	 * When dumping minimum information,
	 * the current thread struct and stack need to
	 * be dumped so the debugger can examine them.
	 */

	if (thread == NULL) {
		return;
	}

	start_addr = POINTER_TO_UINT(thread);
	end_addr = start_addr + sizeof(*thread);
	coredump_memory_dump(start_addr, end_addr);

	select_stack_region(thread, &start_addr, &end_addr);
	coredump_memory_dump(start_addr, end_addr);

#if defined(CONFIG_DEBUG_COREDUMP_DUMP_THREAD_PRIV_STACK)
	if ((thread->base.user_options & K_USER) == K_USER) {
		arch_coredump_priv_stack_dump(thread);
	}
#endif /* CONFIG_DEBUG_COREDUMP_DUMP_THREAD_PRIV_STACK */
}
#endif

#if defined(CONFIG_COREDUMP_DEVICE)
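/* Invoke the dump() callback of a devicetree-defined coredump device. */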
static void process_coredump_dev_memory(const struct device *dev)
{
	DEVICE_API_GET(coredump, dev)->dump(dev);
}
#endif

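/*
 * Walk all configured memory sources and dump them: the linker-defined
 * region list, every thread plus the interrupt stack, and any devicetree
 * coredump devices, depending on the enabled Kconfig options.
 */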
void process_memory_region_list(void)
{
#ifdef CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_LINKER_RAM
	unsigned int idx = 0;

	while (true) {
		struct z_coredump_memory_region_t *r =
			&z_coredump_memory_regions[idx];

		if (r->end == POINTER_TO_UINT(NULL)) {
			break;
		}

		coredump_memory_dump(r->start, r->end);

		idx++;
	}
#endif

#ifdef CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_THREADS
	/*
	 * The content of _kernel.threads is not modified during dump
	 * capture, so there is no need to lock z_thread_monitor_lock.
	 */
	struct k_thread *current;

	for (current = _kernel.threads; current; current = current->next_thread) {
		dump_thread(current);
	}

	/* Also dump the interrupt stack, in case the error occurred in an interrupt */
	char *irq_stack = _kernel.cpus[0].irq_stack;
	uintptr_t start_addr = POINTER_TO_UINT(irq_stack) - CONFIG_ISR_STACK_SIZE;

	coredump_memory_dump(start_addr, POINTER_TO_UINT(irq_stack));
#endif /* CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_THREADS */

#if defined(CONFIG_COREDUMP_DEVICE)
#define MY_FN(inst) process_coredump_dev_memory(DEVICE_DT_INST_GET(inst));
	DT_INST_FOREACH_STATUS_OKAY(MY_FN)
#endif
}

#ifdef CONFIG_DEBUG_COREDUMP_THREADS_METADATA
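/*
 * Emit the threads-metadata block: a small header followed by a raw
 * copy of the _kernel struct for use by offline debug tooling.
 */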
static void dump_threads_metadata(void)
{
	struct coredump_threads_meta_hdr_t hdr = {
		.id = THREADS_META_HDR_ID,
		.hdr_version = THREADS_META_HDR_VER,
		.num_bytes = 0,
	};

	hdr.num_bytes += sizeof(_kernel);

	coredump_buffer_output((uint8_t *)&hdr, sizeof(hdr));
	coredump_buffer_output((uint8_t *)&_kernel, sizeof(_kernel));
}
#endif /* CONFIG_DEBUG_COREDUMP_THREADS_METADATA */

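/*
 * Main entry point, typically invoked from the fatal error path. Emits
 * the dump in order: header, architecture register dump (if an exception
 * stack frame is available), optional threads metadata, the faulting
 * thread (minimum dump), and the memory region list.
 */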
void coredump(unsigned int reason, const struct arch_esf *esf,
	      struct k_thread *thread)
{
	z_coredump_start();

	dump_header(reason);

	if (esf != NULL) {
		arch_coredump_info_dump(esf);
	}

#ifdef CONFIG_DEBUG_COREDUMP_THREADS_METADATA
	dump_threads_metadata();
#endif

	if (thread != NULL) {
#ifdef CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_MIN
		dump_thread(thread);
#endif
	}

	process_memory_region_list();

	z_coredump_end();
}

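/* Notify the selected backend that a dump session is starting. */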
void z_coredump_start(void)
{
	backend_api->start();
}

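/* Notify the selected backend that the dump session has finished. */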
void z_coredump_end(void)
{
	backend_api->end();
}

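/*
 * Forward an arbitrary buffer to the backend, silently ignoring NULL or
 * empty buffers so callers do not need to validate their arguments first.
 */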
void coredump_buffer_output(uint8_t *buf, size_t buflen)
{
	if ((buf == NULL) || (buflen == 0)) {
		/* Invalid buffer, skip */
		return;
	}

	backend_api->buffer_output(buf, buflen);
}

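/*
 * Dump the memory range [start_addr, end_addr): emit a memory block
 * header with the addresses converted to little-endian, followed by the
 * raw contents. Ranges involving NULL or with start >= end are ignored.
 */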
void coredump_memory_dump(uintptr_t start_addr, uintptr_t end_addr)
{
	struct coredump_mem_hdr_t m;
	size_t len;

	if ((start_addr == POINTER_TO_UINT(NULL)) ||
	    (end_addr == POINTER_TO_UINT(NULL))) {
		return;
	}

	if (start_addr >= end_addr) {
		return;
	}

	len = end_addr - start_addr;

	m.id = COREDUMP_MEM_HDR_ID;
	m.hdr_version = COREDUMP_MEM_HDR_VER;

	if (sizeof(uintptr_t) == 8) {
		m.start = sys_cpu_to_le64(start_addr);
		m.end = sys_cpu_to_le64(end_addr);
	} else if (sizeof(uintptr_t) == 4) {
		m.start = sys_cpu_to_le32(start_addr);
		m.end = sys_cpu_to_le32(end_addr);
	}

	coredump_buffer_output((uint8_t *)&m, sizeof(m));

	coredump_buffer_output((uint8_t *)start_addr, len);
}

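/*
 * Forward a query to the backend, e.g. to check whether a stored dump
 * exists. Returns -ENOTSUP when the backend implements no queries.
 */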
int coredump_query(enum coredump_query_id query_id, void *arg)
{
	int ret;

	if (backend_api->query == NULL) {
		ret = -ENOTSUP;
	} else {
		ret = backend_api->query(query_id, arg);
	}

	return ret;
}

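/*
 * Forward a command to the backend, e.g. to erase a stored dump.
 * Returns -ENOTSUP when the backend implements no commands.
 */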
int coredump_cmd(enum coredump_cmd_id cmd_id, void *arg)
{
	int ret;

	if (backend_api->cmd == NULL) {
		ret = -ENOTSUP;
	} else {
		ret = backend_api->cmd(cmd_id, arg);
	}

	return ret;
}