/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel fatal error handler for ARM Cortex-M and Cortex-R
 *
 * This module provides the z_arm_fatal_error() routine for ARM Cortex-M
 * and Cortex-R CPUs.
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/exception.h>
#include <kernel_arch_data.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_EXCEPTION_DEBUG
static void esf_dump(const struct arch_esf *esf)
{
	EXCEPTION_DUMP("r0/a1: 0x%08x r1/a2: 0x%08x r2/a3: 0x%08x",
		       esf->basic.a1, esf->basic.a2, esf->basic.a3);
	EXCEPTION_DUMP("r3/a4: 0x%08x r12/ip: 0x%08x r14/lr: 0x%08x",
		       esf->basic.a4, esf->basic.ip, esf->basic.lr);
	EXCEPTION_DUMP(" xpsr: 0x%08x", esf->basic.xpsr);
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	for (int i = 0; i < ARRAY_SIZE(esf->fpu.s); i += 4) {
		EXCEPTION_DUMP("s[%2d]: 0x%08x s[%2d]: 0x%08x"
			       " s[%2d]: 0x%08x s[%2d]: 0x%08x",
			       i, (uint32_t)esf->fpu.s[i],
			       i + 1, (uint32_t)esf->fpu.s[i + 1],
			       i + 2, (uint32_t)esf->fpu.s[i + 2],
			       i + 3, (uint32_t)esf->fpu.s[i + 3]);
	}
#ifdef CONFIG_VFP_FEATURE_REGS_S64_D32
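	/* With CONFIG_VFP_FEATURE_REGS_S64_D32 the FPU also implements the
	 * 32 double-precision d0-d31 registers, so dump that bank as well.
	 */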
	for (int i = 0; i < ARRAY_SIZE(esf->fpu.d); i += 4) {
		EXCEPTION_DUMP("d[%2d]: 0x%16llx d[%2d]: 0x%16llx"
			       " d[%2d]: 0x%16llx d[%2d]: 0x%16llx",
			       i, (uint64_t)esf->fpu.d[i],
			       i + 1, (uint64_t)esf->fpu.d[i + 1],
			       i + 2, (uint64_t)esf->fpu.d[i + 2],
			       i + 3, (uint64_t)esf->fpu.d[i + 3]);
	}
#endif
	EXCEPTION_DUMP("fpscr: 0x%08x", esf->fpu.fpscr);
#endif
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	const struct _callee_saved *callee = esf->extra_info.callee;

	if (callee != NULL) {
		EXCEPTION_DUMP("r4/v1: 0x%08x r5/v2: 0x%08x r6/v3: 0x%08x",
			       callee->v1, callee->v2, callee->v3);
		EXCEPTION_DUMP("r7/v4: 0x%08x r8/v5: 0x%08x r9/v6: 0x%08x",
			       callee->v4, callee->v5, callee->v6);
		EXCEPTION_DUMP("r10/v7: 0x%08x r11/v8: 0x%08x psp: 0x%08x",
			       callee->v7, callee->v8, callee->psp);
	}

62 EXCEPTION_DUMP("EXC_RETURN: 0x%0x", esf->extra_info.exc_return);
63
64 #endif /* CONFIG_EXTRA_EXCEPTION_INFO */
65 EXCEPTION_DUMP("Faulting instruction address (r15/pc): 0x%08x",
66 esf->basic.pc);
67 }
68 #endif /* CONFIG_EXCEPTION_DEBUG */
69
z_arm_fatal_error(unsigned int reason,const struct arch_esf * esf)70 void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf)
71 {
72 #ifdef CONFIG_EXCEPTION_DEBUG
73 if (esf != NULL) {
74 esf_dump(esf);
75 }
76 #endif /* CONFIG_EXCEPTION_DEBUG */
77
	/* Log the IRQ number that went unhandled. */
#if defined(CONFIG_CPU_CORTEX_M)
	if (reason == K_ERR_SPURIOUS_IRQ) {
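		/* On Cortex-M, the IPSR holds the active exception number.
		 * Exception numbers 0-15 are reserved for system exceptions;
		 * external interrupts start at 16, so subtracting 16 yields
		 * the zero-based IRQ line.
		 */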
		uint32_t irqn = __get_IPSR() - 16;

		EXCEPTION_DUMP("Unhandled IRQn: %d", irqn);
	}
#endif

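	/* Hand off to the architecture-independent kernel fatal error
	 * handler, which logs the reason and either aborts the offending
	 * thread or halts the system, depending on the error and the
	 * system's fatal error policy.
	 */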
	z_fatal_error(reason, esf);
}

/**
 * @brief Handle a software-generated fatal exception
 * (e.g. kernel oops or panic).
 *
 * Notes:
 * - the function is invoked from the SVC handler
 * - if triggered from nPRIV mode, only the kernel oops and stack-check-failure
 *   error reasons may be propagated to the fault handling process.
 * - We expect the supplied exception stack frame to always be a valid
 *   frame. That is because, if the ESF cannot be stacked during an SVC,
 *   a processor fault (e.g. stacking error) will be generated, and the
 *   fault handler will be executed instead of the SVC.
 *
 * @param esf exception frame
 * @param callee_regs Callee-saved registers (R4-R11)
 * @param exc_return EXC_RETURN value present in LR after exception entry.
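 *
 * For reference, a kernel oops typically reaches this handler via
 * ARCH_EXCEPT(), which places the reason code in r0 and traps with an
 * SVC, roughly as in this sketch (not the exact Zephyr macro body):
 *
 *   register uint32_t r0 __asm__("r0") = reason;
 *   __asm__ volatile("svc %[id]"
 *                    :: "r" (r0), [id] "i" (_SVC_CALL_RUNTIME_EXCEPT)
 *                    : "memory");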
 */
void z_do_kernel_oops(const struct arch_esf *esf, _callee_saved_t *callee_regs,
		      uint32_t exc_return)
{
#if !(defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE))
	ARG_UNUSED(callee_regs);
#endif
	/* Stacked R0 holds the exception reason. */
	unsigned int reason = esf->basic.r0;

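	/* Record the stack pointer in use at the time of the fault (MSP or
	 * PSP, as selected by EXC_RETURN) so the fatal error path can report
	 * it.
	 */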
	z_arm_set_fault_sp(esf, exc_return);

#if defined(CONFIG_USERSPACE)
	if (z_arm_preempted_thread_in_user_mode(esf)) {
		/*
		 * Exception triggered from user mode.
		 *
		 * User mode is only allowed to induce oopses and stack check
		 * failures via software-triggered system fatal exceptions.
		 */
		if (!((esf->basic.r0 == K_ERR_KERNEL_OOPS) ||
		      (esf->basic.r0 == K_ERR_STACK_CHK_FAIL))) {
			reason = K_ERR_KERNEL_OOPS;
		}
	}
#endif /* CONFIG_USERSPACE */

#if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
	z_arm_fatal_error(reason, esf);
#else
	struct arch_esf esf_copy;

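	/* Copy only the members that precede extra_info; that field is
	 * rebuilt below rather than taken from the original ESF.
	 */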
	memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* The extra exception info is collected in the callee_regs parameter
	 * on CONFIG_ARMV7_M_ARMV8_M_MAINLINE.
	 */
	esf_copy.extra_info = (struct __extra_esf_info) {
		.callee = callee_regs,
	};
#else
	/* Extra exception info is not collected on the kernel oops path
	 * today, so zero out that portion of the ESF copy.
	 */
	esf_copy.extra_info = (struct __extra_esf_info) { 0 };
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */

	z_arm_fatal_error(reason, &esf_copy);
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
}

FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{
	uint32_t *ssf_contents = ssf_ptr;
	struct arch_esf oops_esf = { 0 };

	/* TODO: Copy the rest of the register set out of ssf_ptr */
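	/* Per the frame layout assumed by this code, word 3 of the syscall
	 * stack frame holds the caller's PC; recover at least that much for
	 * the fatal error report.
	 */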
	oops_esf.basic.pc = ssf_contents[3];

	z_arm_fatal_error(K_ERR_KERNEL_OOPS, &oops_esf);
	CODE_UNREACHABLE;
}