/*
 * Arm SCP/MCP Software
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Description:
 *     ARM-M exception handlers.
 */

#include "arch_exceptions.h"

#include <fwk_attributes.h>
#include <fwk_log.h>
#include <fwk_macros.h>

#include <fmw_cmsis.h>

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdnoreturn.h>
#include <string.h>

#define ICSR SCB->ICSR

#define HFSR SCB->HFSR
#define HARD_FAULT_FORCED() \
    ((HFSR & SCB_HFSR_FORCED_Msk) == SCB_HFSR_FORCED_Msk)

#define MMFSR (SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk)
#define MMFAR (SCB->MMFAR)
#define MMFAR_VALID() \
    ((MMFSR & SCB_CFSR_MMARVALID_Msk) == SCB_CFSR_MMARVALID_Msk)

#define BFSR (SCB->CFSR & SCB_CFSR_BUSFAULTSR_Msk)
#define BFAR SCB->BFAR
#define BFAR_VALID() ((BFSR & SCB_CFSR_BFARVALID_Msk) == SCB_CFSR_BFARVALID_Msk)

#define UFSR (SCB->CFSR & SCB_CFSR_USGFAULTSR_Msk)

#define SFSR SCB->SFSR
#define SFAR SCB->SFAR
#define SFAR_VALID() ((SFSR & SAU_SFSR_SFARVALID_Msk) == SAU_SFSR_SFARVALID_Msk)

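/*
 * System exception numbers as reported in the VECTACTIVE field of the ICSR
 * register while the corresponding handler is active.
 */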
#define HARD_FAULT 0x3U
#define MEMMANAGE_FAULT 0x4U
#define BUS_FAULT 0x5U
#define USAGE_FAULT 0x6U
#define SECURE_FAULT 0x7U

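/*
 * Register context automatically pushed onto the active stack by the
 * processor on exception entry: r0-r3, r12, lr (r14), the return address
 * and xPSR.
 */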
struct FWK_PACKED stacked_context {
    uint32_t r0;
    uint32_t r1;
    uint32_t r2;
    uint32_t r3;
    uint32_t r12;
    uint32_t r14;
    uint32_t PC;
    uint32_t xPSR;
};

#if (FWK_LOG_LEVEL <= FWK_LOG_LEVEL_ERROR) && !defined(FWK_LOG_BUFFERED)
static const char *const mmanage_fault_errors[] = {
    "Instruction access violation",
    "Data access violation",
    "Reserved",
    "MemManage fault on unstacking for a return from exception",
    "MemManage fault on stacking for exception entry",
    "MemManage fault occurred during floating-point lazy state preservation",
    "Reserved",
};

static const char *const bus_fault_errors[] = {
    "Instruction bus error",
    "Precise data bus error",
    "Imprecise data bus error",
    "BusFault on unstacking for a return from exception",
    "BusFault on stacking for exception entry",
    "BusFault occurred during floating-point lazy state preservation",
    "Reserved",
};

static const char *const usage_fault_errors[] = {
    "Undefined instruction",
    "Invalid state",
    "Invalid PC load UsageFault, caused by an invalid PC load by EXC_RETURN",
    "The processor has attempted to access a coprocessor",
    "Reserved",
    "Reserved",
    "Reserved",
    "Reserved",
    "The processor has made an unaligned memory access",
    "Divide by zero",
    "Reserved",
    "Reserved",
    "Reserved",
    "Reserved",
    "Reserved",
    "Reserved",
};

# if defined(ARMV8M) && (__ARM_FEATURE_CMSE == 3U)
static const char *const secure_fault_errors[] = {
    "Invalid entry point",
    "Invalid integrity signature",
    "Invalid exception return",
    "Attribution unit violation",
    "Invalid transition",
    "Lazy state preservation error flag",
    "", /* FAR is reported separately */
    "Lazy state error flag",
};
# endif /* ARMV8M && __ARM_FEATURE_CMSE == 3U */
#endif /* (FWK_LOG_LEVEL <= FWK_LOG_LEVEL_ERROR) && !defined(FWK_LOG_BUFFERED) */

#ifdef __NEWLIB__
/*
 * This function overloads a weak definition provided by Newlib. It is called
 * during initialization of the C runtime just after .bss has been zeroed.
 */
void software_init_hook(void)
{
    extern char __data_load__;
    extern char __data_start__;
    extern char __data_end__;

    char *load = &__data_load__;
    char *start = &__data_start__;
    char *end = &__data_end__;

    /* Copy the initialized .data section from its load address to RAM. */
    if (load != start) {
        (void)memcpy(start, load, (size_t)(end - start));
    }
}
#endif

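/*
 * Linker-provided symbol marking the end of the stack/heap region. Its
 * address is installed as the initial stack pointer in the vector table at
 * the bottom of this file.
 */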
#ifdef __ARMCC_VERSION
extern char Image$$ARM_LIB_STACKHEAP$$ZI$$Limit;

# define arch_exception_stack (&Image$$ARM_LIB_STACKHEAP$$ZI$$Limit)
#else
extern char __stackheap_end__;

# define arch_exception_stack (&__stackheap_end__)
#endif

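/*
 * The fault decoding handlers below are only compiled when error-level
 * logging is enabled and unbuffered; otherwise handle_arch_exception()
 * branches straight to the platform's invalid-exception handler.
 */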
#if (FWK_LOG_LEVEL <= FWK_LOG_LEVEL_ERROR) && !defined(FWK_LOG_BUFFERED)
static void handle_hard_fault(struct stacked_context *context)
{
    FWK_LOG_ERR("HARD FAULT occurred at 0x%" PRIX32, context->PC);
    if (HARD_FAULT_FORCED()) {
        FWK_LOG_ERR("Forced HARD FAULT");
    }
}

static void handle_memmanage_fault(struct stacked_context *context)
{
    size_t number_of_faults;
    uint8_t idx;

    number_of_faults = FWK_ARRAY_SIZE(mmanage_fault_errors);

    FWK_LOG_ERR("MEMMANAGE FAULT occurred at 0x%" PRIX32, context->PC);
    for (idx = 0; idx < number_of_faults; idx++) {
        if (((MMFSR >> (idx + SCB_CFSR_MEMFAULTSR_Pos)) & 1U) == 1U) {
            FWK_LOG_ERR("%s", mmanage_fault_errors[idx]);
        }
    }

    if (MMFAR_VALID()) {
        FWK_LOG_ERR("MMFAR: 0x%" PRIX32, MMFAR);
    }
}

static void handle_bus_fault(struct stacked_context *context)
{
    size_t number_of_faults;
    uint8_t idx;

    number_of_faults = FWK_ARRAY_SIZE(bus_fault_errors);

    FWK_LOG_ERR("BUS FAULT occurred at 0x%" PRIX32, context->PC);
    for (idx = 0; idx < number_of_faults; idx++) {
        if (((BFSR >> (idx + SCB_CFSR_BUSFAULTSR_Pos)) & 1U) == 1U) {
            FWK_LOG_ERR("%s", bus_fault_errors[idx]);
        }
    }

    if (BFAR_VALID()) {
        FWK_LOG_ERR("BFAR: 0x%" PRIX32, BFAR);
    }
}

static void handle_usage_fault(struct stacked_context *context)
{
    size_t number_of_faults;
    uint8_t idx;

    number_of_faults = FWK_ARRAY_SIZE(usage_fault_errors);

    FWK_LOG_ERR("USAGE FAULT occurred at 0x%" PRIX32, context->PC);
    for (idx = 0; idx < number_of_faults; idx++) {
        if (((UFSR >> (idx + SCB_CFSR_USGFAULTSR_Pos)) & 1U) == 1U) {
            FWK_LOG_ERR("%s", usage_fault_errors[idx]);
        }
    }
}

# if defined(ARMV8M) && (__ARM_FEATURE_CMSE == 3U)
static void handle_secure_fault(struct stacked_context *context)
{
    size_t number_of_faults;
    uint8_t idx;

    number_of_faults = FWK_ARRAY_SIZE(secure_fault_errors);

    FWK_LOG_ERR("SECURE FAULT occurred at 0x%" PRIX32, context->PC);
    for (idx = 0; idx < number_of_faults; idx++) {
        if (idx == SAU_SFSR_SFARVALID_Pos) {
            continue;
        }
        if (((SFSR >> idx) & 1U) == 1U) {
            FWK_LOG_ERR("%s", secure_fault_errors[idx]);
        }
    }

    if (SFAR_VALID()) {
        FWK_LOG_ERR("SFAR: 0x%" PRIX32, SFAR);
    }
}
# endif /* ARMV8M && __ARM_FEATURE_CMSE == 3U */

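/*
 * Decode the active exception number from the VECTACTIVE field of the ICSR
 * register, dispatch to the matching fault handler, then dump the stacked
 * register context.
 */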
static void handle_exception(struct stacked_context *context)
{
    uint32_t exception_number;

    exception_number = ICSR & SCB_ICSR_VECTACTIVE_Msk;
    switch (exception_number) {
    case HARD_FAULT:
        handle_hard_fault(context);
        break;
    case MEMMANAGE_FAULT:
        handle_memmanage_fault(context);
        break;
    case BUS_FAULT:
        handle_bus_fault(context);
        break;
    case USAGE_FAULT:
        handle_usage_fault(context);
        break;
# if defined(ARMV8M) && (__ARM_FEATURE_CMSE == 3U)
    case SECURE_FAULT:
        handle_secure_fault(context);
        break;
# endif /* ARMV8M && __ARM_FEATURE_CMSE == 3U */
    default:
        FWK_LOG_ERR(
            "Unhandled Fault: (%" PRIX32 ") occurred at 0x%08" PRIX32,
            exception_number,
            context->PC);
        break;
    };

    FWK_LOG_ERR(
        "r0:\t0x%08" PRIX32 " r1:\t0x%08" PRIX32, context->r0, context->r1);
    FWK_LOG_ERR(
        "r2:\t0x%08" PRIX32 " r3:\t0x%08" PRIX32, context->r2, context->r3);
    FWK_LOG_ERR(
        "r12:\t0x%08" PRIX32 " r14:\t0x%08" PRIX32, context->r12, context->r14);
    FWK_LOG_ERR("PC:\t0x%08" PRIX32, context->PC);
    FWK_LOG_ERR("xPSR:\t0x%08" PRIX32, context->xPSR);
}

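/*
 * The handler is declared naked so that no compiler-generated prologue
 * modifies the stack pointer before it is captured: sp is passed to
 * handle_exception() as a pointer to the register frame stacked by the
 * processor on exception entry.
 */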
__attribute__((naked)) noreturn void handle_arch_exception(void)
{
    /* It is recommended not to have C code in a naked function. */
    __asm(
        /* Save SP to print exception information. */
        "mov r0, sp \n\t"
        /* Save LR to the stack. */
        "push {lr}\n\t"
        /* Print exception information. */
        "bl %0 \n\t"
        /*
         * Restore LR. Some platforms check LR to select between MSP and PSP.
         */
        "pop {lr}\n\t"
        /* Jump to the platform's exception handler. */
        "b %1"
        :
        : "i"(handle_exception), "i"(arch_exception_invalid));
}
#else /* (FWK_LOG_LEVEL <= FWK_LOG_LEVEL_ERROR) && !defined(FWK_LOG_BUFFERED) */

__attribute__((naked)) noreturn void handle_arch_exception(void)
{
    __asm("b %0" : : "i"(arch_exception_invalid));
}

#endif /* (FWK_LOG_LEVEL <= FWK_LOG_LEVEL_ERROR) && !defined(FWK_LOG_BUFFERED) */

/*
 * Set up the exception table. The structure below is added to the
 * .exceptions section which will be explicitly placed at the beginning of the
 * binary by the linker script.
 */
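/*
 * The vector table starts with the initial stack pointer, followed by the
 * system exception vectors. System exception IRQn values are negative, so
 * each entry's index in exceptions[] (which starts at vector 1) is
 * NVIC_USER_IRQ_OFFSET + IRQn - 1.
 */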
const struct {
    uintptr_t stack;
    uintptr_t exceptions[NVIC_USER_IRQ_OFFSET - 1];
} arch_exceptions FWK_SECTION(".exceptions") = {
    .stack = (uintptr_t)(arch_exception_stack),
    .exceptions = {
        [NVIC_USER_IRQ_OFFSET + Reset_IRQn - 1] =
            (uintptr_t)(arch_exception_reset),
        [NonMaskableInt_IRQn + (NVIC_USER_IRQ_OFFSET - 1)] =
            (uintptr_t)(arch_exception_invalid),
        [NVIC_USER_IRQ_OFFSET + HardFault_IRQn - 1] =
            (uintptr_t)(handle_arch_exception),
        [NVIC_USER_IRQ_OFFSET + MemoryManagement_IRQn - 1] =
            (uintptr_t)(handle_arch_exception),
        [NVIC_USER_IRQ_OFFSET + BusFault_IRQn - 1] =
            (uintptr_t)(handle_arch_exception),
        [NVIC_USER_IRQ_OFFSET + UsageFault_IRQn - 1] =
            (uintptr_t)(handle_arch_exception),
#ifdef ARMV8M
        [NVIC_USER_IRQ_OFFSET + SecureFault_IRQn - 1] =
            (uintptr_t)(handle_arch_exception),
#endif /* ARMV8M */
        [NVIC_USER_IRQ_OFFSET + DebugMonitor_IRQn - 1] =
            (uintptr_t)(arch_exception_invalid),

        [NVIC_USER_IRQ_OFFSET + SVCall_IRQn - 1] =
            (uintptr_t)(arch_exception_invalid),
        [NVIC_USER_IRQ_OFFSET + PendSV_IRQn - 1] =
            (uintptr_t)(arch_exception_invalid),
        [NVIC_USER_IRQ_OFFSET + SysTick_IRQn - 1] =
            (uintptr_t)(arch_exception_invalid),
    },
};