/*
 * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
 * Copyright (c) 2018 Lexmark International, Inc.
 * Copyright 2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/exception.h>
#include <kernel_internal.h>
#include <zephyr/arch/common/exc_handle.h>
#include <zephyr/logging/log.h>
#if defined(CONFIG_GDBSTUB)
#include <zephyr/arch/arm/gdbstub.h>
#include <zephyr/debug/gdbstub.h>
#endif

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#define FAULT_DUMP_VERBOSE	(CONFIG_FAULT_DUMP == 2)

#if FAULT_DUMP_VERBOSE
static const char *get_dbgdscr_moe_string(uint32_t moe)
{
	switch (moe) {
	case DBGDSCR_MOE_HALT_REQUEST:
		return "Halt Request";
	case DBGDSCR_MOE_BREAKPOINT:
		return "Breakpoint";
	case DBGDSCR_MOE_ASYNC_WATCHPOINT:
		return "Asynchronous Watchpoint";
	case DBGDSCR_MOE_BKPT_INSTRUCTION:
		return "BKPT Instruction";
	case DBGDSCR_MOE_EXT_DEBUG_REQUEST:
		return "External Debug Request";
	case DBGDSCR_MOE_VECTOR_CATCH:
		return "Vector Catch";
	case DBGDSCR_MOE_OS_UNLOCK_CATCH:
		return "OS Unlock Catch";
	case DBGDSCR_MOE_SYNC_WATCHPOINT:
		return "Synchronous Watchpoint";
	default:
		return "Unknown";
	}
}

static void dump_debug_event(void)
{
	/* Read and parse debug mode of entry */
	uint32_t dbgdscr = __get_DBGDSCR();
	uint32_t moe = (dbgdscr & DBGDSCR_MOE_Msk) >> DBGDSCR_MOE_Pos;

	/* Print debug event information */
	EXCEPTION_DUMP("Debug Event (%s)", get_dbgdscr_moe_string(moe));
}

static uint32_t dump_fault(uint32_t status, uint32_t addr)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;
	/*
	 * Dump the fault status and, if applicable, status-specific
	 * information. Note that the fault address is only displayed for
	 * synchronous faults, because it is unpredictable for asynchronous
	 * faults.
	 */
	switch (status) {
	case FSR_FS_ALIGNMENT_FAULT:
		reason = K_ERR_ARM_ALIGNMENT_FAULT;
		EXCEPTION_DUMP("Alignment Fault @ 0x%08x", addr);
		break;
	case FSR_FS_PERMISSION_FAULT:
		reason = K_ERR_ARM_PERMISSION_FAULT;
		EXCEPTION_DUMP("Permission Fault @ 0x%08x", addr);
		break;
	case FSR_FS_SYNC_EXTERNAL_ABORT:
		reason = K_ERR_ARM_SYNC_EXTERNAL_ABORT;
		EXCEPTION_DUMP("Synchronous External Abort @ 0x%08x", addr);
		break;
	case FSR_FS_ASYNC_EXTERNAL_ABORT:
		reason = K_ERR_ARM_ASYNC_EXTERNAL_ABORT;
		EXCEPTION_DUMP("Asynchronous External Abort");
		break;
	case FSR_FS_SYNC_PARITY_ERROR:
		reason = K_ERR_ARM_SYNC_PARITY_ERROR;
		EXCEPTION_DUMP("Synchronous Parity/ECC Error @ 0x%08x", addr);
		break;
	case FSR_FS_ASYNC_PARITY_ERROR:
		reason = K_ERR_ARM_ASYNC_PARITY_ERROR;
		EXCEPTION_DUMP("Asynchronous Parity/ECC Error");
		break;
	case FSR_FS_DEBUG_EVENT:
		reason = K_ERR_ARM_DEBUG_EVENT;
		dump_debug_event();
		break;
#if defined(CONFIG_AARCH32_ARMV8_R)
	case FSR_FS_TRANSLATION_FAULT:
		reason = K_ERR_ARM_TRANSLATION_FAULT;
		EXCEPTION_DUMP("Translation Fault @ 0x%08x", addr);
		break;
	case FSR_FS_UNSUPPORTED_EXCLUSIVE_ACCESS_FAULT:
		reason = K_ERR_ARM_UNSUPPORTED_EXCLUSIVE_ACCESS_FAULT;
		EXCEPTION_DUMP("Unsupported Exclusive Access Fault @ 0x%08x", addr);
		break;
#elif defined(CONFIG_ARMV7_A)
	case FSR_FS_PERMISSION_FAULT_2ND_LEVEL:
		reason = K_ERR_ARM_PERMISSION_FAULT_2ND_LEVEL;
		EXCEPTION_DUMP("2nd Level Permission Fault @ 0x%08x", addr);
		break;
	case FSR_FS_ACCESS_FLAG_FAULT_1ST_LEVEL:
		reason = K_ERR_ARM_ACCESS_FLAG_FAULT_1ST_LEVEL;
		EXCEPTION_DUMP("1st Level Access Flag Fault @ 0x%08x", addr);
		break;
	case FSR_FS_ACCESS_FLAG_FAULT_2ND_LEVEL:
		reason = K_ERR_ARM_ACCESS_FLAG_FAULT_2ND_LEVEL;
		EXCEPTION_DUMP("2nd Level Access Flag Fault @ 0x%08x", addr);
		break;
	case FSR_FS_CACHE_MAINTENANCE_INSTRUCTION_FAULT:
		reason = K_ERR_ARM_CACHE_MAINTENANCE_INSTRUCTION_FAULT;
		EXCEPTION_DUMP("Cache Maintenance Instruction Fault @ 0x%08x", addr);
		break;
	case FSR_FS_TRANSLATION_FAULT:
		reason = K_ERR_ARM_TRANSLATION_FAULT;
		EXCEPTION_DUMP("1st Level Translation Fault @ 0x%08x", addr);
		break;
	case FSR_FS_TRANSLATION_FAULT_2ND_LEVEL:
		reason = K_ERR_ARM_TRANSLATION_FAULT_2ND_LEVEL;
		EXCEPTION_DUMP("2nd Level Translation Fault @ 0x%08x", addr);
		break;
	case FSR_FS_DOMAIN_FAULT_1ST_LEVEL:
		reason = K_ERR_ARM_DOMAIN_FAULT_1ST_LEVEL;
		EXCEPTION_DUMP("1st Level Domain Fault @ 0x%08x", addr);
		break;
	case FSR_FS_DOMAIN_FAULT_2ND_LEVEL:
		reason = K_ERR_ARM_DOMAIN_FAULT_2ND_LEVEL;
		EXCEPTION_DUMP("2nd Level Domain Fault @ 0x%08x", addr);
		break;
	case FSR_FS_SYNC_EXTERNAL_ABORT_TRANSLATION_TABLE_1ST_LEVEL:
		reason = K_ERR_ARM_SYNC_EXTERNAL_ABORT_TRANSLATION_TABLE_1ST_LEVEL;
		EXCEPTION_DUMP("1st Level Synchronous External Abort Translation Table @ 0x%08x",
				addr);
		break;
	case FSR_FS_SYNC_EXTERNAL_ABORT_TRANSLATION_TABLE_2ND_LEVEL:
		reason = K_ERR_ARM_SYNC_EXTERNAL_ABORT_TRANSLATION_TABLE_2ND_LEVEL;
		EXCEPTION_DUMP("2nd Level Synchronous External Abort Translation Table @ 0x%08x",
				addr);
		break;
	case FSR_FS_TLB_CONFLICT_ABORT:
		reason = K_ERR_ARM_TLB_CONFLICT_ABORT;
		EXCEPTION_DUMP("TLB Conflict Abort @ 0x%08x", addr);
		break;
	case FSR_FS_SYNC_PARITY_ERROR_TRANSLATION_TABLE_1ST_LEVEL:
		reason = K_ERR_ARM_SYNC_PARITY_ERROR_TRANSLATION_TABLE_1ST_LEVEL;
		EXCEPTION_DUMP("1st Level Synchronous Parity Error Translation Table @ 0x%08x",
				addr);
		break;
	case FSR_FS_SYNC_PARITY_ERROR_TRANSLATION_TABLE_2ND_LEVEL:
		reason = K_ERR_ARM_SYNC_PARITY_ERROR_TRANSLATION_TABLE_2ND_LEVEL;
		EXCEPTION_DUMP("2nd Level Synchronous Parity Error Translation Table @ 0x%08x",
				addr);
		break;
#else
	case FSR_FS_BACKGROUND_FAULT:
		reason = K_ERR_ARM_BACKGROUND_FAULT;
		EXCEPTION_DUMP("Background Fault @ 0x%08x", addr);
		break;
#endif
	default:
		EXCEPTION_DUMP("Unknown (%u)", status);
	}
	return reason;
}
#endif

#if defined(CONFIG_FPU_SHARING)

static ALWAYS_INLINE void z_arm_fpu_caller_save(struct __fpu_sf *fpu)
{
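	/* Spill the caller-saved single-precision registers s0-s15
	 * (aliasing d0-d7, which the AAPCS does not require callees to
	 * preserve)
	 */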
	__asm__ volatile (
		"vstmia %0, {s0-s15};\n"
		: : "r" (&fpu->s[0])
		: "memory"
		);
#if CONFIG_VFP_FEATURE_REGS_S64_D32
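	/* With 32 double-precision registers, d16-d31 are caller-saved as
	 * well and must also be spilled
	 */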
	__asm__ volatile (
		"vstmia %0, {d16-d31};\n\t"
		:
		: "r" (&fpu->d[0])
		: "memory"
		);
#endif
}

/**
 * @brief FPU undefined instruction fault handler
 *
 * @return Returns true if the FPU is already enabled, implying a genuine
 *         undefined instruction.
 *         Returns false if the FPU was disabled.
 */
bool z_arm_fault_undef_instruction_fp(void)
{
	/*
	 * Assume this is a floating point instruction that faulted because
	 * the FP unit was disabled.  Enable the FP unit and try again.  If
	 * the FP was already enabled then this was an actual undefined
	 * instruction.
	 */
	if (__get_FPEXC() & FPEXC_EN) {
		return true;
	}

	__set_FPEXC(FPEXC_EN);

	if (_current_cpu->nested > 1) {
		/*
		 * If the nested count is greater than 1, the undefined
		 * instruction exception came from an irq/svc context.  (The
		 * irq/svc handler would have the nested count at 1 and then
		 * the undef exception would increment it to 2).
		 */
		struct __fpu_sf *spill_esf =
			(struct __fpu_sf *)_current_cpu->fp_ctx;

		if (spill_esf == NULL) {
			return false;
		}

		_current_cpu->fp_ctx = NULL;

		/*
		 * If the nested count is 2 and the current thread has used the
		 * VFP (whether or not it was actually using the VFP before the
		 * current exception) OR if the nested count is greater than 2
		 * and the VFP was enabled on the irq/svc entrance for the
		 * saved exception stack frame, then save the floating point
		 * context because it is about to be overwritten.
		 */
		if (((_current_cpu->nested == 2)
				&& (_current->base.user_options & K_FP_REGS))
			|| ((_current_cpu->nested > 2)
				&& (spill_esf->undefined & FPEXC_EN))) {
			/*
			 * Spill VFP registers to specified exception stack
			 * frame
			 */
			spill_esf->undefined |= FPEXC_EN;
			spill_esf->fpscr = __get_FPSCR();
			z_arm_fpu_caller_save(spill_esf);
		}
	} else {
		/*
		 * If the nested count is one, a thread was the faulting
		 * context.  Just flag that this thread uses the VFP.  This
		 * means that a thread that uses the VFP does not have to,
		 * but should, set K_FP_REGS on thread creation.
		 */
		_current->base.user_options |= K_FP_REGS;
	}

	return false;
}
#endif

/**
 * @brief Undefined instruction fault handler
 *
 * @return Returns true if the fault is fatal
 */
bool z_arm_fault_undef_instruction(struct arch_esf *esf)
{
#if defined(CONFIG_FPU_SHARING)
	/*
	 * This is a true undefined instruction and we will be crashing
	 * so save away the VFP registers.
	 */
	esf->fpu.undefined = __get_FPEXC();
	esf->fpu.fpscr = __get_FPSCR();
	z_arm_fpu_caller_save(&esf->fpu);
#endif

#if defined(CONFIG_GDBSTUB)
	z_gdb_entry(esf, GDB_EXCEPTION_INVALID_INSTRUCTION);
	/* Might not be fatal if the GDB stub placed the instruction in the code. */
	return false;
#endif

	/* Print fault information */
	EXCEPTION_DUMP("***** UNDEFINED INSTRUCTION ABORT *****");

	uint32_t reason = IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) ?
			  K_ERR_CPU_EXCEPTION :
			  K_ERR_ARM_UNDEFINED_INSTRUCTION;

	/* Invoke kernel fatal exception handler */
	z_arm_fatal_error(reason, esf);

	/* All undefined instructions are treated as fatal for now */
	return true;
}

/**
 * @brief Prefetch abort fault handler
 *
 * @return Returns true if the fault is fatal
 */
bool z_arm_fault_prefetch(struct arch_esf *esf)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	/* Read and parse Instruction Fault Status Register (IFSR) */
	uint32_t ifsr = __get_IFSR();
#if defined(CONFIG_AARCH32_ARMV8_R)
	uint32_t fs = ifsr & IFSR_STATUS_Msk;
#else
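	/* In the short-descriptor format the FS field is split: FS[4] sits
	 * in IFSR bit 10 and FS[3:0] in bits 3:0, so the two parts must be
	 * recombined
	 */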
	uint32_t fs = ((ifsr & IFSR_FS1_Msk) >> 6) | (ifsr & IFSR_FS0_Msk);
#endif

	/* Read Instruction Fault Address Register (IFAR) */
	uint32_t ifar = __get_IFAR();

#if defined(CONFIG_GDBSTUB)
	/* The BKPT instruction could have caused a software breakpoint */
	if (fs == IFSR_DEBUG_EVENT) {
		/* Debug event, call the gdbstub handler */
		z_gdb_entry(esf, GDB_EXCEPTION_BREAKPOINT);
	} else {
		/* Fatal */
		z_gdb_entry(esf, GDB_EXCEPTION_MEMORY_FAULT);
	}
	return false;
#endif
	/* Print fault information */
	EXCEPTION_DUMP("***** PREFETCH ABORT *****");
	if (FAULT_DUMP_VERBOSE) {
		reason = dump_fault(fs, ifar);
	}

	/* Simplify exception codes if requested */
	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
		reason = K_ERR_CPU_EXCEPTION;
	}

	/* Invoke kernel fatal exception handler */
	z_arm_fatal_error(reason, esf);

	/* All prefetch aborts are treated as fatal for now */
	return true;
}

#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_arm_user_string_nlen);

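/* Fixup table: a fault whose PC lies within one of these windows is
 * considered recoverable
 */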
static const struct z_exc_handle exceptions[] = {
	Z_EXC_HANDLE(z_arm_user_string_nlen)
};

/* Assess whether an MPU fault shall be treated as recoverable.
 *
 * @return true if the error is recoverable, otherwise false.
 */
static bool memory_fault_recoverable(struct arch_esf *esf)
{
	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
		/* Mask out instruction mode */
		uint32_t start = (uint32_t)exceptions[i].start & ~0x1U;
		uint32_t end = (uint32_t)exceptions[i].end & ~0x1U;

		if (esf->basic.pc >= start && esf->basic.pc < end) {
			esf->basic.pc = (uint32_t)(exceptions[i].fixup);
			return true;
		}
	}

	return false;
}
#endif

/**
 * @brief Data abort fault handler
 *
 * @return Returns true if the fault is fatal
 */
bool z_arm_fault_data(struct arch_esf *esf)
{
	uint32_t reason = K_ERR_CPU_EXCEPTION;

	/* Read and parse Data Fault Status Register (DFSR) */
	uint32_t dfsr = __get_DFSR();
#if defined(CONFIG_AARCH32_ARMV8_R)
	uint32_t fs = dfsr & DFSR_STATUS_Msk;
#else
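	/* As with the IFSR, the short-descriptor FS field is split across
	 * DFSR bit 10 (FS[4]) and bits 3:0 (FS[3:0])
	 */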
	uint32_t fs = ((dfsr & DFSR_FS1_Msk) >> 6) | (dfsr & DFSR_FS0_Msk);
#endif

	/* Read Data Fault Address Register (DFAR) */
	uint32_t dfar = __get_DFAR();

#if defined(CONFIG_GDBSTUB)
	z_gdb_entry(esf, GDB_EXCEPTION_MEMORY_FAULT);
	/* Return false - the fault was forwarded to the GDB stub and is not fatal */
	return false;
#endif

#if defined(CONFIG_USERSPACE)
	if ((fs == COND_CODE_1(CONFIG_AARCH32_ARMV8_R,
				(FSR_FS_TRANSLATION_FAULT),
				(FSR_FS_BACKGROUND_FAULT)))
			|| (fs == FSR_FS_PERMISSION_FAULT)) {
		if (memory_fault_recoverable(esf)) {
			return false;
		}
	}
#endif

	/* Print fault information */
	EXCEPTION_DUMP("***** DATA ABORT *****");
	if (FAULT_DUMP_VERBOSE) {
		reason = dump_fault(fs, dfar);
	}

	/* Simplify exception codes if requested */
	if (IS_ENABLED(CONFIG_SIMPLIFIED_EXCEPTION_CODES) && (reason >= K_ERR_ARCH_START)) {
		reason = K_ERR_CPU_EXCEPTION;
	}

	/* Invoke kernel fatal exception handler */
	z_arm_fatal_error(reason, esf);

	/* All data aborts are treated as fatal for now */
	return true;
}

/**
 * @brief Initialisation of fault handling
 */
void z_arm_fault_init(void)
{
	/* Nothing to do for now */
}