/*
 * Copyright (c) 2019 Nordic Semiconductor ASA.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/arch/cpu.h>
#include <cmsis_core.h>
#include <zephyr/sys/barrier.h>

static volatile int test_flag;
static volatile int expected_reason = -1;
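/* expected_reason is set before each intentionally triggered fault, and is
 * reset to -1 by k_sys_fatal_error_handler() once the fault has been
 * verified.
 */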

/* Used to validate ESF collection during a fault */
static volatile int run_esf_validation;
static volatile int esf_validation_rv;
static volatile uint32_t expected_msp;
static K_THREAD_STACK_DEFINE(esf_collection_stack, 2048);
static struct k_thread esf_collection_thread;
#define MAIN_PRIORITY 7
#define PRIORITY      5
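/* The ESF collection thread runs at K_PRIO_COOP(PRIORITY); cooperative
 * priorities outrank all preemptible ones in Zephyr, so it preempts the
 * main test thread running at K_PRIO_PREEMPT(MAIN_PRIORITY).
 */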

/**
 * Validates that pEsf matches state from set_regs_with_known_pattern()
 */
static int check_esf_matches_expectations(const struct arch_esf *pEsf)
{
	const uint16_t expected_fault_instruction = 0xde5a; /* udf #90 */
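	/* The "basic" frame is stacked by hardware on exception entry and
	 * holds r0-r3, r12, lr, pc and xPSR; only the registers written by
	 * set_regs_with_known_pattern() are checked here.
	 */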
	const bool caller_regs_match_expected =
		(pEsf->basic.r0 == 0) && (pEsf->basic.r1 == 1) && (pEsf->basic.r2 == 2) &&
		(pEsf->basic.r3 == 3) && (pEsf->basic.lr == 15) &&
		(*(uint16_t *)pEsf->basic.pc == expected_fault_instruction);
	if (!caller_regs_match_expected) {
		printk("__basic_sf member of ESF is incorrect\n");
		return -1;
	}

#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
	const struct _callee_saved *callee_regs = pEsf->extra_info.callee;
	const bool callee_regs_match_expected =
		(callee_regs->v1 /* r4 */ == 4) && (callee_regs->v2 /* r5 */ == 5) &&
		(callee_regs->v3 /* r6 */ == 6) && (callee_regs->v4 /* r7 */ == 7) &&
		(callee_regs->v5 /* r8 */ == 8) && (callee_regs->v6 /* r9 */ == 9) &&
		(callee_regs->v7 /* r10 */ == 10) && (callee_regs->v8 /* r11 */ == 11);
	if (!callee_regs_match_expected) {
		printk("_callee_saved_t member of ESF is incorrect\n");
		return -1;
	}

	/* we expect the EXC_RETURN value to have:
	 *  - PREFIX: bits [31:24] = 0xFF
	 *  - Mode, bit [3] = 1 since exception occurred from thread mode
	 *  - SPSEL, bit [2] = 1 since frame should reside on PSP
	 */
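	/* An EXC_RETURN of, e.g., 0xFFFFFFFD (return to Thread mode, standard
	 * frame on PSP) satisfies this mask.
	 */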
	const uint32_t exc_bits_set_mask = 0xff00000C;

	if ((pEsf->extra_info.exc_return & exc_bits_set_mask) != exc_bits_set_mask) {
		printk("Incorrect EXC_RETURN of 0x%08x\n", pEsf->extra_info.exc_return);
		return -1;
	}

	/* the psp should match the contents of the esf copy up
	 * to the xpsr. (the xpsr value in the copy used for pEsf
	 * is overwritten in fault.c)
	 */
	if (memcmp((void *)callee_regs->psp, pEsf, offsetof(struct arch_esf, basic.xpsr)) != 0) {
		printk("psp does not match __basic_sf provided\n");
		return -1;
	}

	if (pEsf->extra_info.msp != expected_msp) {
		printk("MSP is 0x%08x but should be 0x%08x\n", pEsf->extra_info.msp, expected_msp);
		return -1;
	}
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
	return 0;
}

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	TC_PRINT("Caught system error -- reason %d\n", reason);

	if (expected_reason == -1) {
		printk("Was not expecting a crash\n");
		k_fatal_halt(reason);
	}

	if (reason != expected_reason) {
		printk("Wrong crash type got %d expected %d\n", reason, expected_reason);
		k_fatal_halt(reason);
	}

	if (run_esf_validation) {
		if (check_esf_matches_expectations(pEsf) == 0) {
			esf_validation_rv = TC_PASS;
		}
		run_esf_validation = 0;
	}

	expected_reason = -1;
}

/**
 * Set ARM registers with a known pattern:
 *  r0-r12 are set to 0...12, respectively
 *  r13 (sp) is left untouched
 *  r14 (lr) is set to 15 (since a fault takes place, we never use the value)
 *  r15 (pc) will point to the faulting instruction (udf #90)
 *
 * Note: Routine was written to be ARMv6-M compatible
 *
 * In k_sys_fatal_error_handler above we will check that the ESF provided
 * as a parameter matches these expectations.
 */
void set_regs_with_known_pattern(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	__asm__ volatile("mov r1, #1\n"
			 "mov r2, #2\n"
			 "mov r3, #3\n"
			 "mov r4, #4\n"
			 "mov r5, #5\n"
			 "mov r6, #6\n"
			 "mov r7, #7\n"
			 /* On ARMv6-M, MOV with an immediate can only target the
			  * low registers (r0-r7), so r8-r12 and lr are loaded
			  * via r0.
			  */
			 "mov r0, #8\n"
			 "mov r8, r0\n"
			 "add r0, r0, #1\n"
			 "mov r9, r0\n"
			 "add r0, r0, #1\n"
			 "mov r10, r0\n"
			 "add r0, r0, #1\n"
			 "mov r11, r0\n"
			 "add r0, r0, #1\n"
			 "mov r12, r0\n"
			 /* lr = 12 + 3 = 15 */
			 "add r0, r0, #3\n"
			 "mov lr, r0\n"
			 "mov r0, #0\n"
			 /* Trigger the fault; 0xde5a encodes udf #90 */
			 "udf #90\n");
}

/**
 * @brief Test to verify ESF collection and reporting during a fault
 * @ingroup kernel_fatal_tests
 */
ZTEST(arm_interrupt, test_arm_esf_collection)
{
	int test_validation_rv;

	/* if the check in the fault handler succeeds,
	 * this will be set to TC_PASS
	 */
	esf_validation_rv = TC_FAIL;

	/* since the fault is from a task, the interrupt stack (msp)
	 * should match whatever the current value is
	 */
	expected_msp = __get_MSP();

	run_esf_validation = 1;
	expected_reason = K_ERR_CPU_EXCEPTION;

	/* Lower the main thread to a preemptible priority, so the crashy
	 * (cooperative) thread we create below is guaranteed to run to
	 * completion before we get to the end of this function
	 */
	k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));

	TC_PRINT("Testing ESF Reporting\n");
	k_thread_create(&esf_collection_thread, esf_collection_stack,
			K_THREAD_STACK_SIZEOF(esf_collection_stack), set_regs_with_known_pattern,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0, K_NO_WAIT);

	test_validation_rv = esf_validation_rv;

	zassert_not_equal(test_validation_rv, TC_FAIL, "ESF fault collection failed");
}

void arm_isr_handler(const void *args)
{
	ARG_UNUSED(args);

#if defined(CONFIG_CPU_CORTEX_M) && defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Clear the Floating Point Status and Control Register (FPSCR),
	 * to prevent the interrupt line from being set to pending again,
	 * in case the FPU IRQ is selected by the test as the "Available IRQ line"
	 */
#if defined(CONFIG_ARMV8_1_M_MAINLINE)
	/*
	 * For ARMv8.1-M with FPU, the FPSCR[18:16] LTPSIZE field must be set
	 * to 0b100 for "Tail predication not applied", as this is its reset value
	 */
	__set_FPSCR(4 << FPU_FPDSCR_LTPSIZE_Pos);
#else
	__set_FPSCR(0);
#endif
#endif

	test_flag++;

	if (test_flag == 1) {
		/* Intentional Kernel oops */
		expected_reason = K_ERR_KERNEL_OOPS;
		k_oops();
	} else if (test_flag == 2) {
		/* Intentional Kernel panic */
		expected_reason = K_ERR_KERNEL_PANIC;
		k_panic();
	} else if (test_flag == 3) {
		/* Intentional ASSERT */
		expected_reason = K_ERR_KERNEL_PANIC;
		__ASSERT(0, "Intentional assert\n");
	} else if (test_flag == 4) {
#if defined(CONFIG_HW_STACK_PROTECTION)
		/*
		 * Verify that the Stack Overflow has been reported by the core
		 * and the expected reason variable is reset.
		 */
		int reason = expected_reason;

		zassert_equal(reason, -1, "expected_reason has not been reset (%d)\n", reason);
#endif
	}
}

/**
 * @brief Test ARM Interrupt handling
 * @ingroup kernel_arch_interrupt_tests
 */
ZTEST(arm_interrupt, test_arm_interrupt)
{
	/* Determine an NVIC IRQ line that is not currently in use. */
	int i;
	int init_flag, post_flag, reason;

	init_flag = test_flag;

	zassert_false(init_flag, "Test flag not initialized to zero\n");

	for (i = CONFIG_NUM_IRQS - 1; i >= 0; i--) {
		if (NVIC_GetEnableIRQ(i) == 0) {
			/*
			 * Interrupts configured statically with IRQ_CONNECT(.)
			 * are automatically enabled. NVIC_GetEnableIRQ()
			 * returning false, here, implies that the IRQ line is
			 * either not implemented or it is not enabled, thus,
			 * currently not in use by Zephyr.
			 */

			/* Set the NVIC line to pending. */
			NVIC_SetPendingIRQ(i);

			if (NVIC_GetPendingIRQ(i)) {
				/* If the NVIC line is pending, it is
				 * guaranteed that it is implemented; clear the
				 * line.
				 */
				NVIC_ClearPendingIRQ(i);

				if (!NVIC_GetPendingIRQ(i)) {
					/*
					 * If the NVIC line can be successfully
					 * un-pended, it is guaranteed that it
					 * can be used for software interrupt
					 * triggering.
					 */
					break;
				}
			}
		}
	}

	zassert_true(i >= 0, "No available IRQ line to use in the test\n");

	TC_PRINT("Available IRQ line: %d\n", i);

	/* Verify that triggering an interrupt on an IRQ line,
	 * on which an ISR has not yet been installed, leads
	 * to a fault of type K_ERR_SPURIOUS_IRQ.
	 */
	expected_reason = K_ERR_SPURIOUS_IRQ;
	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);
	NVIC_SetPendingIRQ(i);
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	/* Verify that the spurious ISR has led to the fault and the
	 * expected reason variable is reset.
	 */
	reason = expected_reason;
	zassert_equal(reason, -1, "expected_reason has not been reset (%d)\n", reason);
	NVIC_DisableIRQ(i);

	arch_irq_connect_dynamic(i, 0 /* highest priority */, arm_isr_handler, NULL, 0);

	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);

	for (int j = 1; j <= 3; j++) {

		/* Set the dynamic IRQ to pending state. */
		NVIC_SetPendingIRQ(i);

		/*
		 * Data and instruction barriers to make sure the NVIC IRQ is
		 * set to pending state before 'test_flag' is checked.
		 */
		barrier_dsync_fence_full();
		barrier_isync_fence_full();

		/* Returning here implies the thread was not aborted. */

		/* Confirm test flag is set by the ISR handler. */
		post_flag = test_flag;
		zassert_true(post_flag == j, "Test flag not set by ISR\n");
	}

#if defined(CONFIG_HW_STACK_PROTECTION)
	/*
	 * Simulate a stacking error that is caused explicitly by the
	 * exception entry context stacking, to verify that the CPU can
	 * correctly report stacking errors that are not also Data
	 * access violation errors.
	 */
	expected_reason = K_ERR_STACK_CHK_FAIL;

	__disable_irq();

	/* Trigger an interrupt to cause the stacking error */
	NVIC_ClearPendingIRQ(i);
	NVIC_EnableIRQ(i);
	NVIC_SetPendingIRQ(i);

	/* Manually set PSP almost at the bottom of the stack. An exception
	 * entry will make PSP descend below the limit and into the MPU guard
	 * section (or beyond the address pointed by PSPLIM in ARMv8-M MCUs).
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) && defined(CONFIG_MPU_STACK_GUARD)
#define FPU_STACK_EXTRA_SIZE 0x48
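	/* 0x48 bytes corresponds to the 18 extra words stacked for an
	 * extended (FP) exception frame: s0-s15, FPSCR and a reserved word.
	 */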
	/* If an FP context is present, we should not set the PSP
	 * too close to the end of the stack, because stacking of
	 * the ESF might corrupt kernel memory, making it not
	 * possible to continue the test execution.
	 */
	uint32_t fp_extra_size = (__get_CONTROL() & CONTROL_FPCA_Msk) ? FPU_STACK_EXTRA_SIZE : 0;
	__set_PSP(_current->stack_info.start + 0x10 + fp_extra_size);
#else
	__set_PSP(_current->stack_info.start + 0x10);
#endif

	__enable_irq();
	barrier_dsync_fence_full();
	barrier_isync_fence_full();

	/* No stack variable access below this point.
	 * The IRQ will handle the verification.
	 */
#endif /* CONFIG_HW_STACK_PROTECTION */
}

#if defined(CONFIG_USERSPACE)
#include <zephyr/internal/syscall_handler.h>
#include "test_syscalls.h"

void z_impl_test_arm_user_interrupt_syscall(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Confirm IRQs are not locked */
	zassert_false(__get_PRIMASK(), "PRIMASK is set\n");
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)

	static bool first_call = true;

	if (first_call) {

		/* First time the syscall is invoked */
		first_call = false;

		/* Lock IRQs in supervisor mode */
		unsigned int key = irq_lock();

		/* Verify that IRQs were not already locked */
		zassert_false(key, "IRQs locked in system call\n");
	}

	/* Confirm IRQs are still locked */
	zassert_true(__get_BASEPRI(), "BASEPRI not set\n");
#endif
}

static inline void z_vrfy_test_arm_user_interrupt_syscall(void)
{
	z_impl_test_arm_user_interrupt_syscall();
}
#include <zephyr/syscalls/test_arm_user_interrupt_syscall_mrsh.c>

/**
 * @brief Test ARM Interrupt handling in user mode
 * @ingroup kernel_arch_interrupt_tests
 */
ZTEST_USER(arm_interrupt, test_arm_user_interrupt)
{
	/* Test thread executing in user mode */
	zassert_true(arch_is_user_context(), "Test thread not running in user mode\n");

	/* Attempt to lock IRQs in user mode */
	irq_lock();
	/* A second lock attempt would return the non-zero key of the
	 * previous attempt, had that attempt actually succeeded.
	 */
	int lock = irq_lock();

	zassert_false(lock, "IRQs shown locked in user mode\n");

	/* Generate a system call to manage the IRQ locking */
	test_arm_user_interrupt_syscall();

	/* Attempt to unlock IRQs in user mode */
	irq_unlock(0);

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* The first system call has left the IRQs locked.
	 * Generate a second system call to inspect the IRQ locking.
	 *
	 * In Cortex-M Baseline, system calls cannot be invoked
	 * with interrupts locked, so we skip this part of the
	 * test.
	 */
	test_arm_user_interrupt_syscall();

	/* Verify that thread is not able to infer that IRQs are locked. */
	zassert_false(irq_lock(), "IRQs are shown to be locked\n");
#endif
}
#else
ZTEST_USER(arm_interrupt, test_arm_user_interrupt)
{
	ztest_test_skip();
}
#endif /* CONFIG_USERSPACE */

#pragma GCC push_options
#pragma GCC optimize("O0")
/* Avoid compiler optimizing null pointer de-referencing. */

/**
 * @brief Test ARM Null Pointer Exception handling
 * @ingroup kernel_arch_interrupt_tests
 */
ZTEST(arm_interrupt, test_arm_null_pointer_exception)
{
	Z_TEST_SKIP_IFNDEF(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION);

	int reason;

	struct test_struct {
		uint32_t val[2];
	};

	struct test_struct *test_struct_null_pointer = 0x0;

	expected_reason = K_ERR_CPU_EXCEPTION;

	printk("Reading a null pointer value: 0x%x\n", test_struct_null_pointer->val[1]);

	reason = expected_reason;
	zassert_equal(reason, -1, "expected_reason has not been reset (%d)\n", reason);
}
#pragma GCC pop_options