/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/tc_util.h>
#include <zephyr/test_toolchain.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/irq_offload.h>
#include <kswap.h>
#include <assert.h>

#if defined(CONFIG_USERSPACE)
#include <zephyr/kernel/mm.h>
#include <zephyr/internal/syscall_handler.h>
#include "test_syscalls.h"
#endif

#if defined(CONFIG_DEMAND_PAGING)
#include <zephyr/kernel/mm/demand_paging.h>
#endif

#if defined(CONFIG_X86) && defined(CONFIG_X86_MMU)
#define STACKSIZE (8192)
#else
#define STACKSIZE (2048 + CONFIG_TEST_EXTRA_STACK_SIZE)
#endif
#define MAIN_PRIORITY 7
#define PRIORITY 5

static K_THREAD_STACK_DEFINE(alt_stack, STACKSIZE);

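/* With the stack sentinel enabled, carve the overflow stack out of a
 * smaller region near the end of alt_stack, so that blow_up_stack()
 * only tramples memory lying inside alt_stack.
 */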
#if defined(CONFIG_STACK_SENTINEL) && !defined(CONFIG_ARCH_POSIX)
#define OVERFLOW_STACKSIZE (STACKSIZE / 2)
static k_thread_stack_t *overflow_stack =
		alt_stack + (STACKSIZE - OVERFLOW_STACKSIZE);
#else
#if defined(CONFIG_USERSPACE) && defined(CONFIG_ARC)
/* for ARC, privilege stack is merged into defined stack */
#define OVERFLOW_STACKSIZE (STACKSIZE + CONFIG_PRIVILEGED_STACK_SIZE)
#else
#define OVERFLOW_STACKSIZE STACKSIZE
#endif
#endif

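/* alt_thread is the thread that deliberately crashes in each test case;
 * rv records the test verdict, and is set to TC_FAIL by the entry points
 * if they unexpectedly keep running past the crash.
 */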
static struct k_thread alt_thread;
volatile int rv;

static ZTEST_DMEM volatile int expected_reason = -1;

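/* Override of the default fatal error handler: verify that the expected
 * thread crashed with the expected reason, and halt the test on any
 * mismatch or unexpected crash.
 */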
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	TC_PRINT("Caught system error -- reason %d\n", reason);

	if (expected_reason == -1) {
		printk("Was not expecting a crash\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}

	if (k_current_get() != &alt_thread) {
		printk("Wrong thread crashed\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}

	if (reason != expected_reason) {
		printk("Wrong crash type got %d expected %d\n", reason,
		       expected_reason);
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}

	expected_reason = -1;
}

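/* Thread entry point: trigger a generic CPU exception using an
 * architecture-specific illegal operation.
 */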
void entry_cpu_exception(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_CPU_EXCEPTION;

#if defined(CONFIG_X86)
	__asm__ volatile ("ud2");
#elif defined(CONFIG_ARC)
	__asm__ volatile ("swi");
#elif defined(CONFIG_RISCV)
	/* Illegal instruction on RISCV. */
	__asm__ volatile (".word 0x77777777");
#else
	/* Triggers usage fault on ARM, illegal instruction on
	 * xtensa, TLB exception (instruction fetch) on MIPS.
	 */
	{
		volatile long illegal = 0;
		((void(*)(void))&illegal)();
	}
#endif
	rv = TC_FAIL;
}

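/* Thread entry point: trigger a CPU exception on additional architectures,
 * via an undefined instruction, a software interrupt, or a divide-by-zero.
 */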
void entry_cpu_exception_extend(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_CPU_EXCEPTION;

#if defined(CONFIG_ARM64)
	__asm__ volatile ("svc 0");
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
	__asm__ volatile ("udf #0");
#elif defined(CONFIG_CPU_CORTEX_M)
	__asm__ volatile ("udf #0");
#elif defined(CONFIG_RX)
	__asm__ volatile ("brk");
#elif defined(CONFIG_RISCV)
	/* On RISC-V, use an undefined instruction to
	 * trigger an illegal instruction exception.
	 */
	__asm__ volatile ("unimp");
#elif defined(CONFIG_ARC)
	/* On ARC, the SWI instruction triggers a
	 * software interrupt.
	 */
	__asm__ volatile ("swi");
#else
	/* used to create a divide by zero error on X86 and MIPS */
	volatile int error;
	volatile int zero = 0;

	error = 32; /* avoid static checker uninitialized warnings */
	error = error / zero;
#endif
	rv = TC_FAIL;
}

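/* Thread entry point: trigger a kernel oops (recoverable fatal error). */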
void entry_oops(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_KERNEL_OOPS;

	k_oops();
	TC_ERROR("SHOULD NEVER SEE THIS\n");
	rv = TC_FAIL;
}

void entry_panic(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_KERNEL_PANIC;

	k_panic();
	TC_ERROR("SHOULD NEVER SEE THIS\n");
	rv = TC_FAIL;
}

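/* Thread entry point: deliberately fail an __ASSERT(), which is reported
 * as a kernel panic.
 */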
void entry_zephyr_assert(void *p1, void *p2, void *p3)
{
	expected_reason = K_ERR_KERNEL_PANIC;

	__ASSERT(0, "intentionally failed assertion");
	rv = TC_FAIL;
}

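/* Thread entry point: raise a fatal error with an arbitrary user-defined
 * reason code (the largest positive value here).
 */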
void entry_arbitrary_reason(void *p1, void *p2, void *p3)
{
	expected_reason = INT_MAX;

	z_except_reason(INT_MAX);
	TC_ERROR("SHOULD NEVER SEE THIS\n");
	rv = TC_FAIL;
}

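/* Thread entry point: same as above, but with a negative reason code. */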
void entry_arbitrary_reason_negative(void *p1, void *p2, void *p3)
{
	expected_reason = -2;

	z_except_reason(-2);
	TC_ERROR("SHOULD NEVER SEE THIS\n");
	rv = TC_FAIL;
}

#ifndef CONFIG_ARCH_POSIX
#ifdef CONFIG_STACK_SENTINEL
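/* Overflow the dedicated overflow stack in one shot: filling the local
 * buffer overruns the stack and tramples the sentinel value, which the
 * kernel detects at its next sentinel check.
 */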
__no_optimization void blow_up_stack(void)
{
	char buf[OVERFLOW_STACKSIZE];

	expected_reason = K_ERR_STACK_CHK_FAIL;
	TC_PRINT("posting %zu bytes of junk to stack...\n", sizeof(buf));
	(void)memset(buf, 0xbb, sizeof(buf));
}
#else
/* The stack sentinel wouldn't catch this recursive overflow in time,
 * before it trashes the entire kernel, so it is only used when the
 * sentinel is disabled.
 */

TOOLCHAIN_DISABLE_WARNING(TOOLCHAIN_WARNING_PRAGMAS)
TOOLCHAIN_DISABLE_WARNING(TOOLCHAIN_WARNING_INFINITE_RECURSION)

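/* Overflow the stack gradually through unbounded recursion, so that the
 * HW-based check (guard region or stack limit) fires as soon as the
 * stack boundary is crossed.
 */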
__no_optimization int stack_smasher(int val)
{
	return stack_smasher(val * 2) + stack_smasher(val * 3);
}

TOOLCHAIN_ENABLE_WARNING(TOOLCHAIN_WARNING_PRAGMAS)
TOOLCHAIN_ENABLE_WARNING(TOOLCHAIN_WARNING_INFINITE_RECURSION)

void blow_up_stack(void)
{
	expected_reason = K_ERR_STACK_CHK_FAIL;

	stack_smasher(37);
}

#if defined(CONFIG_USERSPACE)

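/* Expose blow_up_stack() as a system call: the z_impl_ handler runs in
 * supervisor mode on the calling user thread's privilege stack, the
 * z_vrfy_ wrapper is its verification shim, and the generated
 * marshalling code is pulled in below.
 */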
void z_impl_blow_up_priv_stack(void)
{
	blow_up_stack();
}

static inline void z_vrfy_blow_up_priv_stack(void)
{
	z_impl_blow_up_priv_stack();
}
#include <zephyr/syscalls/blow_up_priv_stack_mrsh.c>

#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_STACK_SENTINEL */

void stack_sentinel_timer(void *p1, void *p2, void *p3)
{
	/* We need to guarantee that we receive an interrupt, so set a
	 * k_timer and spin until we die. Spinning alone won't work
	 * on a tickless kernel.
	 */
	static struct k_timer timer;

	blow_up_stack();
	k_timer_init(&timer, NULL, NULL);
	k_timer_start(&timer, K_MSEC(1), K_NO_WAIT);
	while (true) {
	}
}

void stack_sentinel_swap(void *p1, void *p2, void *p3)
{
	/* Test that stack overflow check due to swap works */
	blow_up_stack();
	TC_PRINT("swapping...\n");
	z_swap_unlocked();
	TC_ERROR("should never see this\n");
	rv = TC_FAIL;
}

void stack_hw_overflow(void *p1, void *p2, void *p3)
{
	/* Test that HW stack overflow check works */
	blow_up_stack();
	TC_ERROR("should never see this\n");
	rv = TC_FAIL;
}

#if defined(CONFIG_USERSPACE)
void user_priv_stack_hw_overflow(void *p1, void *p2, void *p3)
{
	/* Test that HW stack overflow check works
	 * on a user thread's privilege stack.
	 */
	blow_up_priv_stack();
	TC_ERROR("should never see this\n");
	rv = TC_FAIL;
}
#endif /* CONFIG_USERSPACE */

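/* Helper: spawn alt_thread running the given handler with the given
 * thread flags, then verify that it crashed instead of running to
 * completion.
 */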
void check_stack_overflow(k_thread_entry_t handler, uint32_t flags)
{
#ifdef CONFIG_STACK_SENTINEL
	/* When testing the stack sentinel feature, the overflow stack is a
	 * smaller section of alt_stack near the end.
	 * That way, when it gets overflowed by blow_up_stack(), we don't
	 * corrupt anything else or prevent the test case from completing.
	 */
	k_thread_create(&alt_thread, overflow_stack, OVERFLOW_STACKSIZE,
#else
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
#endif /* CONFIG_STACK_SENTINEL */
			handler,
			NULL, NULL, NULL, K_PRIO_PREEMPT(PRIORITY), flags,
			K_NO_WAIT);

	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");
}
#endif /* !CONFIG_ARCH_POSIX */

/**
 * @brief Test that kernel fatal error handling works correctly
 * @details Manually trigger crashes in various ways and check that the
 * kernel handles each of them properly and reports the expected crash
 * reason. Also exercise the stack sentinel feature by overflowing a
 * thread's stack and checking for the resulting exception.
 *
 * @ingroup kernel_fatal_tests
 */
ZTEST(fatal_exception, test_fatal)
{
	rv = TC_PASS;

	/*
	 * The main thread (test_main) priority was 10, but the ztest
	 * thread runs at priority -1. To run the test smoothly, make
	 * both the main and ztest threads run at the same priority level.
	 */
	k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY));

#ifndef CONFIG_ARCH_POSIX
	TC_PRINT("test alt thread 1: generic CPU exception\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_cpu_exception,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

	TC_PRINT("test alt thread 1: generic CPU exception divide zero\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_cpu_exception_extend,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");
#else
	/*
	 * We want the native OS to handle segfaults, so we can debug them
	 * with the normal Linux tools.
	 */
	TC_PRINT("test alt thread 1: skipped for POSIX ARCH\n");
#endif

	TC_PRINT("test alt thread 2: initiate kernel oops\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_oops,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

	TC_PRINT("test alt thread 3: initiate kernel panic\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_panic,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

#if defined(CONFIG_ASSERT)
	/* This test shall be skipped when ASSERT is off */
	TC_PRINT("test alt thread 4: fail assertion\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_zephyr_assert,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");
#endif

	TC_PRINT("test alt thread 5: initiate arbitrary SW exception\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_arbitrary_reason,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

	TC_PRINT("test alt thread 6: initiate arbitrary SW exception negative\n");
	k_thread_create(&alt_thread, alt_stack,
			K_THREAD_STACK_SIZEOF(alt_stack),
			entry_arbitrary_reason_negative,
			NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
			K_NO_WAIT);
	k_thread_abort(&alt_thread);
	zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

#ifndef CONFIG_ARCH_POSIX

#ifdef CONFIG_STACK_SENTINEL
	TC_PRINT("test stack sentinel overflow - timer irq\n");
	check_stack_overflow(stack_sentinel_timer, 0);

	TC_PRINT("test stack sentinel overflow - swap\n");
	check_stack_overflow(stack_sentinel_swap, 0);
#endif /* CONFIG_STACK_SENTINEL */

#ifdef CONFIG_HW_STACK_PROTECTION
	/* HW based stack overflow detection.
	 * Do this twice to show that HW-based solutions work more than
	 * once.
	 */

	TC_PRINT("test stack HW-based overflow - supervisor 1\n");
	check_stack_overflow(stack_hw_overflow, 0);

	TC_PRINT("test stack HW-based overflow - supervisor 2\n");
	check_stack_overflow(stack_hw_overflow, 0);

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	TC_PRINT("test stack HW-based overflow (FPU thread) - supervisor 1\n");
	check_stack_overflow(stack_hw_overflow, K_FP_REGS);

	TC_PRINT("test stack HW-based overflow (FPU thread) - supervisor 2\n");
	check_stack_overflow(stack_hw_overflow, K_FP_REGS);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#ifdef CONFIG_USERSPACE

	TC_PRINT("test stack HW-based overflow - user 1\n");
	check_stack_overflow(stack_hw_overflow, K_USER);

	TC_PRINT("test stack HW-based overflow - user 2\n");
	check_stack_overflow(stack_hw_overflow, K_USER);

	TC_PRINT("test stack HW-based overflow - user priv stack 1\n");
	check_stack_overflow(user_priv_stack_hw_overflow, K_USER);

	TC_PRINT("test stack HW-based overflow - user priv stack 2\n");
	check_stack_overflow(user_priv_stack_hw_overflow, K_USER);

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	TC_PRINT("test stack HW-based overflow (FPU thread) - user 1\n");
	check_stack_overflow(stack_hw_overflow, K_USER | K_FP_REGS);

	TC_PRINT("test stack HW-based overflow (FPU thread) - user 2\n");
	check_stack_overflow(stack_hw_overflow, K_USER | K_FP_REGS);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#endif /* CONFIG_USERSPACE */

#endif /* CONFIG_HW_STACK_PROTECTION */

#endif /* !CONFIG_ARCH_POSIX */
}

static void *fatal_setup(void)
{
#if defined(CONFIG_DEMAND_PAGING) && \
	!defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
	uintptr_t pin_addr;
	size_t pin_size, obj_size;

	/* Need to pin the whole stack object (including reserved
	 * space); otherwise it would cause double faults: an exception
	 * being processed while the stacks page fault.
	 *
	 * The same applies to some variables needed during exception
	 * processing.
	 */
#if defined(CONFIG_STACK_SENTINEL) && !defined(CONFIG_ARCH_POSIX)

	obj_size = K_THREAD_STACK_SIZEOF(overflow_stack);
#if defined(CONFIG_USERSPACE)
	obj_size = K_THREAD_STACK_LEN(obj_size);
#endif

	k_mem_region_align(&pin_addr, &pin_size,
			   POINTER_TO_UINT(&overflow_stack),
			   obj_size, CONFIG_MMU_PAGE_SIZE);

	k_mem_pin(UINT_TO_POINTER(pin_addr), pin_size);
#endif /* CONFIG_STACK_SENTINEL && !CONFIG_ARCH_POSIX */

	obj_size = K_THREAD_STACK_SIZEOF(alt_stack);
#if defined(CONFIG_USERSPACE)
	obj_size = K_THREAD_STACK_LEN(obj_size);
#endif

	k_mem_region_align(&pin_addr, &pin_size,
			   POINTER_TO_UINT(&alt_stack),
			   obj_size,
			   CONFIG_MMU_PAGE_SIZE);

	k_mem_pin(UINT_TO_POINTER(pin_addr), pin_size);

	k_mem_region_align(&pin_addr, &pin_size,
			   POINTER_TO_UINT((void *)&expected_reason),
			   sizeof(expected_reason),
			   CONFIG_MMU_PAGE_SIZE);

	k_mem_pin(UINT_TO_POINTER(pin_addr), pin_size);
#endif /* CONFIG_DEMAND_PAGING
	* && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
	*/

	return NULL;
}

ZTEST_SUITE(fatal_exception, NULL, fatal_setup, NULL, NULL, NULL);