1 /*
2  * Copyright (c) 2016 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stdlib.h>
8 #include <zephyr/ztest.h>
9 
10 #include <zephyr/app_memory/app_memdomain.h>
11 #ifdef CONFIG_USERSPACE
12 #include <zephyr/sys/libc-hooks.h>
13 #endif
14 #include <zephyr/logging/log_ctrl.h>
15 #include <zephyr/sys/reboot.h>
16 
17 #include <zephyr/llext/symbol.h>
18 
19 #include <zephyr/sys/barrier.h>
20 
21 #ifdef KERNEL
22 static struct k_thread ztest_thread;
23 #endif
24 static bool failed_expectation;
25 
26 #ifdef CONFIG_ZTEST_SHELL
27 #include <zephyr/shell/shell.h>
28 #endif
29 
30 #ifdef CONFIG_ZTEST_SHUFFLE
31 #include <time.h>
32 #include <zephyr/random/random.h>
33 #ifndef CONFIG_ZTEST_REPEAT
34 #define NUM_ITER_PER_SUITE CONFIG_ZTEST_SHUFFLE_SUITE_REPEAT_COUNT
35 #define NUM_ITER_PER_TEST  CONFIG_ZTEST_SHUFFLE_TEST_REPEAT_COUNT
36 #endif
37 #endif /* CONFIG_ZTEST_SHUFFLE */
38 
39 #ifdef CONFIG_ZTEST_REPEAT
40 #define NUM_ITER_PER_SUITE CONFIG_ZTEST_SUITE_REPEAT_COUNT
41 #define NUM_ITER_PER_TEST  CONFIG_ZTEST_TEST_REPEAT_COUNT
42 #else
43 #ifndef CONFIG_ZTEST_SHUFFLE
44 #define NUM_ITER_PER_SUITE 1
45 #define NUM_ITER_PER_TEST  1
46 #endif
47 #endif
48 
49 #ifdef CONFIG_ZTEST_COVERAGE_RESET_BEFORE_TESTS
50 #include <coverage.h>
51 #endif
52 
53 /* ZTEST_DMEM and ZTEST_BMEM are used for the application shared memory test  */
54 
55 /**
56  * @brief The current status of the test binary
57  */
58 enum ztest_status {
59 	ZTEST_STATUS_OK,
60 	ZTEST_STATUS_HAS_FAILURE,
61 	ZTEST_STATUS_CRITICAL_ERROR
62 };
63 
64 /**
65  * @brief Tracks the current phase that ztest is operating in.
66  */
67 ZTEST_DMEM enum ztest_phase cur_phase = TEST_PHASE_FRAMEWORK;
68 
69 static ZTEST_BMEM enum ztest_status test_status = ZTEST_STATUS_OK;
70 
71 extern ZTEST_DMEM const struct ztest_arch_api ztest_api;
72 
73 static void __ztest_show_suite_summary(void);
74 
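/* Print the suite summary and report the overall pass/fail status of the test binary. */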
75 static void end_report(void)
76 {
77 	__ztest_show_suite_summary();
78 	if (test_status) {
79 		TC_END_REPORT(TC_FAIL);
80 	} else {
81 		TC_END_REPORT(TC_PASS);
82 	}
83 }
84 
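/* Check for unconsumed mock parameters/return values and abort the reusable ztest thread. */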
85 static int cleanup_test(struct ztest_unit_test *test)
86 {
87 	int ret = TC_PASS;
88 	int mock_status;
89 
90 	mock_status = z_cleanup_mock();
91 
92 #ifdef KERNEL
93 	/* We need to remove the ztest_thread information from the timeout_q.
94 	 * Because we reuse the same k_thread structure, a stale entry
95 	 * would cause problems.
96 	 */
97 	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
98 		k_thread_abort(&ztest_thread);
99 	}
100 #endif
101 
102 	if (!ret && mock_status == 1) {
103 		PRINT_DATA("Test %s failed: Unused mock parameter values\n", test->name);
104 		ret = TC_FAIL;
105 	} else if (!ret && mock_status == 2) {
106 		PRINT_DATA("Test %s failed: Unused mock return values\n", test->name);
107 		ret = TC_FAIL;
108 	} else {
109 		;
110 	}
111 
112 	return ret;
113 }
114 
115 #ifdef KERNEL
116 
117 #if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
118 #define MAX_NUM_CPUHOLD  (CONFIG_MP_MAX_NUM_CPUS - 1)
119 #define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
120 
121 struct cpuhold_pool_item {
122 	struct k_thread thread;
123 	bool used;
124 };
125 
126 static struct cpuhold_pool_item cpuhold_pool_items[MAX_NUM_CPUHOLD + 1];
127 
128 K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, MAX_NUM_CPUHOLD + 1, CPUHOLD_STACK_SZ);
129 
130 static struct k_sem cpuhold_sem;
131 
132 volatile int cpuhold_active;
133 volatile bool cpuhold_spawned;
134 
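/* Return the index of the first unused entry in cpuhold_pool_items, or -1 if all are in use. */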
135 static int find_unused_thread(void)
136 {
137 	for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
138 		if (!cpuhold_pool_items[i].used) {
139 			return i;
140 		}
141 	}
142 
143 	return -1;
144 }
145 
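/* Mark the cpuhold pool entry that owns the given thread as available again. */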
146 static void mark_thread_unused(struct k_thread *thread)
147 {
148 	for (unsigned int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
149 		if (&cpuhold_pool_items[i].thread == thread) {
150 			cpuhold_pool_items[i].used = false;
151 		}
152 	}
153 }
154 
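/* Spin until the given thread has been switched out (its switch_handle becomes non-NULL). */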
155 static inline void wait_for_thread_to_switch_out(struct k_thread *thread)
156 {
157 	unsigned int key = arch_irq_lock();
158 	volatile void **shp = (void *)&thread->switch_handle;
159 
160 	while (*shp == NULL) {
161 		arch_spin_relax();
162 	}
163 	/* Read barrier: don't allow any subsequent loads in the
164 	 * calling code to reorder before we saw switch_handle go
165 	 * non-null.
166 	 */
167 	barrier_dmem_fence_full();
168 
169 	arch_irq_unlock(key);
170 }
171 
172 /* "Holds" a CPU for use with the "1cpu" test cases.  Note that we
173  * can't use tools like the cpumask feature because we have tests that
174  * may need to control that configuration themselves.  We do this at
175  * the lowest level, by locking interrupts directly and spinning.
176  */
177 static void cpu_hold(void *arg1, void *arg2, void *arg3)
178 {
179 	struct k_thread *thread = arg1;
180 	unsigned int idx = (unsigned int)(uintptr_t)arg2;
181 	char tname[CONFIG_THREAD_MAX_NAME_LEN];
182 
183 	ARG_UNUSED(arg3);
184 
185 	if (arch_proc_id() == 0) {
186 		int i;
187 
188 		i = find_unused_thread();
189 
190 		__ASSERT_NO_MSG(i != -1);
191 
192 		cpuhold_spawned = false;
193 
194 		cpuhold_pool_items[i].used = true;
195 		k_thread_create(&cpuhold_pool_items[i].thread, cpuhold_stacks[i], CPUHOLD_STACK_SZ,
196 				cpu_hold, k_current_get(), (void *)(uintptr_t)idx, NULL,
197 				K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
198 
199 		/*
200 		 * Busy-wait until we know the spawned thread is running to
201 		 * ensure it does not spawn on CPU0.
202 		 */
203 
204 		while (!cpuhold_spawned) {
205 			k_busy_wait(1000);
206 		}
207 
208 		return;
209 	}
210 
211 	if (thread != NULL) {
212 		cpuhold_spawned = true;
213 
214 		/* Busy-wait until a new thread is scheduled in on CPU0 */
215 
216 		wait_for_thread_to_switch_out(thread);
217 
218 		mark_thread_unused(thread);
219 	}
220 
221 	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
222 		snprintk(tname, CONFIG_THREAD_MAX_NAME_LEN, "cpuhold%02d", idx);
223 		k_thread_name_set(k_current_get(), tname);
224 	}
225 
226 	uint32_t dt, start_ms = k_uptime_get_32();
227 	unsigned int key = arch_irq_lock();
228 
229 	k_sem_give(&cpuhold_sem);
230 
231 #if (defined(CONFIG_ARM64) || defined(CONFIG_RISCV)) && defined(CONFIG_FPU_SHARING)
232 	/*
233 	 * We'll be spinning with IRQs disabled. The flush-your-FPU request
234 	 * IPI will never be serviced during that time. Therefore we flush
235 	 * the FPU preemptively here to prevent any other CPU from waiting
236 	 * on this CPU forever and deadlocking the system.
237 	 */
238 	k_float_disable(_current_cpu->arch.fpu_owner);
239 #endif
240 
241 	while (cpuhold_active) {
242 		k_busy_wait(1000);
243 	}
244 
245 	/* Holding the CPU via spinning is expensive, and abusing this
246 	 * for long-running test cases tends to overload the CI system
247 	 * (qemu runs separate CPUs in different threads, but the CI
248 	 * logic views it as one "job") and cause other test failures.
249 	 */
250 	dt = k_uptime_get_32() - start_ms;
251 	zassert_true(dt < CONFIG_ZTEST_CPU_HOLD_TIME_MS, "1cpu test took too long (%d ms)", dt);
252 	arch_irq_unlock(key);
253 }
254 #endif /* CONFIG_SMP && (CONFIG_MP_MAX_NUM_CPUS > 1) */
255 
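/* Pin test execution to a single CPU by parking holder threads on all other CPUs. */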
256 void z_impl_z_test_1cpu_start(void)
257 {
258 #if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
259 	unsigned int num_cpus = arch_num_cpus();
260 	int j;
261 
262 	cpuhold_active = 1;
263 
264 	k_sem_init(&cpuhold_sem, 0, 999);
265 
266 	/* Spawn N-1 threads to "hold" the other CPUs, waiting for
267 	 * each to signal us that it's locked and spinning.
268 	 */
269 	for (int i = 0; i < num_cpus - 1; i++) {
270 		j = find_unused_thread();
271 
272 		__ASSERT_NO_MSG(j != -1);
273 
274 		cpuhold_pool_items[j].used = true;
275 		k_thread_create(&cpuhold_pool_items[j].thread, cpuhold_stacks[j], CPUHOLD_STACK_SZ,
276 				cpu_hold, NULL, (void *)(uintptr_t)i, NULL, K_HIGHEST_THREAD_PRIO,
277 				0, K_NO_WAIT);
278 		k_sem_take(&cpuhold_sem, K_FOREVER);
279 	}
280 #endif
281 }
282 
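/* Release the parked CPUs and reclaim the holder threads. */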
283 void z_impl_z_test_1cpu_stop(void)
284 {
285 #if defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)
286 	cpuhold_active = 0;
287 
288 	for (int i = 0; i <= MAX_NUM_CPUHOLD; i++) {
289 		if (cpuhold_pool_items[i].used) {
290 			k_thread_abort(&cpuhold_pool_items[i].thread);
291 			cpuhold_pool_items[i].used = false;
292 		}
293 	}
294 #endif
295 }
296 
297 #ifdef CONFIG_USERSPACE
298 void z_vrfy_z_test_1cpu_start(void)
299 {
300 	z_impl_z_test_1cpu_start();
301 }
302 #include <zephyr/syscalls/z_test_1cpu_start_mrsh.c>
303 
304 void z_vrfy_z_test_1cpu_stop(void)
305 {
306 	z_impl_z_test_1cpu_stop();
307 }
308 #include <zephyr/syscalls/z_test_1cpu_stop_mrsh.c>
309 #endif /* CONFIG_USERSPACE */
310 #endif
311 
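/* Run the before_each or after_each hook of every registered test rule for the given test. */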
312 __maybe_unused static void run_test_rules(bool is_before, struct ztest_unit_test *test, void *data)
313 {
314 	for (struct ztest_test_rule *rule = _ztest_test_rule_list_start;
315 	     rule < _ztest_test_rule_list_end; ++rule) {
316 		if (is_before && rule->before_each) {
317 			rule->before_each(test, data);
318 		} else if (!is_before && rule->after_each) {
319 			rule->after_each(test, data);
320 		}
321 	}
322 }
323 
324 static void run_test_functions(struct ztest_suite_node *suite, struct ztest_unit_test *test,
325 			       void *data)
326 {
327 	__ztest_set_test_phase(TEST_PHASE_TEST);
328 	test->test(data);
329 }
330 
331 COND_CODE_1(KERNEL, (ZTEST_BMEM), ()) static enum ztest_result test_result;
332 
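/* Adjust the raw result according to any expected-failure or expected-skip entry registered for the test. */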
333 static int get_final_test_result(const struct ztest_unit_test *test, int ret)
334 {
335 	enum ztest_expected_result expected_result = -1;
336 
337 	for (struct ztest_expected_result_entry *expectation =
338 		     _ztest_expected_result_entry_list_start;
339 	     expectation < _ztest_expected_result_entry_list_end; ++expectation) {
340 		if (strcmp(expectation->test_name, test->name) == 0 &&
341 		    strcmp(expectation->test_suite_name, test->test_suite_name) == 0) {
342 			expected_result = expectation->expected_result;
343 			break;
344 		}
345 	}
346 
347 	if (expected_result == ZTEST_EXPECTED_RESULT_FAIL) {
348 		/* Expected a failure:
349 		 * - If we got a failure, return TC_PASS
350 		 * - Otherwise force a failure
351 		 */
352 		return (ret == TC_FAIL) ? TC_PASS : TC_FAIL;
353 	}
354 	if (expected_result == ZTEST_EXPECTED_RESULT_SKIP) {
355 		/* Expected a skip:
356 		 * - If we got a skip, return TC_PASS
357 		 * - Otherwise force a failure
358 		 */
359 		return (ret == TC_SKIP) ? TC_PASS : TC_FAIL;
360 	}
361 	/* No expectation was made, no change is needed. */
362 	return ret;
363 }
364 
365 /**
366  * @brief Get a friendly name string for a given test phase.
367  *
368  * @param phase an enum ztest_phase value describing the desired test phase
369  * @returns a string name for `phase`
370  */
371 static inline const char *get_friendly_phase_name(enum ztest_phase phase)
372 {
373 	switch (phase) {
374 	case TEST_PHASE_SETUP:
375 		return "setup";
376 	case TEST_PHASE_BEFORE:
377 		return "before";
378 	case TEST_PHASE_TEST:
379 		return "test";
380 	case TEST_PHASE_AFTER:
381 		return "after";
382 	case TEST_PHASE_TEARDOWN:
383 		return "teardown";
384 	case TEST_PHASE_FRAMEWORK:
385 		return "framework";
386 	default:
387 		return "(unknown)";
388 	}
389 }
390 
391 static bool current_test_failed_assumption;
392 void ztest_skip_failed_assumption(void)
393 {
394 	if (IS_ENABLED(CONFIG_ZTEST_FAIL_ON_ASSUME)) {
395 		current_test_failed_assumption = true;
396 		ztest_test_fail();
397 	} else {
398 		ztest_test_skip();
399 	}
400 }
401 
402 #ifndef KERNEL
403 
404 /* Static code analysis tools can raise a violation that the standard header
405  * <setjmp.h> shall not be used.
406  *
407  * setjmp is used in test code, not in runtime code, so it is acceptable.
408  * It is a deliberate deviation.
409  */
410 #include <setjmp.h> /* parasoft-suppress MISRAC2012-RULE_21_4-a MISRAC2012-RULE_21_4-b*/
411 #include <signal.h>
412 #include <stdlib.h>
413 #include <string.h>
414 
415 #define FAIL_FAST 0
416 
417 static jmp_buf test_fail;
418 static jmp_buf test_pass;
419 static jmp_buf test_skip;
420 static jmp_buf stack_fail;
421 static jmp_buf test_suite_fail;
422 
423 void ztest_test_fail(void)
424 {
425 	switch (cur_phase) {
426 	case TEST_PHASE_SETUP:
427 		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
428 		longjmp(test_suite_fail, 1);
429 	case TEST_PHASE_BEFORE:
430 	case TEST_PHASE_TEST:
431 		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
432 		longjmp(test_fail, 1);
433 	case TEST_PHASE_AFTER:
434 	case TEST_PHASE_TEARDOWN:
435 	case TEST_PHASE_FRAMEWORK:
436 		PRINT_DATA(" ERROR: cannot fail in test phase '%s()', bailing\n",
437 			   get_friendly_phase_name(cur_phase));
438 		longjmp(stack_fail, 1);
439 	}
440 }
441 EXPORT_SYMBOL(ztest_test_fail);
442 
443 void ztest_test_pass(void)
444 {
445 	if (cur_phase == TEST_PHASE_TEST) {
446 		longjmp(test_pass, 1);
447 	}
448 	PRINT_DATA(" ERROR: cannot pass in test phase '%s()', bailing\n",
449 		   get_friendly_phase_name(cur_phase));
450 	longjmp(stack_fail, 1);
451 }
452 EXPORT_SYMBOL(ztest_test_pass);
453 
454 void ztest_test_skip(void)
455 {
456 	switch (cur_phase) {
457 	case TEST_PHASE_SETUP:
458 	case TEST_PHASE_BEFORE:
459 	case TEST_PHASE_TEST:
460 		longjmp(test_skip, 1);
461 	default:
462 		PRINT_DATA(" ERROR: cannot skip in test phase '%s()', bailing\n",
463 			   get_friendly_phase_name(cur_phase));
464 		longjmp(stack_fail, 1);
465 	}
466 }
467 EXPORT_SYMBOL(ztest_test_skip);
468 
469 void ztest_test_expect_fail(void)
470 {
471 	failed_expectation = true;
472 
473 	switch (cur_phase) {
474 	case TEST_PHASE_SETUP:
475 		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
476 		break;
477 	case TEST_PHASE_BEFORE:
478 	case TEST_PHASE_TEST:
479 		PRINT_DATA(" at %s function\n", get_friendly_phase_name(cur_phase));
480 		break;
481 	case TEST_PHASE_AFTER:
482 	case TEST_PHASE_TEARDOWN:
483 	case TEST_PHASE_FRAMEWORK:
484 		PRINT_DATA(" ERROR: cannot fail in test phase '%s()', bailing\n",
485 			   get_friendly_phase_name(cur_phase));
486 		longjmp(stack_fail, 1);
487 	}
488 }
489 
490 static int run_test(struct ztest_suite_node *suite, struct ztest_unit_test *test, void *data)
491 {
492 	int ret = TC_PASS;
493 
494 	TC_START(test->name);
495 	__ztest_set_test_phase(TEST_PHASE_BEFORE);
496 
497 	if (test_result == ZTEST_RESULT_SUITE_FAIL) {
498 		ret = TC_FAIL;
499 		goto out;
500 	}
501 
502 	if (setjmp(test_fail)) {
503 		ret = TC_FAIL;
504 		goto out;
505 	}
506 
507 	if (setjmp(test_pass)) {
508 		ret = TC_PASS;
509 		goto out;
510 	}
511 
512 	if (setjmp(test_skip)) {
513 		ret = TC_SKIP;
514 		goto out;
515 	}
516 
517 	run_test_rules(/*is_before=*/true, test, data);
518 	if (suite->before) {
519 		suite->before(data);
520 	}
521 	run_test_functions(suite, test, data);
522 out:
523 	if (failed_expectation) {
524 		failed_expectation = false;
525 		ret = TC_FAIL;
526 	}
527 
528 	__ztest_set_test_phase(TEST_PHASE_AFTER);
529 	if (test_result != ZTEST_RESULT_SUITE_FAIL) {
530 		if (suite->after != NULL) {
531 			suite->after(data);
532 		}
533 		run_test_rules(/*is_before=*/false, test, data);
534 	}
535 	__ztest_set_test_phase(TEST_PHASE_FRAMEWORK);
536 	ret |= cleanup_test(test);
537 
538 	ret = get_final_test_result(test, ret);
539 	Z_TC_END_RESULT(ret, test->name);
540 	if (ret == TC_SKIP && current_test_failed_assumption) {
541 		test_status = 1;
542 	}
543 
544 	return ret;
545 }
546 
547 #else /* KERNEL */
548 
549 /* If one test fails, Zephyr will probably cause the remaining tests to fail
550  * as well, so optionally skip the rest of the tests once one of them fails.
551  */
552 #ifdef CONFIG_ZTEST_FAIL_FAST
553 #define FAIL_FAST 1
554 #else
555 #define FAIL_FAST 0
556 #endif
557 
558 K_THREAD_STACK_DEFINE(ztest_thread_stack, CONFIG_ZTEST_STACK_SIZE + CONFIG_TEST_EXTRA_STACK_SIZE);
559 
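/* Terminate the test: abort the ztest thread and, when not in an ISR, the current thread. */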
560 static void test_finalize(void)
561 {
562 	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
563 		k_thread_abort(&ztest_thread);
564 		if (k_is_in_isr()) {
565 			return;
566 		}
567 
568 		k_thread_abort(k_current_get());
569 		CODE_UNREACHABLE;
570 	}
571 }
572 
573 void ztest_test_fail(void)
574 {
575 	switch (cur_phase) {
576 	case TEST_PHASE_SETUP:
577 		__ztest_set_test_result(ZTEST_RESULT_SUITE_FAIL);
578 		break;
579 	case TEST_PHASE_BEFORE:
580 	case TEST_PHASE_TEST:
581 		__ztest_set_test_result(ZTEST_RESULT_FAIL);
582 		test_finalize();
583 		break;
584 	default:
585 		PRINT_DATA(" ERROR: cannot fail in test phase '%s()', bailing\n",
586 			   get_friendly_phase_name(cur_phase));
587 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
588 		break;
589 	}
590 }
591 EXPORT_SYMBOL(ztest_test_fail);
592 
593 void ztest_test_pass(void)
594 {
595 	switch (cur_phase) {
596 	case TEST_PHASE_TEST:
597 		__ztest_set_test_result(ZTEST_RESULT_PASS);
598 		test_finalize();
599 		break;
600 	default:
601 		PRINT_DATA(" ERROR: cannot pass in test phase '%s()', bailing\n",
602 			   get_friendly_phase_name(cur_phase));
603 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
604 		if (cur_phase == TEST_PHASE_BEFORE) {
605 			test_finalize();
606 		}
607 		break;
608 	}
609 }
610 EXPORT_SYMBOL(ztest_test_pass);
611 
612 void ztest_test_skip(void)
613 {
614 	switch (cur_phase) {
615 	case TEST_PHASE_SETUP:
616 		__ztest_set_test_result(ZTEST_RESULT_SUITE_SKIP);
617 		break;
618 	case TEST_PHASE_BEFORE:
619 	case TEST_PHASE_TEST:
620 		__ztest_set_test_result(ZTEST_RESULT_SKIP);
621 		test_finalize();
622 		break;
623 	default:
624 		PRINT_DATA(" ERROR: cannot skip in test phase '%s()', bailing\n",
625 			   get_friendly_phase_name(cur_phase));
626 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
627 		break;
628 	}
629 }
630 EXPORT_SYMBOL(ztest_test_skip);
631 
632 void ztest_test_expect_fail(void)
633 {
634 	failed_expectation = true;
635 }
636 
637 void ztest_simple_1cpu_before(void *data)
638 {
639 	ARG_UNUSED(data);
640 	z_test_1cpu_start();
641 }
642 
643 void ztest_simple_1cpu_after(void *data)
644 {
645 	ARG_UNUSED(data);
646 	z_test_1cpu_stop();
647 }
648 
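/* Test thread entry point: run the rules and the suite's before hook, then the test body,
 * switching to user mode first when the test requests it.
 */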
649 static void test_cb(void *a, void *b, void *c)
650 {
651 	struct ztest_suite_node *suite = a;
652 	struct ztest_unit_test *test = b;
653 	const bool config_user_mode = FIELD_GET(K_USER, test->thread_options) != 0;
654 
655 	if (!IS_ENABLED(CONFIG_USERSPACE) || !k_is_user_context()) {
656 		__ztest_set_test_result(ZTEST_RESULT_PENDING);
657 		run_test_rules(/*is_before=*/true, test, /*data=*/c);
658 		if (suite->before) {
659 			suite->before(/*data=*/c);
660 		}
661 		if (IS_ENABLED(CONFIG_USERSPACE) && config_user_mode) {
662 			k_thread_user_mode_enter(test_cb, a, b, c);
663 		}
664 	}
665 	run_test_functions(suite, test, c);
666 	__ztest_set_test_result(ZTEST_RESULT_PASS);
667 }
668 
669 static int run_test(struct ztest_suite_node *suite, struct ztest_unit_test *test, void *data)
670 {
671 	int ret = TC_PASS;
672 
673 #if CONFIG_ZTEST_TEST_DELAY_MS > 0
674 	k_busy_wait(CONFIG_ZTEST_TEST_DELAY_MS * USEC_PER_MSEC);
675 #endif
676 	TC_START(test->name);
677 
678 	__ztest_set_test_phase(TEST_PHASE_BEFORE);
679 
680 	/* If the suite's setup function marked us as skipped, don't bother
681 	 * running the tests.
682 	 */
683 	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
684 		get_start_time_cyc();
685 		k_thread_create(&ztest_thread, ztest_thread_stack,
686 				K_THREAD_STACK_SIZEOF(ztest_thread_stack), test_cb, suite, test,
687 				data, CONFIG_ZTEST_THREAD_PRIORITY, K_INHERIT_PERMS, K_FOREVER);
688 
689 		k_thread_access_grant(&ztest_thread, suite, test, suite->stats);
690 		if (test->name != NULL) {
691 			k_thread_name_set(&ztest_thread, test->name);
692 		}
693 		/* Only start the thread if we're not skipping the suite */
694 		if (test_result != ZTEST_RESULT_SUITE_SKIP &&
695 		    test_result != ZTEST_RESULT_SUITE_FAIL) {
696 			k_thread_start(&ztest_thread);
697 			k_thread_join(&ztest_thread, K_FOREVER);
698 		}
699 	} else if (test_result != ZTEST_RESULT_SUITE_SKIP &&
700 		   test_result != ZTEST_RESULT_SUITE_FAIL) {
701 		__ztest_set_test_result(ZTEST_RESULT_PENDING);
702 		get_start_time_cyc();
703 		run_test_rules(/*is_before=*/true, test, data);
704 		if (suite->before) {
705 			suite->before(data);
706 		}
707 		run_test_functions(suite, test, data);
708 	}
709 
710 	__ztest_set_test_phase(TEST_PHASE_AFTER);
711 	if (suite->after != NULL) {
712 		suite->after(data);
713 	}
714 	run_test_rules(/*is_before=*/false, test, data);
715 
716 	get_test_duration_ms();
717 	if (tc_spend_time > test->stats->duration_worst_ms) {
718 		test->stats->duration_worst_ms = tc_spend_time;
719 	}
720 
721 	__ztest_set_test_phase(TEST_PHASE_FRAMEWORK);
722 
723 	/* Flush all logs in case deferred mode and default logging thread are used. */
724 	while (IS_ENABLED(CONFIG_TEST_LOGGING_FLUSH_AFTER_TEST) &&
725 	       IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && log_data_pending()) {
726 		k_msleep(100);
727 	}
728 
729 	if (test_result == ZTEST_RESULT_FAIL || test_result == ZTEST_RESULT_SUITE_FAIL ||
730 	    failed_expectation) {
731 		ret = TC_FAIL;
732 		failed_expectation = false;
733 	} else if (test_result == ZTEST_RESULT_SKIP || test_result == ZTEST_RESULT_SUITE_SKIP) {
734 		ret = TC_SKIP;
735 	}
736 
737 	if (test_result == ZTEST_RESULT_PASS || !FAIL_FAST) {
738 		ret |= cleanup_test(test);
739 	}
740 
741 	ret = get_final_test_result(test, ret);
742 	Z_TC_END_RESULT(ret, test->name);
743 	if (ret == TC_SKIP && current_test_failed_assumption) {
744 		test_status = 1;
745 	}
746 
747 	return ret;
748 }
749 
750 #endif /* !KERNEL */
751 
752 static struct ztest_suite_node *ztest_find_test_suite(const char *name)
753 {
754 	struct ztest_suite_node *node;
755 
756 	for (node = _ztest_suite_node_list_start; node < _ztest_suite_node_list_end; ++node) {
757 		if (strcmp(name, node->name) == 0) {
758 			return node;
759 		}
760 	}
761 
762 	return NULL;
763 }
764 
765 struct ztest_unit_test *z_ztest_get_next_test(const char *suite, struct ztest_unit_test *prev)
766 {
767 	struct ztest_unit_test *test = (prev == NULL) ? _ztest_unit_test_list_start : prev + 1;
768 
769 	for (; test < _ztest_unit_test_list_end; ++test) {
770 		if (strcmp(suite, test->test_suite_name) == 0) {
771 			return test;
772 		}
773 	}
774 	return NULL;
775 }
776 
777 #if CONFIG_ZTEST_SHUFFLE
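/* Fill dest[] with pointers to num_items elements starting at start and, if requested,
 * shuffle them with a Fisher-Yates pass.
 */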
778 static void z_ztest_shuffle(bool shuffle, void *dest[], intptr_t start, size_t num_items,
779 			    size_t element_size)
780 {
781 	/* Initialize dest array */
782 	for (size_t i = 0; i < num_items; ++i) {
783 		dest[i] = (void *)(start + (i * element_size));
784 	}
785 	void *tmp;
786 
787 	/* Shuffle dest array */
788 	if (shuffle) {
789 		for (size_t i = num_items - 1; i > 0; i--) {
790 			int j = sys_rand32_get() % (i + 1);
791 
792 			if (i != j) {
793 				tmp = dest[j];
794 				dest[j] = dest[i];
795 				dest[i] = tmp;
796 			}
797 		}
798 	}
799 }
800 #endif
801 
802 static int z_ztest_run_test_suite_ptr(struct ztest_suite_node *suite, bool shuffle, int suite_iter,
803 				      int case_iter, void *param)
804 {
805 	struct ztest_unit_test *test = NULL;
806 	void *data = NULL;
807 	int fail = 0;
808 	int tc_result = TC_PASS;
809 
810 	if (FAIL_FAST && test_status != ZTEST_STATUS_OK) {
811 		return test_status;
812 	}
813 
814 	if (suite == NULL) {
815 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
816 		return -1;
817 	}
818 
819 #ifndef KERNEL
820 	if (setjmp(stack_fail)) {
821 		PRINT_DATA("TESTSUITE crashed.\n");
822 		test_status = ZTEST_STATUS_CRITICAL_ERROR;
823 		end_report();
824 		exit(1);
825 	}
826 #else
827 	k_object_access_all_grant(&ztest_thread);
828 #endif
829 
830 	TC_SUITE_START(suite->name);
831 	current_test_failed_assumption = false;
832 	__ztest_set_test_result(ZTEST_RESULT_PENDING);
833 	__ztest_set_test_phase(TEST_PHASE_SETUP);
834 #ifndef KERNEL
835 	if (setjmp(test_suite_fail)) {
836 		__ztest_set_test_result(ZTEST_RESULT_SUITE_FAIL);
837 	}
838 #endif
839 	if (test_result != ZTEST_RESULT_SUITE_FAIL && suite->setup != NULL) {
840 		data = suite->setup();
841 	}
842 	if (param != NULL) {
843 		data = param;
844 	}
845 
846 	for (int i = 0; i < case_iter; i++) {
847 #ifdef CONFIG_ZTEST_SHUFFLE
848 		struct ztest_unit_test *tests_to_run[ZTEST_TEST_COUNT];
849 
850 		memset(tests_to_run, 0, ZTEST_TEST_COUNT * sizeof(struct ztest_unit_test *));
851 		z_ztest_shuffle(shuffle, (void **)tests_to_run,
852 				(intptr_t)_ztest_unit_test_list_start, ZTEST_TEST_COUNT,
853 				sizeof(struct ztest_unit_test));
854 		for (size_t j = 0; j < ZTEST_TEST_COUNT; ++j) {
855 			test = tests_to_run[j];
856 			/* Make sure that the test belongs to this suite */
857 			if (strcmp(suite->name, test->test_suite_name) != 0) {
858 				continue;
859 			}
860 			if (ztest_api.should_test_run(suite->name, test->name)) {
861 				test->stats->run_count++;
862 				tc_result = run_test(suite, test, data);
863 				if (tc_result == TC_PASS) {
864 					test->stats->pass_count++;
865 				} else if (tc_result == TC_SKIP) {
866 					test->stats->skip_count++;
867 				} else if (tc_result == TC_FAIL) {
868 					test->stats->fail_count++;
869 				}
870 				if (tc_result == TC_FAIL) {
871 					fail++;
872 				}
873 			}
874 
875 			if ((fail && FAIL_FAST) || test_status == ZTEST_STATUS_CRITICAL_ERROR) {
876 				break;
877 			}
878 		}
879 #else
880 		while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
881 			if (ztest_api.should_test_run(suite->name, test->name)) {
882 				test->stats->run_count++;
883 				tc_result = run_test(suite, test, data);
884 				if (tc_result == TC_PASS) {
885 					test->stats->pass_count++;
886 				} else if (tc_result == TC_SKIP) {
887 					test->stats->skip_count++;
888 				} else if (tc_result == TC_FAIL) {
889 					test->stats->fail_count++;
890 				}
891 
892 				if (tc_result == TC_FAIL) {
893 					fail++;
894 				}
895 			}
896 
897 			if ((fail && FAIL_FAST) || test_status == ZTEST_STATUS_CRITICAL_ERROR) {
898 				break;
899 			}
900 		}
901 #endif
902 		if (test_status == ZTEST_STATUS_OK && fail != 0) {
903 			test_status = ZTEST_STATUS_HAS_FAILURE;
904 		}
905 	}
906 
907 	TC_SUITE_END(suite->name, (fail > 0 ? TC_FAIL : TC_PASS));
908 	__ztest_set_test_phase(TEST_PHASE_TEARDOWN);
909 	if (suite->teardown != NULL) {
910 		suite->teardown(data);
911 	}
912 
913 	return fail;
914 }
915 
916 int z_ztest_run_test_suite(const char *name, bool shuffle,
917 	int suite_iter, int case_iter, void *param)
918 {
919 	return z_ztest_run_test_suite_ptr(ztest_find_test_suite(name), shuffle, suite_iter,
920 					  case_iter, param);
921 }
922 
923 #ifdef CONFIG_USERSPACE
924 K_APPMEM_PARTITION_DEFINE(ztest_mem_partition);
925 #endif
926 
927 /* Show a one-line summary for a test suite.
928  */
929 static void __ztest_show_suite_summary_oneline(struct ztest_suite_node *suite)
930 {
931 	int distinct_pass = 0, distinct_fail = 0, distinct_skip = 0, distinct_total = 0;
932 	int effective_total = 0;
933 	int expanded_pass = 0, expanded_passrate = 0;
934 	int passrate_major = 0, passrate_minor = 0, passrate_tail = 0;
935 	int suite_result = TC_PASS;
936 
937 	struct ztest_unit_test *test = NULL;
938 	unsigned int suite_duration_worst_ms = 0;
939 
940 	/** Summary of distinct test runs */
941 	while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
942 		distinct_total++;
943 		suite_duration_worst_ms += test->stats->duration_worst_ms;
944 		if (test->stats->skip_count == test->stats->run_count) {
945 			distinct_skip++;
946 		} else if (test->stats->pass_count == test->stats->run_count) {
947 			distinct_pass++;
948 		} else {
949 			distinct_fail++;
950 		}
951 	}
952 
953 	if (distinct_skip == distinct_total) {
954 		suite_result = TC_SKIP;
955 		passrate_major = passrate_minor = 0;
956 	} else {
957 		suite_result = (distinct_fail > 0) ? TC_FAIL : TC_PASS;
958 		effective_total = distinct_total - distinct_skip;
959 		expanded_pass = distinct_pass * 100000;
960 		expanded_passrate = expanded_pass / effective_total;
961 		passrate_major = expanded_passrate / 1000;
962 		passrate_minor = (expanded_passrate - passrate_major * 1000) / 10;
963 		passrate_tail = expanded_passrate - passrate_major * 1000 - passrate_minor * 10;
964 		if (passrate_tail >= 5) { /* rounding */
965 			passrate_minor++;
966 		}
967 	}
968 
969 	TC_SUMMARY_PRINT("SUITE %s - %3d.%02d%% [%s]: pass = %d, fail = %d, "
970 			 "skip = %d, total = %d duration = %u.%03u seconds\n",
971 			 TC_RESULT_TO_STR(suite_result), passrate_major, passrate_minor,
972 			 suite->name, distinct_pass, distinct_fail, distinct_skip, distinct_total,
973 			 suite_duration_worst_ms / 1000, suite_duration_worst_ms % 1000);
974 	log_flush();
975 }
976 
977 static void __ztest_show_suite_summary_verbose(struct ztest_suite_node *suite)
978 {
979 	struct ztest_unit_test *test = NULL;
980 	int tc_result = TC_PASS;
981 	int flush_frequency = 0;
982 
983 	if (IS_ENABLED(CONFIG_ZTEST_VERBOSE_SUMMARY) == 0) {
984 		return;
985 	}
986 
987 	while (((test = z_ztest_get_next_test(suite->name, test)) != NULL)) {
988 		if (test->stats->skip_count == test->stats->run_count) {
989 			tc_result = TC_SKIP;
990 		} else if (test->stats->pass_count == test->stats->run_count) {
991 			tc_result = TC_PASS;
992 		} else if (test->stats->pass_count == 0) {
993 			tc_result = TC_FAIL;
994 		} else {
995 			tc_result = TC_FLAKY;
996 		}
997 
998 		if (tc_result == TC_FLAKY) {
999 			TC_SUMMARY_PRINT(
1000 				" - %s - [%s.%s] - (Failed %d of %d attempts)"
1001 				" - duration = %u.%03u seconds\n",
1002 				TC_RESULT_TO_STR(tc_result), test->test_suite_name, test->name,
1003 				test->stats->run_count - test->stats->pass_count,
1004 				test->stats->run_count, test->stats->duration_worst_ms / 1000,
1005 				test->stats->duration_worst_ms % 1000);
1006 		} else {
1007 			TC_SUMMARY_PRINT(" - %s - [%s.%s] duration = %u.%03u seconds\n",
1008 					 TC_RESULT_TO_STR(tc_result), test->test_suite_name,
1009 					 test->name, test->stats->duration_worst_ms / 1000,
1010 					 test->stats->duration_worst_ms % 1000);
1011 		}
1012 
1013 		if (flush_frequency % 3 == 0) {
1014 			/** Reduce the flush frequency a bit to speed up the output */
1015 			log_flush();
1016 		}
1017 		flush_frequency++;
1018 	}
1019 	TC_SUMMARY_PRINT("\n");
1020 	log_flush();
1021 }
1022 
1023 static void __ztest_show_suite_summary(void)
1024 {
1025 	if (IS_ENABLED(CONFIG_ZTEST_SUMMARY) == 0) {
1026 		return;
1027 	}
1028 	/* Flush the log a lot to ensure that no summary content
1029 	 * is dropped if it goes through the logging subsystem.
1030 	 */
1031 	log_flush();
1032 	TC_SUMMARY_PRINT("\n------ TESTSUITE SUMMARY START ------\n\n");
1033 	log_flush();
1034 	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
1035 	     ptr < _ztest_suite_node_list_end; ++ptr) {
1036 
1037 		__ztest_show_suite_summary_oneline(ptr);
1038 		__ztest_show_suite_summary_verbose(ptr);
1039 	}
1040 	TC_SUMMARY_PRINT("------ TESTSUITE SUMMARY END ------\n\n");
1041 	log_flush();
1042 }
1043 
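/* Run a single suite suite_iter times when the predicate allows it, updating the suite's
 * stats; returns the number of times the suite was run.
 */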
1044 static int __ztest_run_test_suite(struct ztest_suite_node *ptr, const void *state, bool shuffle,
1045 				  int suite_iter, int case_iter, void *param)
1046 {
1047 	struct ztest_suite_stats *stats = ptr->stats;
1048 	int count = 0;
1049 
1050 	for (int i = 0; i < suite_iter; i++) {
1051 		if (ztest_api.should_suite_run(state, ptr)) {
1052 			int fail = z_ztest_run_test_suite_ptr(ptr, shuffle,
1053 							suite_iter, case_iter, param);
1054 
1055 			count++;
1056 			stats->run_count++;
1057 			stats->fail_count += (fail != 0) ? 1 : 0;
1058 		} else {
1059 			stats->skip_count++;
1060 		}
1061 	}
1062 
1063 	return count;
1064 }
1065 
1066 int z_impl_ztest_run_test_suites(const void *state, bool shuffle, int suite_iter, int case_iter)
1067 {
1068 	int count = 0;
1069 	void *param = NULL;
1070 	if (test_status == ZTEST_STATUS_CRITICAL_ERROR) {
1071 		return count;
1072 	}
1073 
1074 #ifdef CONFIG_ZTEST_COVERAGE_RESET_BEFORE_TESTS
1075 	gcov_reset_all_counts();
1076 #endif
1077 
1078 #ifdef CONFIG_ZTEST_SHUFFLE
1079 	struct ztest_suite_node *suites_to_run[ZTEST_SUITE_COUNT];
1080 
1081 	memset(suites_to_run, 0, ZTEST_SUITE_COUNT * sizeof(struct ztest_suite_node *));
1082 	z_ztest_shuffle(shuffle, (void **)suites_to_run, (intptr_t)_ztest_suite_node_list_start,
1083 			ZTEST_SUITE_COUNT, sizeof(struct ztest_suite_node));
1084 	for (size_t i = 0; i < ZTEST_SUITE_COUNT; ++i) {
1085 		count += __ztest_run_test_suite(suites_to_run[i], state, shuffle, suite_iter,
1086 						case_iter, param);
1087 		/* Stop running tests if we have a critical error or if we have a failure and
1088 		 * FAIL_FAST was set
1089 		 */
1090 		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
1091 		    (test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
1092 			break;
1093 		}
1094 	}
1095 #else
1096 	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
1097 	     ptr < _ztest_suite_node_list_end; ++ptr) {
1098 		count += __ztest_run_test_suite(ptr, state, shuffle, suite_iter, case_iter, param);
1099 		/* Stop running tests if we have a critical error or if we have a failure and
1100 		 * FAIL_FAST was set
1101 		 */
1102 		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
1103 		    (test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
1104 			break;
1105 		}
1106 	}
1107 #endif
1108 
1109 	return count;
1110 }
1111 
1112 void z_impl___ztest_set_test_result(enum ztest_result new_result)
1113 {
1114 	test_result = new_result;
1115 }
1116 
1117 void z_impl___ztest_set_test_phase(enum ztest_phase new_phase)
1118 {
1119 	cur_phase = new_phase;
1120 }
1121 
1122 #ifdef CONFIG_USERSPACE
1123 void z_vrfy___ztest_set_test_result(enum ztest_result new_result)
1124 {
1125 	z_impl___ztest_set_test_result(new_result);
1126 }
1127 #include <zephyr/syscalls/__ztest_set_test_result_mrsh.c>
1128 
1129 void z_vrfy___ztest_set_test_phase(enum ztest_phase new_phase)
1130 {
1131 	z_impl___ztest_set_test_phase(new_phase);
1132 }
1133 #include <zephyr/syscalls/__ztest_set_test_phase_mrsh.c>
1134 #endif /* CONFIG_USERSPACE */
1135 
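/* Flag a failure if any registered suite never ran, a test references a missing suite, or
 * per-test statistics are inconsistent.
 */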
1136 void ztest_verify_all_test_suites_ran(void)
1137 {
1138 	bool all_tests_run = true;
1139 	struct ztest_suite_node *suite;
1140 	struct ztest_unit_test *test;
1141 
1142 	if (IS_ENABLED(CONFIG_ZTEST_VERIFY_RUN_ALL)) {
1143 		for (suite = _ztest_suite_node_list_start; suite < _ztest_suite_node_list_end;
1144 		     ++suite) {
1145 			if (suite->stats->run_count < 1) {
1146 				PRINT_DATA("ERROR: Test suite '%s' did not run.\n", suite->name);
1147 				all_tests_run = false;
1148 			}
1149 		}
1150 
1151 		for (test = _ztest_unit_test_list_start; test < _ztest_unit_test_list_end; ++test) {
1152 			suite = ztest_find_test_suite(test->test_suite_name);
1153 			if (suite == NULL) {
1154 				PRINT_DATA("ERROR: Test '%s' assigned to test suite '%s' which "
1155 					   "doesn't "
1156 					   "exist\n",
1157 					   test->name, test->test_suite_name);
1158 				all_tests_run = false;
1159 			}
1160 		}
1161 
1162 		if (!all_tests_run) {
1163 			test_status = ZTEST_STATUS_HAS_FAILURE;
1164 		}
1165 	}
1166 
1167 	for (test = _ztest_unit_test_list_start; test < _ztest_unit_test_list_end; ++test) {
1168 		if (test->stats->fail_count + test->stats->pass_count + test->stats->skip_count !=
1169 		    test->stats->run_count) {
1170 			PRINT_DATA("Bad stats for %s.%s\n", test->test_suite_name, test->name);
1171 			test_status = 1;
1172 		}
1173 	}
1174 }
1175 
1176 void ztest_run_all(const void *state, bool shuffle, int suite_iter, int case_iter)
1177 {
1178 	ztest_api.run_all(state, shuffle, suite_iter, case_iter);
1179 }
1180 
1181 void __weak test_main(void)
1182 {
1183 #if CONFIG_ZTEST_SHUFFLE
1184 	ztest_run_all(NULL, true, NUM_ITER_PER_SUITE, NUM_ITER_PER_TEST);
1185 #else
1186 	ztest_run_all(NULL, false, NUM_ITER_PER_SUITE, NUM_ITER_PER_TEST);
1187 #endif
1188 	ztest_verify_all_test_suites_ran();
1189 }
1190 
1191 #ifndef KERNEL
1192 int main(void)
1193 {
1194 	z_init_mock();
1195 	test_main();
1196 	end_report();
1197 #ifdef CONFIG_ZTEST_NO_YIELD
1198 	/*
1199 	 * Rather than yielding to the idle thread, keep the part awake so the debugger can
1200 	 * still access it, since some SOCs cannot be debugged in low power states.
1201 	 */
1202 	uint32_t key = irq_lock();
1203 
1204 	while (1) {
1205 		; /* Spin */
1206 	}
1207 	irq_unlock(key);
1208 #endif
1209 	return test_status;
1210 }
1211 #else
1212 
1213 /* Shell */
1214 
1215 #ifdef CONFIG_ZTEST_SHELL
1216 static int cmd_list_suites(const struct shell *sh, size_t argc, char **argv)
1217 {
1218 	struct ztest_suite_node *suite;
1219 
1220 	for (suite = _ztest_suite_node_list_start; suite < _ztest_suite_node_list_end; ++suite) {
1221 		shell_print(sh, "%s", suite->name);
1222 	}
1223 	return 0;
1224 }
1225 
1226 static int cmd_list_cases(const struct shell *sh, size_t argc, char **argv)
1227 {
1228 	struct ztest_suite_node *ptr;
1229 	struct ztest_unit_test *test = NULL;
1230 	int test_count = 0;
1231 
1232 	for (ptr = _ztest_suite_node_list_start; ptr < _ztest_suite_node_list_end; ++ptr) {
1233 		test = NULL;
1234 		while ((test = z_ztest_get_next_test(ptr->name, test)) != NULL) {
1235 			shell_print(sh, "%s::%s", test->test_suite_name, test->name);
1236 			test_count++;
1237 		}
1238 	}
1239 	return 0;
1240 }
1241 extern void ztest_set_test_args(char *argv);
1242 extern void ztest_reset_test_args(void);
1243 
1244 static int cmd_runall(const struct shell *sh, size_t argc, char **argv)
1245 {
1246 	ztest_reset_test_args();
1247 	ztest_run_all(NULL, false, 1, 1);
1248 	end_report();
1249 	return 0;
1250 }
1251 
1252 #ifdef CONFIG_ZTEST_SHUFFLE
1253 static int cmd_shuffle(const struct shell *sh, size_t argc, char **argv)
1254 {
1255 
1256 	struct getopt_state *state;
1257 	int opt;
1258 	static struct option long_options[] = {{"suite_iter", required_argument, 0, 's'},
1259 					       {"case_iter", required_argument, 0, 'c'},
1260 					       {0, 0, 0, 0}};
1261 	int opt_index = 0;
1262 	int val;
1263 	int opt_num = 0;
1264 
1265 	int suite_iter = 1;
1266 	int case_iter = 1;
1267 
1268 	while ((opt = getopt_long(argc, argv, "s:c:", long_options, &opt_index)) != -1) {
1269 		state = getopt_state_get();
1270 		switch (opt) {
1271 		case 's':
1272 			val = atoi(state->optarg);
1273 			if (val < 1) {
1274 				shell_error(sh, "Invalid number of suite iterations");
1275 				return -ENOEXEC;
1276 			}
1277 			suite_iter = val;
1278 			opt_num++;
1279 			break;
1280 		case 'c':
1281 			val = atoi(state->optarg);
1282 			if (val < 1) {
1283 				shell_error(sh, "Invalid number of case iterations");
1284 				return -ENOEXEC;
1285 			}
1286 			case_iter = val;
1287 			opt_num++;
1288 			break;
1289 		default:
1290 			shell_error(sh, "Invalid option or option usage: %s",
1291 				    argv[opt_index + 1]);
1292 			return -ENOEXEC;
1293 		}
1294 	}
1295 	ztest_reset_test_args();
1296 	ztest_run_all(NULL, true, suite_iter, case_iter);
1297 	end_report();
1298 	return 0;
1299 }
1300 #endif
1301 
1302 static int cmd_run_suite(const struct shell *sh, size_t argc, char **argv)
1303 {
1304 	struct getopt_state *state;
1305 	int opt;
1306 	static struct option long_options[] = {{"repeat_iter", required_argument, NULL, 'r'},
1307 		{NULL, 0, NULL, 0}};
1308 	int opt_index = 0;
1309 	int val;
1310 	int opt_num = 0;
1311 	void *param = NULL;
1312 	int repeat_iter = 1;
1313 
1314 	while ((opt = getopt_long(argc, argv, "r:p:", long_options, &opt_index)) != -1) {
1315 		state = getopt_state_get();
1316 		switch (opt) {
1317 		case 'r':
1318 			val = atoi(state->optarg);
1319 			if (val < 1) {
1320 				shell_fprintf(sh, SHELL_ERROR,
1321 					"Invalid number of suite iterations\n");
1322 				return -ENOEXEC;
1323 			}
1324 			repeat_iter = val;
1325 			opt_num++;
1326 			break;
1327 		case 'p':
1328 			param = state->optarg;
1329 			opt_num++;
1330 			break;
1331 		default:
1332 			shell_fprintf(sh, SHELL_ERROR,
1333 				"Invalid option or option usage: %s\n", argv[opt_index + 1]);
1334 			return -ENOEXEC;
1335 		}
1336 	}
1337 	int count = 0;
1338 	bool shuffle = false;
1339 	const char *shell_command = argv[0];
1340 
1341 	/*
1342 	 * This if statement determines which argv contains the test name.
1343 	 * If the optional argument is used, the test name is in the third
1344 	 * argv instead of the first.
1345 	 */
1346 	if (opt_num == 1) {
1347 		ztest_set_test_args(argv[3]);
1348 	} else {
1349 		ztest_set_test_args(argv[1]);
1350 	}
1351 
1352 	for (struct ztest_suite_node *ptr = _ztest_suite_node_list_start;
1353 	     ptr < _ztest_suite_node_list_end; ++ptr) {
1354 		if (strcmp(shell_command, "run-testcase") == 0) {
1355 			count += __ztest_run_test_suite(ptr, NULL, shuffle, 1, repeat_iter, param);
1356 		} else if (strcmp(shell_command, "run-testsuite") == 0) {
1357 			count += __ztest_run_test_suite(ptr, NULL, shuffle, repeat_iter, 1, NULL);
1358 		}
1359 		if (test_status == ZTEST_STATUS_CRITICAL_ERROR ||
1360 		    (test_status == ZTEST_STATUS_HAS_FAILURE && FAIL_FAST)) {
1361 			break;
1362 		}
1363 	}
1364 	return 0;
1365 }
1366 
1367 static void testsuite_list_get(size_t idx, struct shell_static_entry *entry);
1368 
1369 SHELL_DYNAMIC_CMD_CREATE(testsuite_names, testsuite_list_get);
1370 
1371 static size_t testsuite_get_all_static(struct ztest_suite_node const **suites)
1372 {
1373 	*suites = _ztest_suite_node_list_start;
1374 	return _ztest_suite_node_list_end - _ztest_suite_node_list_start;
1375 }
1376 
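/* Return the idx-th registered suite whose name begins with prefix (any suite when prefix
 * is NULL or empty), or NULL if no such suite exists.
 */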
1377 static const struct ztest_suite_node *suite_lookup(size_t idx, const char *prefix)
1378 {
1379 	size_t match_idx = 0;
1380 	const struct ztest_suite_node *suite;
1381 	size_t len = testsuite_get_all_static(&suite);
1382 	const struct ztest_suite_node *suite_end = suite + len;
1383 
1384 	while (suite < suite_end) {
1385 		if ((suite->name != NULL) && (strlen(suite->name) != 0) &&
1386 		    ((prefix == NULL) || (strncmp(prefix, suite->name, strlen(prefix)) == 0))) {
1387 			if (match_idx == idx) {
1388 				return suite;
1389 			}
1390 			++match_idx;
1391 		}
1392 		++suite;
1393 	}
1394 
1395 	return NULL;
1396 }
1397 
1398 static void testsuite_list_get(size_t idx, struct shell_static_entry *entry)
1399 {
1400 	const struct ztest_suite_node *suite = suite_lookup(idx, "");
1401 
1402 	entry->syntax = (suite != NULL) ? suite->name : NULL;
1403 	entry->handler = NULL;
1404 	entry->help = NULL;
1405 	entry->subcmd = NULL;
1406 }
1407 
1408 /* clang-format off */
1409 	SHELL_STATIC_SUBCMD_SET_CREATE(
1410 		sub_ztest_cmds,
1411 		SHELL_CMD_ARG(run-all, NULL, "Run all tests", cmd_runall, 0, 0),
1412 #ifdef CONFIG_ZTEST_SHUFFLE
1413 		SHELL_COND_CMD_ARG(CONFIG_ZTEST_SHUFFLE, shuffle, NULL,
1414 			"Shuffle tests", cmd_shuffle, 0, 2),
1415 #endif
1416 		SHELL_CMD_ARG(list-testsuites, NULL,
1417 			"List all test suites", cmd_list_suites, 0, 0),
1418 		SHELL_CMD_ARG(list-testcases, NULL,
1419 			"List all test cases", cmd_list_cases, 0, 0),
1420 		SHELL_CMD_ARG(run-testsuite, &testsuite_names,
1421 			"Run test suite", cmd_run_suite, 2, 2),
1422 		SHELL_CMD_ARG(run-testcase, NULL, "Run testcase", cmd_run_suite, 2, 2),
1423 		SHELL_SUBCMD_SET_END /* Array terminated. */
1424 	);
1425 /* clang-format on */
1426 
1427 SHELL_CMD_REGISTER(ztest, &sub_ztest_cmds, "Ztest commands", NULL);
1428 #endif /* CONFIG_ZTEST_SHELL */
1429 
1430 int main(void)
1431 {
1432 #ifdef CONFIG_USERSPACE
1433 	/* Partition containing globals tagged with ZTEST_DMEM and ZTEST_BMEM
1434 	 * macros. Any variables that user code may reference need to be
1435 	 * placed in this partition if no other memory domain configuration
1436 	 * is made.
1437 	 */
1438 	k_mem_domain_add_partition(&k_mem_domain_default, &ztest_mem_partition);
1439 #ifdef Z_MALLOC_PARTITION_EXISTS
1440 	/* Allow access to malloc() memory */
1441 	k_mem_domain_add_partition(&k_mem_domain_default, &z_malloc_partition);
1442 #endif
1443 #endif /* CONFIG_USERSPACE */
1444 
1445 	z_init_mock();
1446 #ifndef CONFIG_ZTEST_SHELL
1447 	test_main();
1448 	end_report();
1449 	log_flush();
1450 	LOG_PANIC();
1451 	if (IS_ENABLED(CONFIG_ZTEST_RETEST_IF_PASSED)) {
1452 		static __noinit struct {
1453 			uint32_t magic;
1454 			uint32_t boots;
1455 		} state;
1456 		const uint32_t magic = 0x152ac523;
1457 
1458 		if (state.magic != magic) {
1459 			state.magic = magic;
1460 			state.boots = 0;
1461 		}
1462 		state.boots += 1;
1463 		if (test_status == 0) {
1464 			PRINT_DATA("Reset board #%u to test again\n", state.boots);
1465 			k_msleep(10);
1466 			sys_reboot(SYS_REBOOT_COLD);
1467 		} else {
1468 			PRINT_DATA("Failed after %u attempts\n", state.boots);
1469 			state.boots = 0;
1470 		}
1471 	}
1472 #ifdef CONFIG_ZTEST_NO_YIELD
1473 	/*
1474 	 * Rather than yielding to the idle thread, keep the part awake so the debugger can
1475 	 * still access it, since some SOCs cannot be debugged in low power states.
1476 	 */
1477 	uint32_t key = irq_lock();
1478 
1479 	while (1) {
1480 		; /* Spin */
1481 	}
1482 	irq_unlock(key);
1483 #endif /* CONFIG_ZTEST_NO_YIELD */
1484 #endif /* CONFIG_ZTEST_SHELL */
1485 	return 0;
1486 }
1487 #endif
1488