/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-25     Shell        init ver.
 */
10 #define __RT_KERNEL_SOURCE__
11 #include <rtthread.h>
12 #include "utest.h"
13
14 #define TEST_LOOP_TIMES (100 * 1000)
15 #define TEST_PROGRESS_COUNTS (36)
16 #define TEST_THREAD_COUNT (RT_CPUS_NR * 1)
17 #define TEST_PROGRESS_ON (TEST_LOOP_TIMES*TEST_THREAD_COUNT/TEST_PROGRESS_COUNTS)
18
19 static struct rt_semaphore _thr_exit_sem;
20 static rt_atomic_t _progress_counter;
21
22 static volatile rt_thread_t threads_group[TEST_THREAD_COUNT][2];
23
_thread_entry1(void * param)24 static void _thread_entry1(void *param)
25 {
26 rt_base_t critical_level;
27 size_t idx = (size_t)param;
28
29 for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
30 {
31 critical_level = rt_enter_critical();
32
33 rt_thread_suspend(rt_thread_self());
34 rt_thread_resume(threads_group[idx][1]);
35
36 rt_exit_critical_safe(critical_level);
37
38 if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
39 uassert_true(1);
40 }
41
42 rt_sem_release(&_thr_exit_sem);
43 return;
44 }
45
_thread_entry2(void * param)46 static void _thread_entry2(void *param)
47 {
48 rt_base_t critical_level;
49 size_t idx = (size_t)param;
50
51 for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
52 {
53 critical_level = rt_enter_critical();
54
55 rt_thread_suspend(rt_thread_self());
56 rt_thread_resume(threads_group[idx][0]);
57
58 rt_exit_critical_safe(critical_level);
59
60 if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
61 uassert_true(1);
62 }
63
64 rt_sem_release(&_thr_exit_sem);
65 return;
66 }
67
scheduler_tc(void)68 static void scheduler_tc(void)
69 {
70 for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
71 {
72 rt_thread_t t1 =
73 rt_thread_create(
74 "t1",
75 _thread_entry1,
76 (void *)i,
77 UTEST_THR_STACK_SIZE,
78 UTEST_THR_PRIORITY + 1,
79 100);
80 rt_thread_t t2 =
81 rt_thread_create(
82 "t2",
83 _thread_entry2,
84 (void *)i,
85 UTEST_THR_STACK_SIZE,
86 UTEST_THR_PRIORITY + 1,
87 100);
88
89 threads_group[i][0] = t1;
90 threads_group[i][1] = t2;
91 }
92
93 for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
94 {
95 rt_thread_startup(threads_group[i][0]);
96 rt_thread_startup(threads_group[i][1]);
97 }
98
99 for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
100 {
101 rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
102 }
103 }
104
utest_tc_init(void)105 static rt_err_t utest_tc_init(void)
106 {
107 rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
108 return RT_EOK;
109 }
110
utest_tc_cleanup(void)111 static rt_err_t utest_tc_cleanup(void)
112 {
113 rt_sem_detach(&_thr_exit_sem);
114 return RT_EOK;
115 }
116
testcase(void)117 static void testcase(void)
118 {
119 UTEST_UNIT_RUN(scheduler_tc);
120 }
121 UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.thread", utest_tc_init, utest_tc_cleanup, 10);
122