/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-17     Shell        the first version
 */
#define __RT_IPC_SOURCE__

#include <rtthread.h>
#include "rthw.h"
#include "utest.h"

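/*
 * Scheduler stress test: threads are arranged in a matrix of
 * TEST_LEVEL_COUNTS priority levels x KERN_TEST_CONCURRENT_THREADS threads
 * per level. Adjacent levels are chained by semaphores: level 0 feeds
 * level 1, each middle level relays to the next, and the last level feeds
 * back to level 0. Per-CPU counters record how many iterations ran on each
 * core so the total can be verified at the end of the test.
 */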
#define KERN_TEST_CONFIG_LOOP_TIMES     160
#define KERN_TEST_CONCURRENT_THREADS    (RT_CPUS_NR * 2)
#define KERN_TEST_CONFIG_HIGHEST_PRIO   3
#define KERN_TEST_CONFIG_LOWEST_PRIO    (RT_THREAD_PRIORITY_MAX - 2)

#define TEST_LEVEL_COUNTS (KERN_TEST_CONFIG_LOWEST_PRIO - KERN_TEST_CONFIG_HIGHEST_PRIO + 1)
#if TEST_LEVEL_COUNTS <= RT_CPUS_NR
#warning for best coverage of this test, TEST_LEVEL_COUNTS should be greater than RT_CPUS_NR
#endif
#if KERN_TEST_CONCURRENT_THREADS < RT_CPUS_NR
#warning for best coverage of this test, KERN_TEST_CONCURRENT_THREADS should be greater than RT_CPUS_NR
#endif
#if KERN_TEST_CONFIG_LOWEST_PRIO >= RT_THREAD_PRIORITY_MAX - 1
#error the lowest test priority must be higher than the idle thread priority
#endif

static rt_atomic_t _star_counter;                               /* progress characters printed so far */
static struct rt_semaphore _thr_exit_sem;                       /* signalled once by each worker on exit */
static struct rt_semaphore _level_waiting[TEST_LEVEL_COUNTS];   /* one token semaphore per priority level */
static rt_thread_t _thread_matrix[TEST_LEVEL_COUNTS][KERN_TEST_CONCURRENT_THREADS];
static rt_atomic_t _load_average[RT_CPUS_NR];                   /* iterations executed on each CPU */

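/* Print one progress character, tagged with the CPU the caller is running on,
 * and insert a newline every 30 characters to keep the log readable. */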
static void _print_char(rt_thread_t thr_self, int character)
{
    rt_base_t current_counter;

#ifdef RT_USING_SMP
    rt_kprintf("%c%d", character, RT_SCHED_CTX(thr_self).oncpu);
#else
    rt_kprintf("%c0", character);
#endif /* RT_USING_SMP */

    current_counter = rt_atomic_add(&_star_counter, 1);
    if (current_counter % 30 == 0)
    {
        rt_kprintf("\n");
    }
}

/* charge one completed iteration to the CPU the caller is running on */
static void _stats_load_avg_inc(void)
{
    int cpuid;

    cpuid = rt_hw_cpu_id();
    rt_atomic_add(&_load_average[cpuid], 1);
}

/* print the per-CPU counters and verify the total equals the expected work */
static void _stats_load_avg_print(void)
{
    rt_base_t counts = 0;
    const rt_base_t total_test_counts = KERN_TEST_CONFIG_LOOP_TIMES * TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS;

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf("%ld ", _load_average[i]);
        counts += _load_average[i];
    }

    rt_kprintf("\n");
    uassert_int_equal(counts, total_test_counts);
}

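/*
 * Worker entry. The 'param' value selects the priority level:
 *  - level 0 produces a token for level 1, then waits on its own semaphore;
 *  - the last level consumes its token, prints progress, sleeps one tick,
 *    then hands a token back to level 0, closing the loop;
 *  - every other level relays tokens from its semaphore to the next level.
 * Each worker loops KERN_TEST_CONFIG_LOOP_TIMES times and signals
 * _thr_exit_sem on completion.
 */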
static void _thread_entry(void *param)
{
    int level = (rt_ubase_t)param;
    rt_thread_t thr_self = rt_thread_self();

    if (level == 0)
    {
        /* always the first to execute among other working threads */
        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
        {
            /* notify our consumer */
            rt_sem_release(&_level_waiting[level + 1]);

            _stats_load_avg_inc();

            /* waiting for resource of ours */
            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
        }
    }
    else if (level == TEST_LEVEL_COUNTS - 1)
    {
        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
        {
            /* waiting for our resource first */
            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);

            _stats_load_avg_inc();

            _print_char(thr_self, '*');

            rt_thread_delay(1);

            /* produce for level 0 worker */
            rt_sem_release(&_level_waiting[0]);
        }
    }
    else
    {
        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
        {
            /* waiting for resource of ours */
            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);

            _stats_load_avg_inc();

            /* notify our consumer */
            rt_sem_release(&_level_waiting[level + 1]);
        }
    }

    uassert_true(1);
    rt_sem_release(&_thr_exit_sem);

    return;
}

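/* Start every thread in the matrix, wait until all of them have signalled
 * _thr_exit_sem, then print and verify the per-CPU load counters. */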
static void scheduler_tc(void)
{
    LOG_I("Test starts...");
    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
    {
        for (size_t j = 0; j < KERN_TEST_CONCURRENT_THREADS; j++)
        {
            rt_thread_startup(_thread_matrix[i][j]);
        }
    }
    LOG_I("%d threads startup...", TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS);

    /* waiting for sub-threads to exit */
    for (size_t i = 0; i < TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS; i++)
    {
        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
    }

    /* print load average */
    _stats_load_avg_print();
}

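/* Set up the test: reset the counters, initialize the exit and per-level
 * semaphores, and create (but do not start) one thread per matrix slot,
 * with its priority derived from its level. */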
static rt_err_t utest_tc_init(void)
{
    LOG_I("Setup environment...");
    _star_counter = 1;
    rt_memset(_load_average, 0, sizeof(_load_average));
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);

    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
    {
        rt_sem_init(&_level_waiting[i], "test", 0, RT_IPC_FLAG_PRIO);

        for (size_t j = 0; j < KERN_TEST_CONCURRENT_THREADS; j++)
        {
            _thread_matrix[i][j] =
                rt_thread_create("test",
                                 _thread_entry,
                                 (void *)i,
                                 UTEST_THR_STACK_SIZE,
                                 KERN_TEST_CONFIG_HIGHEST_PRIO + i,
                                 5);
            if (!_thread_matrix[i][j])
                uassert_not_null(_thread_matrix[i][j]);
        }
    }
    return RT_EOK;
}

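/* Tear down: detach the exit semaphore and the per-level semaphores. */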
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_thr_exit_sem);
    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
    {
        rt_sem_detach(&_level_waiting[i]);
    }
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(scheduler_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.sem", utest_tc_init, utest_tc_cleanup, 10);