/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024/10/28     Shell        Added smp.smoke
 */

#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp_call.h>
#include <stdlib.h> /* rand(), srand() */

#define PERCPU_TEST_COUNT 10000
#define NEWLINE_ON        80
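
/*
 * Smoke test for the blocking rt_smp_call_cpu_mask() API: one requester
 * thread is bound to each CPU, and every requester issues PERCPU_TEST_COUNT
 * calls against randomly drawn CPU masks while the callback tallies how many
 * times it ran on behalf of each requester.
 */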

static struct rt_semaphore _utestd_exited;
static rt_thread_t _utestd[RT_CPUS_NR];
static rt_atomic_t _entry_counts[RT_CPUS_NR];

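/* print one '#' per callback invocation, wrapping the line every NEWLINE_ON marks */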
static void _logging_progress(void)
{
    static rt_atomic_t counts;
    rt_ubase_t old;

    rt_kputs("#");
    old = rt_atomic_add(&counts, 1);
    if (old % NEWLINE_ON == 0)
    {
        rt_kputs("\n");
    }
}

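/*
 * Callback executed on every targeted CPU. It must run with interrupts
 * disabled (SYNC.004); param carries the requester's CPU id so the
 * invocation is credited to the right counter.
 */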
static void _test_smp_cb(void *param)
{
    rt_ubase_t req_cpuid = (rt_ubase_t)param;

    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    _logging_progress();
    rt_atomic_add(&_entry_counts[req_cpuid], 1);
}

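/*
 * Per-CPU requester thread: verifies it runs on the CPU it was bound to,
 * then stress-tests rt_smp_call_cpu_mask() with random target masks.
 */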
static void _utestd_entry(void *oncpu_param)
{
    rt_ubase_t oncpu = (rt_ubase_t)oncpu_param;
    volatile int cpu_mask;
    volatile int popcount = 0;
    rt_ubase_t tested_cpus = 0;

    if (rt_hw_cpu_id() != oncpu)
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    for (size_t i = 0; i < PERCPU_TEST_COUNT; i++)
    {
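        /*
         * rand() % RT_ALL_CPU draws a mask in [0, RT_ALL_CPU), so a single
         * draw may miss some CPUs; OR-ing the draws into tested_cpus
         * records the coverage reached over the whole loop.
         */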
        cpu_mask = rand() % RT_ALL_CPU;
        tested_cpus |= cpu_mask;

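        /*
         * SMP_CALL_WAIT_ALL blocks until every CPU in the mask has run
         * _test_smp_cb, so the counters are settled when the loop exits.
         */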
        rt_smp_call_cpu_mask(cpu_mask, _test_smp_cb, oncpu_param, SMP_CALL_WAIT_ALL);
        popcount += __builtin_popcount(cpu_mask);
    }

    LOG_D("popcount %d, _entry_counts[%u] %ld", popcount, (unsigned)oncpu,
          (long)rt_atomic_load(&_entry_counts[oncpu]));

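    /*
     * Each bit set in a requested mask yields exactly one callback credited
     * to this requester, so the popcount sum must equal the entry count.
     */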
    /* TARG.001 */
    uassert_true(popcount == rt_atomic_load(&_entry_counts[oncpu]));

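    /*
     * Over PERCPU_TEST_COUNT random draws the accumulated mask is expected
     * to reach every CPU at least once.
     */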
    /* TOP.001, TOP.002 */
    uassert_true(tested_cpus == RT_ALL_CPU);

    rt_sem_release(&_utestd_exited);
}

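/*
 * The unit under test: start one requester per CPU, then block until all of
 * them signal completion.
 */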
static void _blocking_mtsafe_call(void)
{
    rt_err_t error;
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        error = rt_thread_startup(_utestd[i]);

        /* SYNC.001, SYNC.002, SYNC.003 */
        uassert_true(!error);
    }

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_sem_take(&_utestd_exited, RT_WAITING_FOREVER);
    }
}

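/*
 * Create one requester thread per CPU, bind it to that CPU, reset the
 * counters and seed the RNG before the unit runs.
 */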
static rt_err_t utest_tc_init(void)
{
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_atomic_store(&_entry_counts[i], 0);
        _utestd[i] = rt_thread_create("utestd", _utestd_entry, (void *)i,
                                      UTEST_THR_STACK_SIZE, UTEST_THR_PRIORITY,
                                      20);

        /* SYNC.001, SYNC.002, SYNC.003: validate the handle before use */
        uassert_true(_utestd[i] != RT_NULL);

        rt_thread_control(_utestd[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
    }

    rt_sem_init(&_utestd_exited, "utestd", 0, RT_IPC_FLAG_PRIO);
    srand(rt_tick_get());

    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_utestd_exited);

    return RT_EOK;
}

static void _testcase(void)
{
    UTEST_UNIT_RUN(_blocking_mtsafe_call);
}

UTEST_TC_EXPORT(_testcase, "testcase.smp.smoke.002", utest_tc_init, utest_tc_cleanup, 10);