/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024/10/28     Shell        Added smp.smoke
 */

#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp_call.h>
#include <stdlib.h> /* rand(), srand() */

#define PERCPU_TEST_COUNT 10000 /* SMP calls issued by each worker thread */
#define NEWLINE_ON        80    /* progress markers printed per console line */

static struct rt_semaphore _utestd_exited;
static rt_thread_t _utestd[RT_CPUS_NR];
static rt_atomic_t _entry_counts[RT_CPUS_NR];

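/*
 * Emit one '#' per serviced call as a liveness indicator, breaking the
 * line every NEWLINE_ON markers to keep the console readable.
 */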
static void _logging_progress(void)
{
    static rt_atomic_t counts;
    rt_ubase_t old;

    rt_kputs("#");
    old = rt_atomic_add(&counts, 1);
    if (old % NEWLINE_ON == 0)
    {
        rt_kputs("\n");
    }
}

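/*
 * Callback delivered to each CPU in the requested mask. It is expected to
 * run with interrupts disabled (SYNC.004); `param` carries the requester's
 * CPU id, so each entry is credited to the issuing worker's counter.
 */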
static void _test_smp_cb(void *param)
{
    rt_ubase_t req_cpuid = (rt_ubase_t)param;

    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004 */
        uassert_true(0);
    }
    _logging_progress();
    rt_atomic_add(&_entry_counts[req_cpuid], 1);
}

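/*
 * Per-CPU worker. It issues PERCPU_TEST_COUNT asynchronous SMP calls with
 * random CPU masks, then verifies that the callback ran exactly as many
 * times as the accumulated population count of those masks (MP.002).
 */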
static void _utestd_entry(void *oncpu_param)
{
    rt_ubase_t oncpu = (rt_ubase_t)oncpu_param;
    volatile int cpu_mask;
    volatile int popcount = 0;
    rt_thread_t curthr = rt_thread_self();

    if (rt_hw_cpu_id() != oncpu)
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    for (size_t i = 0; i < PERCPU_TEST_COUNT; i++)
    {
        cpu_mask = rand() % RT_ALL_CPU;

        rt_smp_call_cpu_mask(cpu_mask, _test_smp_cb, oncpu_param, 0);
        popcount += __builtin_popcount(cpu_mask);
    }

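    /*
     * Migrate this thread across every CPU in turn. Presumably this also
     * acts as a barrier: once the thread has run on each core, any call
     * requests still pending there should have been serviced, so the
     * counter checked below is stable.
     */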
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_thread_control(curthr, RT_THREAD_CTRL_BIND_CPU, (void *)i);
    }

    LOG_D("popcount %d, _entry_counts[%d] %d", popcount, (int)oncpu,
          (int)rt_atomic_load(&_entry_counts[oncpu]));

    /* MP.002 */
    uassert_true(popcount == rt_atomic_load(&_entry_counts[oncpu]));

    rt_sem_release(&_utestd_exited);
}

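/*
 * Test body: start every worker, then wait for each of them to signal
 * completion on _utestd_exited.
 */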
static void _async_call(void)
{
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_thread_startup(_utestd[i]);
    }

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_sem_take(&_utestd_exited, RT_WAITING_FOREVER);
    }
}

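/*
 * Create one worker per CPU, bind it to that core, reset the entry
 * counters, and seed rand() before the unit runs.
 */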
static rt_err_t utest_tc_init(void)
{
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_atomic_store(&_entry_counts[i], 0);
        _utestd[i] = rt_thread_create("utestd", _utestd_entry, (void *)i,
                                      UTEST_THR_STACK_SIZE, UTEST_THR_PRIORITY,
                                      20);

        /* SYNC.001, SYNC.002, SYNC.003 */
        uassert_true(_utestd[i] != RT_NULL);

        /* bind only after the NULL check so rt_thread_control() never
         * receives an invalid handle */
        rt_thread_control(_utestd[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
    }

    rt_sem_init(&_utestd_exited, "utestd", 0, RT_IPC_FLAG_PRIO);
    srand(rt_tick_get());

    return RT_EOK;
}

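/*
 * The workers were created with rt_thread_create(), so they are reclaimed
 * by the kernel once their entry functions return; only the semaphore
 * needs to be detached here.
 */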
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_utestd_exited);

    return RT_EOK;
}

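/* Register the smoke unit with the utest framework. */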
static void _testcase(void)
{
    UTEST_UNIT_RUN(_async_call);
}

UTEST_TC_EXPORT(_testcase, "testcase.smp.smoke.003", utest_tc_init, utest_tc_cleanup, 10);