/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-25     Shell        init ver.
 */
#define __RT_KERNEL_SOURCE__
#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"

#define TEST_SECONDS 10
#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
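/* the two threads together perform 2 * TEST_LOOP_TICKS iterations; a passing
 * assertion is recorded every TEST_PROGRESS_ON iterations, giving roughly
 * TEST_PROGRESS_COUNTS progress marks over the whole run */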
#define TEST_PROGRESS_COUNTS (36)
#define TEST_PROGRESS_ON (TEST_LOOP_TICKS*2/TEST_PROGRESS_COUNTS)

static struct rt_semaphore _thr_exit_sem;
static struct rt_mutex _ipc_primitive;
static struct rt_semaphore _cons_can_take_mtx;
static struct rt_semaphore _prod_can_take_mtx;
static rt_atomic_t _progress_counter;
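/* identity of the last mutex holder; the two threads must alternate strictly,
 * so a repeated holder indicates a broken hand-over */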
#define CONSUMER_MAGIC 0x11223344
#define PRODUCER_MAGIC 0x44332211
static rt_atomic_t _last_holder_flag = CONSUMER_MAGIC;
static rt_base_t _timedout_failed_times = 0;

/**
 * Test timed-out IPC under the race condition where the timeout routine and
 * the producer thread may both try to wake up the sleeper.
 *
 * The test forks two threads, one producer and one consumer. The producer
 * loops and triggers the IPC right on the edge of a new tick. The consumer
 * waits on the IPC with a timeout of 1 tick.
 */

static void _wait_until_edge(void)
{
    rt_tick_t entry_level, current;
    rt_base_t random_latency;

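    /* spin until the tick counter advances so we return right at a tick edge */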
    entry_level = rt_tick_get();
    do
    {
        current = rt_tick_get();
    }
    while (current == entry_level);

    /* add a random latency after the edge for the test */
    random_latency = rand() % 1000 * 1000;
    entry_level = current;
    for (size_t i = 0; i < random_latency; i++)
    {
        current = rt_tick_get();
        if (current != entry_level)
            break;
    }
}

static void _producer_entry(void *param)
{
    rt_err_t error;
    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
    {
        /**
         * only try to take the mutex after the consumer has taken it
         * following our last release.
         */
        error = rt_sem_take(&_prod_can_take_mtx, RT_WAITING_FOREVER);
        if (error)
        {
            uassert_true(0);
            break;
        }

        error = rt_mutex_take(&_ipc_primitive, RT_WAITING_FOREVER);
        if (error)
        {
            uassert_true(0);
            break;
        }

        /* ensure that the mutex is held in a round-robin fashion */
        if (rt_atomic_load(&_last_holder_flag) != CONSUMER_MAGIC)
        {
            uassert_true(0);
            break;
        }
        else
        {
            rt_atomic_store(&_last_holder_flag, PRODUCER_MAGIC);
            rt_sem_release(&_cons_can_take_mtx);
        }

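        /* hold the mutex across a tick boundary so the consumer's 1-tick
         * timeout races with the release below */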
        _wait_until_edge();

        rt_mutex_release(&_ipc_primitive);

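        /* record a passing assertion periodically as a liveness mark */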
        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }

    rt_sem_release(&_thr_exit_sem);
    return;
}

static void _consumer_entry(void *param)
{
    rt_err_t error;
    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
    {
        /**
         * only try to take the mutex after the producer has taken it
         * following our last release.
         */
        error = rt_sem_take(&_cons_can_take_mtx, RT_WAITING_FOREVER);
        if (error)
        {
            uassert_true(0);
            break;
        }

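        /* keep retrying with a 1-tick timeout; after a timeout this thread
         * must not have become the owner of the mutex */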
        while (1)
        {
            error = rt_mutex_take_interruptible(&_ipc_primitive, 1);
            if (error == -RT_ETIMEOUT)
            {
                _timedout_failed_times++;
                if (rt_mutex_get_owner(&_ipc_primitive) == rt_thread_self())
                {
                    uassert_true(0);
                    break;
                }
            }
            else
            {
                break;
            }
        }

        if (error != RT_EOK)
        {
            uassert_true(0);
            break;
        }

        /* ensure that the mutex is held in a round-robin fashion */
        if (rt_atomic_load(&_last_holder_flag) != PRODUCER_MAGIC)
        {
            uassert_true(0);
            break;
        }
        else
        {
            rt_atomic_store(&_last_holder_flag, CONSUMER_MAGIC);
            rt_sem_release(&_prod_can_take_mtx);
        }

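        /* after the release, this thread must no longer be the owner */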
        rt_mutex_release(&_ipc_primitive);
        if (rt_mutex_get_owner(&_ipc_primitive) == rt_thread_self())
        {
            uassert_true(0);
            break;
        }

        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }

    rt_sem_release(&_thr_exit_sem);
    return;
}

static void timed_mtx_tc(void)
{
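    /* both threads share the same priority; the different time slices
     * (4 vs. 100 ticks) vary how the two interleave */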
    rt_thread_t prod = rt_thread_create(
        "prod",
        _producer_entry,
        (void *)0,
        UTEST_THR_STACK_SIZE,
        UTEST_THR_PRIORITY + 1,
        4);

    rt_thread_t cons = rt_thread_create(
        "cons",
        _consumer_entry,
        (void *)0,
        UTEST_THR_STACK_SIZE,
        UTEST_THR_PRIORITY + 1,
        100);

    rt_thread_startup(prod);
    rt_thread_startup(cons);

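    /* wait for both threads to signal completion; the timeout is generous
     * compared to the expected run time */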
    for (size_t i = 0; i < 2; i++)
    {
        uassert_int_equal(
            rt_sem_take(&_thr_exit_sem, 4 * TEST_LOOP_TICKS),
            RT_EOK);
    }

    /* Summary */
    LOG_I("Total failed times: %ld (in %d)\n", _timedout_failed_times, TEST_LOOP_TICKS);
}

static rt_err_t utest_tc_init(void)
{
    _timedout_failed_times = 0;

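    /* the producer's semaphore starts at 1 so it is the first to take the mutex */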
    rt_mutex_init(&_ipc_primitive, "ipc", RT_IPC_FLAG_PRIO);
    rt_sem_init(&_cons_can_take_mtx, "test", 0, RT_IPC_FLAG_PRIO);
    rt_sem_init(&_prod_can_take_mtx, "test", 1, RT_IPC_FLAG_PRIO);
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    rt_mutex_detach(&_ipc_primitive);
    rt_sem_detach(&_cons_can_take_mtx);
    rt_sem_detach(&_prod_can_take_mtx);
    rt_sem_detach(&_thr_exit_sem);
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(timed_mtx_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.timed_mtx", utest_tc_init, utest_tc_cleanup, TEST_SECONDS * 2);