/*
 * Copyright (c) 2006-2019, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-10-14     tyx          the first version
 */

#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"

#define TEST_SLAB_SIZE (1024 * 1024)

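/* Compare every byte of a memory region against an expected fill value.
 * Returns 0 when the whole region matches, otherwise the first difference. */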
static int _mem_cmp(void *ptr, rt_uint8_t v, rt_size_t size)
{
    while (size-- != 0)
    {
        if (*(rt_uint8_t *)ptr != v)
            return *(rt_uint8_t *)ptr - v;
        ptr = (rt_uint8_t *)ptr + 1;
    }
    return 0;
}

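/* Header placed at the start of every allocated block: linked into the list of
 * live blocks, and recording the block size and the magic byte used to fill it. */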
struct slab_alloc_context
{
    rt_list_t node;
    rt_size_t size;
    rt_uint8_t magic;
};

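/* Bookkeeping for one test run: the list of live blocks, their count,
 * and the tick values that bound the test duration and progress output. */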
struct slab_alloc_head
{
    rt_list_t list;
    rt_size_t count;
    rt_tick_t start;
    rt_tick_t end;
    rt_tick_t interval;
};

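/* Random allocation size range (in units of struct slab_alloc_context)
 * and test duration in seconds. */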
#define SLAB_RANG_ALLOC_BLK_MIN      2
#define SLAB_RANG_ALLOC_BLK_MAX      5
#define SLAB_RANG_ALLOC_TEST_TIME    5

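/* Randomized alloc/free stress test: for a fixed time window, allocate blocks of
 * random size filled with a magic pattern, free them in random order, and verify
 * the pattern on every free to catch overlap or corruption in the slab heap. */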
static void slab_alloc_test(void)
{
    struct slab_alloc_head head;
    rt_uint8_t *buf;
    rt_slab_t heap;
    rt_size_t size;
    struct slab_alloc_context *ctx;

    /* init */
    rt_list_init(&head.list);
    head.count = 0;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(SLAB_RANG_ALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;
    buf = rt_malloc(TEST_SLAB_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    rt_memset(buf, 0xAA, TEST_SLAB_SIZE);
    heap = rt_slab_init("slab_tc", buf, TEST_SLAB_SIZE);
    // test run
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");
        }
        // 60% probability to perform an alloc operation
        if (rand() % 10 >= 4)
        {
            size = rand() % SLAB_RANG_ALLOC_BLK_MAX + SLAB_RANG_ALLOC_BLK_MIN;
            size *= sizeof(struct slab_alloc_context);
            ctx = rt_slab_alloc(heap, size);
            if (ctx == RT_NULL)
            {
                if (head.count == 0)
                {
                    break;
                }
                /* out of memory: free half of the live blocks, verifying each one */
                size = head.count / 2;
                while (size != head.count)
                {
                    ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
                    rt_list_remove(&ctx->node);
                    if (ctx->size > sizeof(*ctx))
                    {
                        if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                        {
                            uassert_true(0);
                        }
                    }
                    rt_memset(ctx, 0xAA, ctx->size);
                    rt_slab_free(heap, ctx);
                    head.count--;
                }
                continue;
            }
            //if (RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE) != (rt_ubase_t)ctx)
            //{
            //    uassert_int_equal(RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE), (rt_ubase_t)ctx);
            //}
            /* fill the new block with its magic byte so corruption can be detected on free */
            rt_memset(ctx, 0, size);
            rt_list_init(&ctx->node);
            ctx->size = size;
            ctx->magic = rand() & 0xff;
            if (ctx->size > sizeof(*ctx))
            {
                rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            }
            rt_list_insert_after(&head.list, &ctx->node);
            head.count += 1;
        }
        else
        {
            /* free operation: verify the magic pattern before returning the block */
            if (!rt_list_isempty(&head.list))
            {
                ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
                rt_list_remove(&ctx->node);
                if (ctx->size > sizeof(*ctx))
                {
                    if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                    {
                        uassert_true(0);
                    }
                }
                rt_memset(ctx, 0xAA, ctx->size);
                rt_slab_free(heap, ctx);
                head.count--;
            }
        }
    }
    /* free any remaining blocks */
    while (!rt_list_isempty(&head.list))
    {
        ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
        rt_list_remove(&ctx->node);
        if (ctx->size > sizeof(*ctx))
        {
            if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
            {
                uassert_true(0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_slab_free(heap, ctx);
        head.count--;
    }
    uassert_int_equal(head.count, 0);
    // slab heap deinit
    rt_slab_detach(heap);
    /* release test resources */
    rt_free(buf);
}

#define SLAB_RANG_REALLOC_BLK_MIN    0
#define SLAB_RANG_REALLOC_BLK_MAX    5
#define SLAB_RANG_REALLOC_TEST_TIME  5

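/* Header kept at the start of every reallocated block, recording its
 * current size and the magic byte used to fill the remainder. */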
struct slab_realloc_context
{
    rt_size_t size;
    rt_uint8_t magic;
};

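/* Bookkeeping for the realloc test: a fixed-size table of block pointers
 * plus the tick values that bound the test duration and progress output. */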
struct slab_realloc_head
{
    struct slab_realloc_context **ctx_tab;
    rt_size_t count;
    rt_tick_t start;
    rt_tick_t end;
    rt_tick_t interval;
};

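/* Randomized realloc stress test: repeatedly realloc random slots of a pointer
 * table to random sizes (including 0, which frees the block), checking that the
 * preserved part of each block still holds its magic pattern after every move. */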
static void slab_realloc_test(void)
{
    struct slab_realloc_head head;
    rt_uint8_t *buf;
    rt_slab_t heap;
    rt_size_t size, idx;
    struct slab_realloc_context *ctx;
    int res;

    size = RT_ALIGN(sizeof(struct slab_realloc_context), RT_ALIGN_SIZE) + RT_ALIGN_SIZE;
    size = TEST_SLAB_SIZE / size;
    /* init */
    head.ctx_tab = RT_NULL;
    head.count = size;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(SLAB_RANG_REALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;
    buf = rt_malloc(TEST_SLAB_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    rt_memset(buf, 0xAA, TEST_SLAB_SIZE);
    heap = rt_slab_init("slab_tc", buf, TEST_SLAB_SIZE);
    /* init ctx tab */
    size = head.count * sizeof(struct slab_realloc_context *);
    head.ctx_tab = rt_slab_alloc(heap, size);
    uassert_not_null(head.ctx_tab);
    rt_memset(head.ctx_tab, 0, size);
    // test run
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");
        }
        size = rand() % SLAB_RANG_REALLOC_BLK_MAX + SLAB_RANG_REALLOC_BLK_MIN;
        size *= sizeof(struct slab_realloc_context);
        idx = rand() % head.count;
        ctx = rt_slab_realloc(heap, head.ctx_tab[idx], size);
        if (ctx == RT_NULL)
        {
            if (size == 0)
            {
                /* realloc to size 0 frees the block */
                if (head.ctx_tab[idx])
                {
                    head.ctx_tab[idx] = RT_NULL;
                }
            }
            else
            {
                /* out of memory: free roughly half of the live blocks, verifying each one */
                for (idx = 0; idx < head.count; idx++)
                {
                    ctx = head.ctx_tab[idx];
                    if (rand() % 2 && ctx)
                    {
                        if (ctx->size > sizeof(*ctx))
                        {
                            res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
                            if (res != 0)
                            {
                                uassert_int_equal(res, 0);
                            }
                        }
                        rt_memset(ctx, 0xAA, ctx->size);
                        rt_slab_realloc(heap, ctx, 0);
                        head.ctx_tab[idx] = RT_NULL;
                    }
                }
            }
            continue;
        }
        /* check slab: the preserved part of the old block must keep its magic pattern */
        if (head.ctx_tab[idx] != RT_NULL)
        {
            res = 0;
            if (ctx->size < size)
            {
                if (ctx->size > sizeof(*ctx))
                {
                    res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
                }
            }
            else
            {
                if (size > sizeof(*ctx))
                {
                    res = _mem_cmp(&ctx[1], ctx->magic, size - sizeof(*ctx));
                }
            }
            if (res != 0)
            {
                uassert_int_equal(res, 0);
            }
        }
        /* init slab: refill the block with a new magic pattern */
        ctx->magic = rand() & 0xff;
        ctx->size = size;
        if (ctx->size > sizeof(*ctx))
        {
            rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
        }
        head.ctx_tab[idx] = ctx;
    }
    // free all slab
    for (idx = 0; idx < head.count; idx++)
    {
        ctx = head.ctx_tab[idx];
        if (ctx == RT_NULL)
        {
            continue;
        }
        if (ctx->size > sizeof(*ctx))
        {
            res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            if (res != 0)
            {
                uassert_int_equal(res, 0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_slab_realloc(heap, ctx, 0);
        head.ctx_tab[idx] = RT_NULL;
    }
    // slab heap deinit
    rt_slab_detach(heap);
    /* release test resources */
    rt_free(buf);
}

static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

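/* Register both stress tests with the utest framework (exported as
 * "testcases.kernel.slab_tc"). */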
static void testcase(void)
{
    UTEST_UNIT_RUN(slab_alloc_test);
    UTEST_UNIT_RUN(slab_realloc_test);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.slab_tc", utest_tc_init, utest_tc_cleanup, 20);