/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-03-20     WangXiaoyao  Complete testcase for mm_aspace.c
 */
#ifndef __TEST_ASPACE_API_H__
#define __TEST_ASPACE_API_H__

#include "common.h"
#include "mm_aspace.h"
#include "mm_flag.h"
#include "test_aspace_api_internal.h"
#include "test_synchronization.h"

/**
 * @brief API for aspace create/destroy
 *
 * rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);
 * rt_aspace_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl);
 * void rt_aspace_delete(rt_aspace_t aspace);
 * void rt_aspace_detach(rt_aspace_t aspace);
 *
 * init & detach are covered by the create & delete test cases
 */

static void aspace_create_tc(void)
{
    /* test robustness: detect the failure and recover the overall system state */
    rt_aspace_t aspace;

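    /* a start address this close to the top of the address space makes start + length
     * wrap around, so the create below must fail */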
    CONSIST_HEAP(aspace = rt_aspace_create((void *)(0 - 0x1000), 0x1000, NULL));
    uassert_true(!aspace);
}

#if 1 /* make it clear to identify the block :) */
    /* for testing on _aspace_traverse */
    static void *_prev_end;
    static size_t _count;
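    /* traversal callback: check that vareas are visited in ascending address order
     * without overlap, and count how many are visited */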
    static int _test_increase(rt_varea_t varea, void *param)
    {
        uassert_true(varea->start >= _prev_end);
        _prev_end = varea->start + varea->size;
        _count += 1;
        return 0;
    }
#endif

static void aspace_delete_tc(void)
{
    /**
     * @brief Requirements: delete should properly recycle all types of vareas inside
     * the aspace and release the resources allocated for it
     */
    rt_aspace_t aspace;
    struct rt_mm_va_hint hint = {.flags = 0,
                                 .map_size = 0x1000,
                                 .prefer = 0};
    struct rt_varea varea_phy;
    struct rt_varea varea_mobj;
    void *pgtbl;
    void *vaddr;

    /* compatible with armv7a */
    pgtbl = rt_pages_alloc(2);
    uassert_true(!!pgtbl);  /* page must be usable */
    rt_memset(pgtbl, 0, ARCH_PAGE_SIZE);

    CONSIST_HEAP({
        aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, pgtbl);
        uassert_true(!!aspace);

        /* insert 4 types of vareas into this aspace */
        hint.limit_start = aspace->start;
        hint.limit_range_size = aspace->size;
        uassert_true(!rt_aspace_map_phy(aspace, &hint, MMU_MAP_K_RWCB, 0, &vaddr));
        uassert_true(!rt_aspace_map_phy_static(aspace, &varea_phy, &hint, MMU_MAP_K_RWCB, 0, &vaddr));
        uassert_true(!rt_aspace_map(aspace, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
        uassert_true(!rt_aspace_map_static(aspace, &varea_mobj, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));

        /* for testing on _aspace_traverse */
        _count = 0;
        _prev_end = 0;
        uassert_true(!rt_aspace_traversal(aspace, _test_increase, 0));
        /* ensure the mapping is done */
        uassert_true(_count == 4);

        rt_aspace_delete(aspace);

        uassert_true(rt_pages_free(pgtbl, 2) == 1); /* page free must succeed */
    });
}

/**
 * @brief Memory Map on Virtual Address Space to Mappable Object
 * int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
 *                   mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
 * int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
 *                          rt_size_t length, rt_size_t attr, mm_flag_t flags,
 *                          rt_mem_obj_t mem_obj, rt_size_t offset);
 */
static void aspace_map_tc(void)
{
    /**
     * @brief Requirement:
     * Robustness, filter out invalid input
     */
    void *vaddr = RT_NULL;
    uassert_true(rt_aspace_map(0, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
    uassert_true(vaddr == RT_NULL);

    vaddr = (void *)USER_VADDR_START;
    uassert_true(rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
    uassert_true(vaddr == RT_NULL);

    uassert_true(rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, -1, &rt_mm_dummy_mapper, 0));
    uassert_true(vaddr == RT_NULL);

    /**
     * @brief Requirement:
     * in _rt_aspace_map:_varea_install
     * must not cover an existing varea if a named mapping is mandatory
     */
    // vaddr = (void *)((rt_ubase_t)aspace_map_tc & ~ARCH_PAGE_MASK);
    // CONSIST_HEAP(
    //     uassert_true(
    //         rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0)));
    // uassert_true(vaddr == RT_NULL);

    /**
     * @brief Requirement:
     * in _rt_aspace_map:_varea_install:_find_free
     * verify that this routine can choose a free region with the specified size
     * and the specified alignment requirement
     */
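    /* ALIGN_REQ = 0x04000000, i.e. a 64 MiB alignment requirement */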
    #define ALIGN_REQ (0x04000000)
    CONSIST_HEAP({
        uassert_true(!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_CREATE(0, ALIGN_REQ), &rt_mm_dummy_mapper, 0));
        uassert_true(!((rt_ubase_t)vaddr & (ALIGN_REQ - 1)));
        rt_aspace_unmap(&rt_kernel_space, vaddr);
    });

    /* test internal APIs */
    test_find_free();
}

/**
 * @brief Page frames mapping to varea
 * complete the page table on the specified varea, and handle TLB maintenance
 * There are 2 variants of this API
 *
 * int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
 * int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);
 */

static rt_varea_t _create_varea(const size_t size)
{
    rt_varea_t varea;
    void *vaddr = rt_ioremap_start;

    varea = rt_malloc(sizeof(*varea));
    uassert_true(!!varea);
    uassert_true(!rt_aspace_map_static(&rt_kernel_space, varea, &vaddr, size, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
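    /* drop the static-allocation mark so that unmapping later also releases the
     * rt_malloc'ed varea (assumption: the unmap path frees non-static vareas) */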
    varea->flag &= ~MMF_STATIC_ALLOC;
    uassert_true(!!vaddr);
    return varea;
}

static void test_varea_map_page(void)
{
    /**
     * @brief rt_varea_map_page
     * Requirements: complete the page table entry
     */
    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
    rt_varea_t varea = _create_varea(buf_sz);
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        void *page = rt_pages_alloc(0);
        uassert_true(!!page);
        uassert_true(!rt_varea_map_page(varea, varea->start + i, page));
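        /* the new mapping must translate back to the page frame's physical
         * address, i.e. the page's kernel virtual address plus PV_OFFSET */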
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + PV_OFFSET));

        /* let the page manager handle freeing of the page */
        rt_varea_pgmgr_insert(varea, page);
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + PV_OFFSET));
    }

    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
}

static void test_varea_map_range(void)
{
    /**
     * @brief rt_varea_map_range
     * Requirements: complete the page table entry
     */
    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
    rt_varea_t varea = _create_varea(buf_sz);
    void *page = rt_pages_alloc(rt_page_bits(buf_sz));
    uassert_true(!!page);
    uassert_true(!rt_varea_map_range(varea, varea->start, page + PV_OFFSET, buf_sz));
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + i + PV_OFFSET));
    }

    uassert_true(rt_pages_free(page, rt_page_bits(buf_sz)));
    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
}

/**
 * @brief rt_varea_unmap_page
 * Requirements: cancel the page table entry
 */
static void test_varea_unmap_page(void)
{
    /* Prepare environment */
    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
    rt_varea_t varea = _create_varea(buf_sz);
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        void *page = rt_pages_alloc(0);
        uassert_true(!!page);
        uassert_true(!rt_varea_map_page(varea, varea->start + i, page));

        /* let the page manager handle freeing of the page */
        rt_varea_pgmgr_insert(varea, page);
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + PV_OFFSET));
    }

    /* test whether unmap succeeds */
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        uassert_true(rt_varea_unmap_page(varea, varea->start + i) == RT_EOK);
        uassert_true(rt_kmem_v2p(varea->start + i) == ARCH_MAP_FAILED);
    }

    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
}

/**
 * @brief rt_varea_unmap_range
 * Requirements: cancel the page table entries
 */
static void test_varea_unmap_range(void)
{
    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
    rt_varea_t varea = _create_varea(buf_sz);
    void *page = rt_pages_alloc(rt_page_bits(buf_sz));
    uassert_true(!!page);
    uassert_true(!rt_varea_map_range(varea, varea->start, page + PV_OFFSET, buf_sz));
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + i + PV_OFFSET));
    }

    /* test whether unmap succeeds */
    uassert_true(rt_varea_unmap_range(varea, varea->start, buf_sz) == RT_EOK);
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        uassert_true(rt_kmem_v2p(varea->start + i) == ARCH_MAP_FAILED);
    }

    uassert_true(rt_pages_free(page, rt_page_bits(buf_sz)));
    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
}

static void varea_map_tc(void)
{
    CONSIST_HEAP(test_varea_map_page());
    CONSIST_HEAP(test_varea_map_range());
    CONSIST_HEAP(test_varea_unmap_page());
    CONSIST_HEAP(test_varea_unmap_range());
}

static void aspace_traversal_tc(void)
{
    /**
     * @brief Requirement
     * Iterate over each varea in the address space
     */
    CONSIST_HEAP(aspace_delete_tc());
    uassert_true(4 == _count);
}

#ifdef ARCH_ARMV8
static void aspace_control_tc(void)
{
    /* this case is designed for a single page only */
    const size_t buf_sz = ARCH_PAGE_SIZE;
    void *vaddr = RT_NULL;
    volatile char *remap_nocache;
    int platform_cache_probe;
    uassert_true(!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_PREFETCH, &rt_mm_dummy_mapper, 0));
    uassert_true(!!vaddr);

    /* map a non-cacheable region to verify the cache */
    remap_nocache = rt_ioremap(rt_kmem_v2p(vaddr), buf_sz);
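    /* remap_nocache should be an uncached alias of the same physical page that
     * backs vaddr, so it can observe whether data is held in the cache */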
    uassert_true(!!remap_nocache);

    /* pre probing */
    rt_memset(vaddr, 0xba, buf_sz);
    /* no need to sync transaction on same core */
    platform_cache_probe = memtest(remap_nocache, 0xab, buf_sz);

    if (!platform_cache_probe)
    {
        LOG_I("Cannot distinguish cache attributes on current platform");
    }
    else
    {
        LOG_I("Ready to verify attributes of cached & non-cacheable mappings");
    }

    /* verify cache */
    uassert_true(!rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_NONCACHE));
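    /* with the page now non-cacheable, the write below must be directly visible
     * through the uncached alias */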
    rt_memset(vaddr, 0, buf_sz);
    uassert_true(!memtest(remap_nocache, 0, buf_sz));

    /* switch back to cacheable with MMU_CNTL_CACHE */
    uassert_true(!rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_CACHE));

    rt_iounmap(remap_nocache);
    uassert_true(!rt_aspace_unmap(&rt_kernel_space, vaddr));
}
#endif

static void aspace_tc(void)
{
    UTEST_UNIT_RUN(aspace_create_tc);
    UTEST_UNIT_RUN(aspace_delete_tc);
    UTEST_UNIT_RUN(aspace_map_tc);
    UTEST_UNIT_RUN(aspace_traversal_tc);
#ifdef ARCH_ARMV8
    UTEST_UNIT_RUN(aspace_control_tc);
#endif
    UTEST_UNIT_RUN(varea_map_tc);

    /* functionality */
    UTEST_UNIT_RUN(synchronization_tc);
    return;
}

#endif /* __TEST_ASPACE_API_H__ */