/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-14     WangXiaoyao  the first version
 * 2023-08-17     Shell        Add unmap_range for MAP_PRIVATE
 */
#ifndef __MM_ASPACE_H__
#define __MM_ASPACE_H__

#include <rthw.h>
#include <rtthread.h>

#include "avl_adpt.h"
#include "mm_fault.h"
#include "mm_flag.h"

#include <stddef.h>
#include <string.h>

#define MM_PAGE_SHIFT    12
#define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
#define PV_OFFSET        (rt_kmem_pvoff())

typedef struct rt_spinlock mm_spinlock_t;

#define MM_PGTBL_LOCK_INIT(aspace) (rt_spin_lock_init(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_LOCK(aspace)      (rt_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace)    (rt_spin_unlock(&((aspace)->pgtbl_lock)))

struct rt_aspace;
struct rt_varea;
struct rt_mem_obj;

extern struct rt_aspace rt_kernel_space;

typedef struct rt_aspace
{
    void *start;
    rt_size_t size;

    void *page_table;
    mm_spinlock_t pgtbl_lock;

    struct _aspace_tree tree;
    struct rt_mutex bst_lock;

    struct rt_mem_obj *private_object;

#ifdef ARCH_USING_ASID
    rt_uint64_t asid;
#endif /* ARCH_USING_ASID */

} *rt_aspace_t;

typedef struct rt_varea
{
    void *start;
    rt_size_t size;
    rt_size_t offset;

    rt_size_t attr;
    rt_size_t flag;

    struct rt_aspace *aspace;
    struct rt_mem_obj *mem_obj;

    struct _aspace_node node;

    void *data;
} *rt_varea_t;

typedef struct rt_mm_va_hint
{
    void *limit_start;
    rt_size_t limit_range_size;

    void *prefer;
    const rt_size_t map_size;

    mm_flag_t flags;
} *rt_mm_va_hint_t;

typedef struct rt_mem_obj
{
    void (*hint_free)(rt_mm_va_hint_t hint);
    void (*on_page_fault)(struct rt_varea *varea, struct rt_aspace_fault_msg *msg);

    /* do pre-open business, e.g. increase a reference count */
    void (*on_varea_open)(struct rt_varea *varea);
    /* do post-close business, e.g. decrease a reference count */
    void (*on_varea_close)(struct rt_varea *varea);

    /* do preparation for an address space modification of the varea */
    rt_err_t (*on_varea_shrink)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
    /* do preparation for an address space modification of the varea */
    rt_err_t (*on_varea_expand)(struct rt_varea *varea, void *new_vaddr, rt_size_t size);
    /**
     * this acts like an on_varea_open() on `subset` and an on_varea_shrink() on `existed`,
     * while resources can migrate from `existed` to `subset` at the same time
     */
    rt_err_t (*on_varea_split)(struct rt_varea *existed, void *unmap_start,
                               rt_size_t unmap_len, struct rt_varea *subset);
    /**
     * this acts like an on_varea_expand() on `merge_to` and an on_varea_close() on `merge_from`,
     * while resources can migrate from `merge_from` to `merge_to` at the same time
     */
    rt_err_t (*on_varea_merge)(struct rt_varea *merge_to, struct rt_varea *merge_from);

    /* dynamic mem_obj API */
    void (*page_read)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);
    void (*page_write)(struct rt_varea *varea, struct rt_aspace_io_msg *msg);

    const char *(*get_name)(rt_varea_t varea);

    void *(*on_varea_mremap)(struct rt_varea *varea, rt_size_t new_size, int flags, void *new_address);
} *rt_mem_obj_t;
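
/*
 * Implementation sketch (illustrative only, every name below is
 * hypothetical): a backing object is a struct rt_mem_obj whose callbacks
 * are filled in as needed; hooks a particular backend does not care about
 * can be left unset, depending on which operations are ever performed on
 * its vareas.
 *
 *   static const char *my_get_name(rt_varea_t varea)
 *   {
 *       return "my-backend";
 *   }
 *
 *   static void my_on_page_fault(struct rt_varea *varea,
 *                                struct rt_aspace_fault_msg *msg)
 *   {
 *       // resolve the fault here, e.g. by providing a page frame for
 *       // the faulting address carried in *msg (see mm_fault.h)
 *   }
 *
 *   static struct rt_mem_obj my_mem_obj = {
 *       .get_name      = my_get_name,
 *       .on_page_fault = my_on_page_fault,
 *   };
 */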

extern struct rt_mem_obj rt_mm_dummy_mapper;

enum rt_mmu_cntl
{
    MMU_CNTL_NONCACHE,
    MMU_CNTL_CACHE,
    MMU_CNTL_READONLY,
    MMU_CNTL_READWRITE,
    MMU_CNTL_OFFLOAD,
    MMU_CNTL_INSTALL,
    MMU_CNTL_DUMMY_END,
};

/**
 * @brief Lock to access page table of address space
 */
#define WR_LOCK(aspace)                                                        \
    rt_thread_self() ? rt_mutex_take(&(aspace)->bst_lock, RT_WAITING_FOREVER)  \
                     : 0
#define WR_UNLOCK(aspace)                                                      \
    rt_thread_self() ? rt_mutex_release(&(aspace)->bst_lock) : 0

/* FIXME: fix rd_lock */
#define RD_LOCK(aspace)     WR_LOCK(aspace)
#define RD_UNLOCK(aspace)   WR_UNLOCK(aspace)
#define RDWR_LOCK(aspace)   ((void)aspace)
#define RDWR_UNLOCK(aspace) ((void)aspace)
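
/*
 * Usage sketch (illustrative only): the varea-level helpers declared
 * further below, such as rt_varea_map_page(), note that the caller
 * should hold the read/write lock, which leads to a pattern like:
 *
 *   WR_LOCK(aspace);
 *   int err = rt_varea_map_page(varea, vaddr, page);
 *   WR_UNLOCK(aspace);
 */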

rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);

rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl);

void rt_aspace_delete(rt_aspace_t aspace);

void rt_aspace_detach(rt_aspace_t aspace);

/**
 * @brief Memory Map on Virtual Address Space to Mappable Object
 * *INFO There is no restriction on using the NULL address (physical/virtual).
 * The vaddr passed in *addr must be page aligned. If it is RT_NULL,
 * a suitable address is chosen automatically.
 *
 * @param aspace target virtual address space
 * @param addr virtual address of the mapping
 * @param length length of the mapping region
 * @param attr MMU attributes of the mapping
 * @param flags desired memory protection and behaviour of the mapping
 * @param mem_obj memory map backing store object
 * @param offset offset of the mapping in 4KB pages for mem_obj
 * @return int E_OK on success, with *addr set to the vaddr of the mapping;
 *         E_INVAL on invalid arguments
 */
int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
                  mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
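
/*
 * Usage sketch (illustrative only): map an anonymous region into the
 * kernel address space and remove it again. `attr` stands for an
 * arch-specific MMU attribute obtained from the platform MMU header
 * (an assumption here); a flags value of 0 requests the defaults from
 * mm_flag.h, and the dummy mapper declared above serves as the backing
 * object.
 *
 *   void *vaddr = RT_NULL;
 *   rt_size_t attr = ...;  // arch-specific, e.g. normal cacheable RW
 *   if (rt_aspace_map(&rt_kernel_space, &vaddr, 4 << MM_PAGE_SHIFT, attr,
 *                     0, &rt_mm_dummy_mapper, 0) == 0)
 *   {
 *       // ... use [vaddr, vaddr + 4 pages) ...
 *       rt_aspace_unmap_range(&rt_kernel_space, vaddr, 4 << MM_PAGE_SHIFT);
 *   }
 */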

/** variant of rt_aspace_map() that calls no malloc routines */
int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
                         rt_size_t length, rt_size_t attr, mm_flag_t flags,
                         rt_mem_obj_t mem_obj, rt_size_t offset);

/**
 * @brief Memory Map on Virtual Address Space to Physical Memory
 *
 * @param aspace target virtual address space
 * @param hint hint for the mapping va
 * @param attr MMU attributes of the mapping
 * @param pa_off (physical address >> 12)
 * @param ret_va pointer to the location to store the va
 * @return int E_OK on success, with *ret_va set to the vaddr of the mapping;
 *         E_INVAL on invalid arguments
 */
int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
                      rt_size_t pa_off, void **ret_va);
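
/*
 * Usage sketch (illustrative only): map one page of a device's physical
 * range into the kernel space through a va hint. Because map_size is
 * const, the hint has to be filled with a designated initializer.
 * `dev_attr` stands for an arch-specific device/non-cacheable MMU
 * attribute and 0x40000000 for a device base address; both are
 * assumptions made for the example.
 *
 *   struct rt_mm_va_hint hint = {
 *       .limit_start      = rt_kernel_space.start,
 *       .limit_range_size = rt_kernel_space.size,
 *       .prefer           = RT_NULL,               // let the allocator choose
 *       .map_size         = 1 << MM_PAGE_SHIFT,    // one page
 *       .flags            = 0,
 *   };
 *   void *va = RT_NULL;
 *   rt_aspace_map_phy(&rt_kernel_space, &hint, dev_attr,
 *                     MM_PA_TO_OFF(0x40000000), &va);
 */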

/** variant of rt_aspace_map_phy() that calls no malloc routines */
int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
                             rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
                             void **ret_va);

/** map a private memory region to aspace */
int rt_aspace_map_private(rt_aspace_t aspace, void **addr, rt_size_t length,
                          rt_size_t attr, mm_flag_t flags);

/**
 * @brief Remove the mapping that contains the address specified by addr
 *
 * @param aspace target virtual address space
 * @param addr an address contained in the mapping to be removed
 * @return int rt errno
 */
int rt_aspace_unmap(rt_aspace_t aspace, void *addr);

/**
 * @brief Remove pages of existing mappings in the range [addr, addr+length)
 *        Length is automatically rounded up to the next multiple of the page size.
 *
 * @param aspace target virtual address space
 * @param addr the beginning of the range of pages to be unmapped
 * @param length length of the range in bytes
 * @return int rt errno
 */
int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length);

void *rt_aspace_mremap_range(rt_aspace_t aspace, void *old_address, size_t old_size,
                             size_t new_size, int flags, void *new_address);

int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);
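
/*
 * Usage sketch (illustrative only): rt_aspace_control() applies one of
 * the rt_mmu_cntl commands above to the mapping that contains `addr`,
 * e.g. toggling a region between read-only and read-write. `buf` is a
 * hypothetical address inside an existing mapping.
 *
 *   rt_aspace_control(&rt_kernel_space, buf, MMU_CNTL_READONLY);
 *   // ... read-only phase ...
 *   rt_aspace_control(&rt_kernel_space, buf, MMU_CNTL_READWRITE);
 */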

int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);

int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage);

rt_err_t rt_aspace_page_put(rt_aspace_t aspace, void *page_va, void *buffer);

rt_err_t rt_aspace_page_get(rt_aspace_t aspace, void *page_va, void *buffer);

int rt_aspace_traversal(rt_aspace_t aspace,
                        int (*fn)(rt_varea_t varea, void *arg), void *arg);
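
/*
 * Usage sketch (illustrative only): walk every varea of an address space
 * with rt_aspace_traversal(); the callback is invoked once per varea with
 * the user argument passed through. `print_varea` is a hypothetical
 * callback.
 *
 *   static int print_varea(rt_varea_t varea, void *arg)
 *   {
 *       rt_kprintf("%p - %p\n", varea->start,
 *                  (char *)varea->start + varea->size);
 *       return 0;
 *   }
 *
 *   rt_aspace_traversal(&rt_kernel_space, print_varea, RT_NULL);
 */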

void rt_aspace_print_all(rt_aspace_t aspace);

rt_base_t rt_aspace_count_vsz(rt_aspace_t aspace);

rt_varea_t rt_aspace_query(rt_aspace_t aspace, void *vaddr);

rt_err_t rt_aspace_duplicate_locked(rt_aspace_t src, rt_aspace_t dst);
rt_err_t rt_aspace_fork(rt_aspace_t *psrc, rt_aspace_t *pdst);
rt_err_t rt_aspace_compare(rt_aspace_t src, rt_aspace_t dst);

/**
 * @brief Map one page to varea
 *
 * @note caller should take the read/write lock
 *
 * @param varea target varea
 * @param vaddr user address
 * @param page the page frame to be mapped
 * @return int
 */
int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);

/**
 * @brief Unmap one page in varea
 *
 * @note caller should take the read/write lock
 *
 * @param varea target varea
 * @param vaddr user address of the page to be unmapped
 * @return int
 */
int rt_varea_unmap_page(rt_varea_t varea, void *vaddr);

/**
 * @brief Map a range of physical address to varea
 *
 * @warning Caller should take care of synchronization of its varea among all
 *          the map/unmap operations
 *
 * @param varea target varea
 * @param vaddr user address
 * @param paddr physical address
 * @param length length of the range in bytes
 * @return int
 */
int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);

/**
 * @brief Unmap a range of physical address in varea
 *
 * @warning Caller should take care of synchronization of its varea among all
 *          the map/unmap operations
 *
 * @param varea target varea
 * @param vaddr user address
 * @param length length of the range in bytes
 * @return int
 */
int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length);

/**
 * @brief Insert a page into the page manager of the varea
 *        The page is freed automatically by the varea on uninstall
 *
 * @param varea target varea
 * @param page_addr the page frame to be added
 */
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr);

rt_inline rt_mem_obj_t rt_mem_obj_create(rt_mem_obj_t source)
{
    rt_mem_obj_t target;
    target = rt_malloc(sizeof(*target));
    if (target)
        memcpy(target, source, sizeof(*target));
    return target;
}
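
/*
 * Usage sketch (illustrative only): clone an existing memory object and
 * override a single callback, e.g. to give the new mapping its own name.
 * `named_get_name` is a hypothetical helper matching the get_name
 * signature, and the caller owns (and eventually frees) the clone.
 *
 *   rt_mem_obj_t obj = rt_mem_obj_create(&rt_mm_dummy_mapper);
 *   if (obj)
 *       obj->get_name = named_get_name;
 */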

const rt_ubase_t rt_kmem_pvoff(void);

void rt_kmem_pvoff_set(rt_ubase_t pvoff);

int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr);

void *rt_kmem_v2p(void *vaddr);

void *rt_kmem_p2v(void *paddr);
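
/*
 * Usage sketch (illustrative only): translate a kernel virtual address to
 * its physical counterpart and back. The round trip below assumes `buffer`
 * lies in the kernel's directly mapped region, where the reverse
 * translation is expected to hold.
 *
 *   void *pa = rt_kmem_v2p(buffer);
 *   void *va = rt_kmem_p2v(pa);   // expected to equal buffer here
 */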

void rt_kmem_list(void);

#endif /* __MM_ASPACE_H__ */