1 /*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date           Author       Notes
8 * 2022-11-14     WangXiaoyao  the first version
9 * 2023-08-17     Shell        Add unmap_range for MAP_PRIVATE
10 *                             Support MAP_FIXED in aspace_map(), and
11 *                             Add better support of permission in mmap
12 */
13
14 /**
15 * @brief Virtual Address Space
16 */
17
18 #include <rtthread.h>
19
20 #define DBG_TAG "mm.aspace"
21 #define DBG_LVL DBG_INFO
22 #include <rtdbg.h>
23
24 #include "avl_adpt.h"
25 #include "mm_private.h"
26
27 #include <mmu.h>
28 #include <tlb.h>
29
30 #include <stddef.h>
31 #include <stdint.h>
32
33 #define ALIGNED(addr) (!((rt_size_t)(addr) & ARCH_PAGE_MASK))
34
35 static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
36 void *limit_start, rt_size_t limit_size,
37 mm_flag_t flags);
38 static int _unmap_range_locked(rt_aspace_t aspace, void *addr, size_t length);
39
40 struct rt_aspace rt_kernel_space;
41
42 static int _init_lock(rt_aspace_t aspace)
43 {
44 int err;
45 MM_PGTBL_LOCK_INIT(aspace);
46 err = rt_mutex_init(&aspace->bst_lock, "aspace", RT_IPC_FLAG_FIFO);
47
48 return err;
49 }
50
51 rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl)
52 {
53 int err = RT_EOK;
54
55 if (pgtbl)
56 {
57 aspace->page_table = pgtbl;
58 aspace->start = start;
59 aspace->size = length;
60 aspace->private_object = RT_NULL;
61
62 err = _aspace_bst_init(aspace);
63 if (err == RT_EOK)
64 {
65 /**
66 * It has the side effect that the lock will be added to the object
67 * system management. So it must be paired with a detach once
68 * the initialization returns successfully.
69 */
70 err = _init_lock(aspace);
71 }
72 }
73 else
74 {
75 err = -RT_EINVAL;
76 }
77
78 return err;
79 }
80
81 rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl)
82 {
83 rt_aspace_t aspace = NULL;
84 int err;
85
86 RT_ASSERT(length <= 0 - (rt_size_t)start);
87 aspace = (rt_aspace_t)rt_malloc(sizeof(*aspace));
88 if (aspace)
89 {
90 rt_memset(aspace, 0, sizeof(*aspace));
91
92 err = rt_aspace_init(aspace, start, length, pgtbl);
93
94 if (err != RT_EOK)
95 {
96 LOG_W("%s(%p, %lx, %p): failed with code %d\n", __func__,
97 start, length, pgtbl, err);
98 rt_free(aspace);
99 aspace = RT_NULL;
100 }
101 }
102 return aspace;
103 }
104
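/**
 * Detach an address space: uninstall every varea under the write lock,
 * free the non-static ones, drop the reference on the private anonymous
 * object and detach the bst lock. The aspace structure itself is not
 * freed (see rt_aspace_delete()).
 */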
105 void rt_aspace_detach(rt_aspace_t aspace)
106 {
107 rt_varea_t varea;
108
109 WR_LOCK(aspace);
110 varea = ASPACE_VAREA_FIRST(aspace);
111 while (varea)
112 {
113 rt_varea_t prev = varea;
114 varea = ASPACE_VAREA_NEXT(varea);
115
116 _varea_uninstall_locked(prev);
117 if (VAREA_NOT_STATIC(prev))
118 {
119 rt_free(prev);
120 }
121 }
122 WR_UNLOCK(aspace);
123
124 rt_aspace_anon_ref_dec(aspace->private_object);
125
126 rt_mutex_detach(&aspace->bst_lock);
127 }
128
129 void rt_aspace_delete(rt_aspace_t aspace)
130 {
131 RT_ASSERT(aspace);
132 rt_aspace_detach(aspace);
133 rt_free(aspace);
134 }
135
136 rt_inline rt_size_t _get_effect_attr(rt_aspace_t aspace, rt_varea_t varea)
137 {
138 rt_size_t attr = varea->attr;
139
140 /* no write permission for the user on a private mapping */
141 if (rt_varea_is_private_locked(varea))
142 attr = rt_hw_mmu_attr_rm_perm(attr, RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE);
143
144 return attr;
145 }
146
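/**
 * Map a linear physical range (page frame offset `offset`) at `vaddr` using
 * the varea's effective attribute, then invalidate the TLB for that range.
 */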
147 static int _do_named_map(rt_aspace_t aspace, rt_varea_t varea, void *vaddr,
148 rt_size_t length, rt_size_t offset, rt_size_t attr)
149 {
150 LOG_D("%s: va %p length %p", __func__, vaddr, length);
151 int err = RT_EOK;
152
153 /* it's ensured by caller that (void*)end will not overflow */
154 void *phyaddr = (void *)(offset << MM_PAGE_SHIFT);
155 void *ret;
156
157 attr = _get_effect_attr(aspace, varea);
158 ret = rt_hw_mmu_map(aspace, vaddr, phyaddr, length, attr);
159 if (ret == RT_NULL)
160 {
161 err = -RT_ERROR;
162 }
163
164 if (err == RT_EOK)
165 rt_hw_tlb_invalidate_range(aspace, vaddr, length, ARCH_PAGE_SIZE);
166
167 return err;
168 }
169
170 rt_inline void _do_page_fault(struct rt_aspace_fault_msg *msg, rt_size_t off,
171 void *vaddr, rt_mem_obj_t mem_obj,
172 rt_varea_t varea)
173 {
174 msg->off = off;
175 msg->fault_vaddr = vaddr;
176 msg->fault_op = MM_FAULT_OP_READ;
177 msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
178 msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
179 msg->response.vaddr = 0;
180 msg->response.size = 0;
181
182 RT_ASSERT(mem_obj->on_page_fault);
183 mem_obj->on_page_fault(varea, msg);
184 }
185
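/**
 * Complete a page fault by mapping the page provided in the handler's
 * response. Returns RT_EOK if the page was mapped here or was already mapped
 * by the handler (MM_FAULT_STATUS_OK_MAPPED), and -RT_ERROR otherwise.
 */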
186 int rt_varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
187 {
188 rt_aspace_t aspace;
189 int err = -RT_ERROR;
190 if (msg->response.status == MM_FAULT_STATUS_OK)
191 {
192 /**
193 * the page returned by the handler is not checked,
194 * since few assumptions can be made about it
195 */
196 char *store = msg->response.vaddr;
197 rt_size_t store_sz = msg->response.size;
198 if ((char *)msg->fault_vaddr + store_sz > (char *)varea->start + varea->size)
199 {
200 LOG_W("%s: too much (0x%lx) of buffer on vaddr %p is provided",
201 __func__, store_sz, msg->fault_vaddr);
202 }
203 else
204 {
205 void *map;
206 rt_size_t attr;
207 void *v_addr = msg->fault_vaddr;
208 void *p_addr = store + PV_OFFSET;
209
210 aspace = varea->aspace;
211 RT_ASSERT(aspace);
212
213 attr = _get_effect_attr(aspace, varea);
214 map = rt_hw_mmu_map(aspace, v_addr, p_addr, store_sz, attr);
215
216 if (!map)
217 {
218 LOG_W("%s: MMU mapping failed for va %p to %p of %lx", __func__,
219 msg->fault_vaddr, store + PV_OFFSET, store_sz);
220 }
221 else
222 {
223 rt_hw_tlb_invalidate_range(varea->aspace, v_addr, store_sz, ARCH_PAGE_SIZE);
224 err = RT_EOK;
225 }
226 }
227 }
228 else if (msg->response.status == MM_FAULT_STATUS_OK_MAPPED)
229 {
230 if (rt_hw_mmu_v2p(varea->aspace, msg->fault_vaddr) == ARCH_MAP_FAILED)
231 {
232 LOG_D("%s: no page is mapped on %p", __func__, msg->fault_vaddr);
233 }
234 err = RT_EOK;
235 }
236 else
237 {
238 LOG_W("%s: failed on va %p inside varea %p(%s)", __func__, msg->fault_vaddr, varea,
239 varea->mem_obj->get_name ? varea->mem_obj->get_name(varea) : "unknown");
240 }
241 return err;
242 }
243
244 /* allocate memory page for mapping range */
245 static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
246 rt_size_t size)
247 {
248 int err = RT_EOK;
249
250 /* it's ensured by the caller that start & size are page-aligned */
251 char *end = (char *)start + size;
252 char *vaddr = start;
253 rt_size_t off = varea->offset + ((vaddr - (char *)varea->start) >> ARCH_PAGE_SHIFT);
254
255 while (vaddr != end)
256 {
257 /* TODO try to map with huge TLB, when flag & HUGEPAGE */
258 struct rt_aspace_fault_msg msg;
259 _do_page_fault(&msg, off, vaddr, varea->mem_obj, varea);
260
261 if (rt_varea_map_with_msg(varea, &msg))
262 {
263 err = -RT_ENOMEM;
264 break;
265 }
266 /**
267 * It's hard to identify the mapping pattern with a customized handler,
268 * so we terminate the prefetch process in that case
269 */
270 if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
271 break;
272
273 vaddr += msg.response.size;
274 off += msg.response.size >> ARCH_PAGE_SHIFT;
275 }
276
277 return err;
278 }
279
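/**
 * Ask the memory object to expand the varea to [new_va, new_va + size); on
 * success update its start/offset and re-insert it into the aspace tree.
 */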
280 static rt_err_t _expand_varea(rt_varea_t varea, void *new_va, rt_size_t size)
281 {
282 rt_err_t error;
283 rt_aspace_t aspace;
284 void *old_va;
285
286 if (varea->mem_obj && varea->mem_obj->on_varea_expand)
287 error = varea->mem_obj->on_varea_expand(varea, new_va, size);
288 else
289 error = -RT_EPERM;
290
291 if (error == RT_EOK)
292 {
293 aspace = varea->aspace;
294 old_va = varea->start;
295 varea->size = size;
296
297 if (old_va != new_va)
298 {
299 varea->start = new_va;
300 varea->offset += ((long)new_va - (long)old_va) >> MM_PAGE_SHIFT;
301 _aspace_bst_remove(aspace, varea);
302 _aspace_bst_insert(aspace, varea);
303 }
304 }
305 return error;
306 }
307
308 struct _mapping_property {
309 rt_size_t attr;
310 rt_size_t flags;
311 rt_size_t offset;
312 struct rt_mem_obj *mem_obj;
313 };
314
315 #define INIT_PROP(obj,off,fl,attr) \
316 {.mem_obj = (obj), \
317 .offset = (off), \
318 .flags = (fl), \
319 .attr = (attr),}
320
321 static rt_bool_t _contiguous_offset(rt_varea_t neighbour, rt_size_t map_size,
322 struct _mapping_property *prop)
323 {
324 rt_size_t n_off = neighbour->offset;
325 rt_size_t map_off = prop->offset;
326 return n_off < map_off ?
327 n_off + (neighbour->size >> MM_PAGE_SHIFT) == map_off :
328 map_off + (map_size >> MM_PAGE_SHIFT) == n_off;
329 }
330
331 static rt_bool_t _compatible(rt_varea_t neighbour, rt_size_t map_size,
332 struct _mapping_property *prop)
333 {
334 return (prop->attr == neighbour->attr && prop->flags == neighbour->flag &&
335 prop->mem_obj == neighbour->mem_obj &&
336 _contiguous_offset(neighbour, map_size, prop));
337 }
338
339 rt_inline rt_err_t _migrate_and_release_varea(rt_aspace_t aspace, rt_varea_t to, rt_varea_t from,
340 rt_err_t (*on_varea_merge)(struct rt_varea *to, struct rt_varea *from))
341 {
342 rt_err_t error;
343 error = on_varea_merge(to, from);
344 if (error == RT_EOK)
345 {
346 /* uninstall operand & release the varea */
347 _aspace_bst_remove(aspace, from);
348 to->size += from->size;
349
350 if (VAREA_NOT_STATIC(from))
351 rt_free(from);
352 }
353 return error;
354 }
355
356 static rt_varea_t _merge_surrounding(rt_aspace_t aspace, rt_varea_t operand,
357 struct _mapping_property *prop)
358 {
359 int again;
360 rt_err_t error;
361 int can_merge_fw;
362 int can_merge_bw;
363 rt_varea_t neighbour;
364 char *operand_start;
365 size_t operand_size;
366 rt_err_t (*on_varea_merge)(struct rt_varea *to, struct rt_varea *from);
367
368 if (operand->mem_obj && operand->mem_obj->on_varea_merge)
369 {
370 on_varea_merge = operand->mem_obj->on_varea_merge;
371 do {
372 operand_start = operand->start;
373 operand_size = operand->size;
374 LOG_D("search op_start=%p,op_size=0x%lx", operand_start, operand_size);
375
376 /* find a compatible neighbour if any and setup direction */
377 can_merge_fw = can_merge_bw = 0;
378 neighbour = _aspace_bst_search(aspace, operand_start - 1);
379 if (!neighbour || !_compatible(neighbour, operand_size, prop))
380 {
381 neighbour = _aspace_bst_search(aspace, operand_start + operand_size);
382 if (neighbour && _compatible(neighbour, operand_size, prop))
383 can_merge_bw = 1;
384 }
385 else
386 can_merge_fw = 1;
387
388 if (can_merge_fw || can_merge_bw)
389 {
390 /* merge operand with its predecessor or successor */
391 if (can_merge_fw)
392 {
393 error = _migrate_and_release_varea(aspace, neighbour, operand, on_varea_merge);
394 operand = neighbour;
395 }
396 else
397 error = _migrate_and_release_varea(aspace, operand, neighbour, on_varea_merge);
398
399 if (error == RT_EOK)
400 again = 1;
401 }
402 else
403 again = 0;
404
405 } while (again);
406 }
407 return operand;
408 }
409
410 /**
411 * Brief: expand and merge with surrounding vareas until no longer possible,
412 * and set up *pvarea if a new virtual address region is installed
413 */
414 static rt_bool_t _try_expand_and_merge_okay(rt_aspace_t aspace, rt_varea_t *pvarea,
415 void *alloc_va, rt_mm_va_hint_t hint,
416 struct _mapping_property *prop)
417 {
418 int can_expand_fw;
419 int can_expand_bw;
420 rt_varea_t neighbour;
421 rt_varea_t new_region_at = RT_NULL;
422 rt_bool_t install_ok = RT_FALSE;
423 char *operand_start = alloc_va;
424 size_t operand_size = hint->map_size;
425
426 /* find a compatible neighbour if any and setup direction */
427 LOG_D("search op_start=%p,op_size=0x%lx", operand_start, operand_size);
428 can_expand_fw = can_expand_bw = 0;
429 neighbour = _aspace_bst_search(aspace, operand_start - 1);
430 if (!neighbour || !_compatible(neighbour, operand_size, prop))
431 {
432 neighbour = _aspace_bst_search(aspace, operand_start + operand_size);
433 if (neighbour && _compatible(neighbour, operand_size, prop))
434 can_expand_bw = 1;
435 }
436 else
437 can_expand_fw = 1;
438
439 if (can_expand_fw || can_expand_bw)
440 {
441 /* expand varea at its head or tail */
442 if (can_expand_fw)
443 operand_start = neighbour->start;
444 operand_size += neighbour->size;
445
446 LOG_D("expand op_start=%p,op_size=0x%lx", operand_start, operand_size);
447
448 if (_expand_varea(neighbour, operand_start, operand_size) == RT_EOK)
449 {
450 new_region_at = _merge_surrounding(aspace, neighbour, prop);
451 *pvarea = new_region_at;
452 install_ok = RT_TRUE;
453 }
454 }
455
456 return install_ok;
457 }
458
459 static rt_varea_t _varea_create(void *start, rt_size_t size);
460
461 static int _insert_new_varea(rt_aspace_t aspace, rt_varea_t *pvarea,
462 void *alloc_va, rt_mm_va_hint_t hint)
463 {
464 int err;
465 rt_varea_t varea = *pvarea;
466 if (varea == RT_NULL)
467 {
468 /* no preallocate buffer is provided, then create one */
469 varea = _varea_create(hint->prefer, hint->map_size);
470 hint->flags &= ~MMF_STATIC_ALLOC;
471 *pvarea = varea;
472 }
473
474 if (varea)
475 {
476 varea->start = alloc_va;
477 _aspace_bst_insert(aspace, varea);
478 err = RT_EOK;
479 }
480 else
481 {
482 LOG_W("%s: Out of memory", __func__);
483 err = -RT_ENOMEM;
484 }
485
486 return err;
487 }
488
489 static inline void _varea_post_install(rt_varea_t varea, rt_aspace_t aspace,
490 rt_size_t attr, rt_size_t flags,
491 rt_mem_obj_t mem_obj, rt_size_t offset)
492 {
493 varea->aspace = aspace;
494 varea->attr = attr;
495 varea->mem_obj = mem_obj;
496 varea->flag = flags;
497 varea->offset = offset;
498
499 if (varea->mem_obj && varea->mem_obj->on_varea_open)
500 varea->mem_obj->on_varea_open(varea);
501 }
502
503 /**
504 * Install a new virtual address region into the address space.
505 * *pvarea will be set to the varea where the new virtual memory is installed,
506 * which can be a newly created or an existing one.
507 *
508 * Note: caller must hold the aspace lock
509 */
510 static int _varea_install(rt_aspace_t aspace, rt_varea_t *pvarea,
511 rt_mm_va_hint_t hint, struct _mapping_property *prop,
512 void **pva)
513 {
514 void *alloc_va;
515 int err = RT_EOK;
516
517 if (hint->flags & MMF_MAP_FIXED)
518 {
519 alloc_va = hint->prefer;
520 err = _unmap_range_locked(aspace, alloc_va, hint->map_size);
521 if (err != RT_EOK)
522 {
523 /* Note: MAP_FIXED must fail if the existing mapping cannot be unmapped */
524 LOG_I("%s: unmap range failed in %p with size 0x%lx, error=%d", __func__, alloc_va, hint->map_size, err);
525 }
526 }
527 else
528 {
529 alloc_va =
530 _find_free(aspace, hint->prefer, hint->map_size, hint->limit_start,
531 hint->limit_range_size, hint->flags);
532 if (alloc_va == RT_NULL)
533 err = -RT_ENOSPC;
534 }
535
536 if (alloc_va != RT_NULL)
537 {
538 /* TODO: fix to private mapping directly */
539 if (!_try_expand_and_merge_okay(aspace, pvarea, alloc_va, hint, prop))
540 {
541 err = _insert_new_varea(aspace, pvarea, alloc_va, hint);
542
543 if (err == RT_EOK)
544 _varea_post_install(*pvarea, aspace, prop->attr, prop->flags,
545 prop->mem_obj, prop->offset);
546 }
547
548 if (err == RT_EOK)
549 {
550 RT_ASSERT(*pvarea);
551 *pva = alloc_va;
552 }
553 }
554
555 return err;
556 }
557
558 /**
559 * restore the context modified by a varea install
560 */
561 void _varea_uninstall_locked(rt_varea_t varea)
562 {
563 rt_aspace_t aspace = varea->aspace;
564
565 if (varea->mem_obj && varea->mem_obj->on_varea_close)
566 varea->mem_obj->on_varea_close(varea);
567 else
568 {
569 rt_hw_mmu_unmap(aspace, varea->start, varea->size);
570 rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
571 }
572
573 _aspace_bst_remove(aspace, varea);
574 }
575
576 int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t *pvarea, void **addr,
577 rt_size_t length, rt_size_t attr, mm_flag_t flags,
578 rt_mem_obj_t mem_obj, rt_size_t offset)
579 {
580 int err = RT_EOK;
581 rt_varea_t varea;
582 struct _mapping_property prop = INIT_PROP(mem_obj, offset, flags, attr);
583
584 WR_LOCK(aspace);
585
586 /**
587 * @brief .prefer & .map_size are taken from the varea set up by the caller;
588 * .limit_start & .limit_range_size default to the range of the aspace;
589 * .flags comes from the parameter and is filled into the varea if the install succeeds
590 */
591 struct rt_mm_va_hint hint = {.prefer = *addr,
592 .map_size = length,
593 .limit_start = aspace->start,
594 .limit_range_size = aspace->size,
595 .flags = flags};
596
597 if (mem_obj->hint_free)
598 {
599 /* the memory object can constrain the mapping range by modifying the hint */
600 mem_obj->hint_free(&hint);
601 }
602
603 /* try to allocate a virtual address region for varea */
604 err = _varea_install(aspace, pvarea, &hint, &prop, addr);
605
606 if (err == RT_EOK)
607 {
608 varea = *pvarea;
609 if (MMF_TEST_CNTL(flags, MMF_PREFETCH))
610 {
611 /* do the MMU & TLB business */
612 err = _do_prefetch(aspace, varea, varea->start, varea->size);
613 if (err)
614 {
615 LOG_I("%s: failed to prefetch page into varea(%s)",
616 __func__, VAREA_NAME(varea));
617
618 /* restore data structure and MMU */
619 _varea_uninstall_locked(varea);
620 if (!(varea->flag & MMF_STATIC_ALLOC))
621 rt_free(varea);
622 }
623 }
624 }
625
626 WR_UNLOCK(aspace);
627
628 return err;
629 }
630
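/* Allocate a varea and fill in only start/size; the remaining fields are
 * initialized later by _varea_post_install() or the caller. */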
631 static rt_varea_t _varea_create(void *start, rt_size_t size)
632 {
633 rt_varea_t varea;
634 varea = (rt_varea_t)rt_malloc(sizeof(struct rt_varea));
635 if (varea)
636 {
637 varea->start = start;
638 varea->size = size;
639 }
640 return varea;
641 }
642
643 #define _IS_OVERFLOW(start, length) ((length) > (0ul - (uintptr_t)(start)))
644 #define _IS_OVERSIZE(start, length, limit_s, limit_sz) (((length) + (rt_size_t)((char *)(start) - (char *)(limit_s))) > (limit_sz))
645
646 static inline int _not_in_range(rt_size_t flags, void *start, rt_size_t length,
647 void *limit_start, rt_size_t limit_size)
648 {
649 /* assuming (start + length) will not overflow, except when it wraps to 0 */
650 int rc = (flags & MMF_MAP_FIXED || start != RT_NULL)
651 ? (_IS_OVERFLOW(start, length) || start < limit_start ||
652 _IS_OVERSIZE(start, length, limit_start, limit_size))
653 : length > limit_size;
654 if (rc)
655 LOG_D("%s: [%p : %p] [%p : %p]", __func__, start, length, limit_start, limit_size);
656 return rc;
657 }
658
659 static inline int _not_align(void *start, rt_size_t length, rt_size_t mask)
660 {
661 return (start != RT_NULL) &&
662 (((uintptr_t)start & mask) || (length & mask));
663 }
664
665 /** check whether the given flags are currently supported */
666 static inline int _not_support(rt_size_t flags)
667 {
668 rt_size_t support_ops = MMF_CREATE(((__MMF_INVALID - 1) << 1) - 1, 1);
669 return flags & ~(support_ops);
670 }
671
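/**
 * Map a memory object into the address space.
 * On entry *addr holds the preferred (or, with MMF_MAP_FIXED, the exact)
 * virtual address, or RT_NULL for no preference; on success it is updated
 * with the mapped address, and it is set to NULL on failure. Both *addr and
 * length must be page aligned.
 *
 * A minimal usage sketch (illustrative only; `attr` stands for a
 * platform-specific MMU attribute and `mem_obj` for a memory object,
 * neither of which is defined in this file):
 *
 *   void *va = RT_NULL;    // no preferred address
 *   int rc = rt_aspace_map(&rt_kernel_space, &va, 4 * ARCH_PAGE_SIZE,
 *                          attr, MMF_PREFETCH, mem_obj, 0);
 */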
672 int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
673 rt_size_t attr, mm_flag_t flags, rt_mem_obj_t mem_obj,
674 rt_size_t offset)
675 {
676 int err;
677 rt_varea_t varea = RT_NULL;
678
679 RT_DEBUG_SCHEDULER_AVAILABLE(1);
680
681 if (!aspace || !addr || !mem_obj || length == 0)
682 {
683 err = -RT_EINVAL;
684 LOG_I("%s(%p, %p, %lx, %lx, %lx, %p, %lx): Invalid input",
685 __func__, aspace, addr, length, attr, flags, mem_obj, offset);
686 }
687 else if (_not_in_range(flags, *addr, length, aspace->start, aspace->size))
688 {
689 err = -RT_EINVAL;
690 LOG_I("%s(addr:%p, len:%lx): out of range", __func__, *addr, length);
691 }
692 else if (_not_support(flags))
693 {
694 LOG_I("%s: no support flags 0x%lx", __func__, flags);
695 err = -RT_ENOSYS;
696 }
697 else
698 {
699 RT_ASSERT((length & ARCH_PAGE_MASK) == 0);
700 RT_ASSERT(((long)*addr & ARCH_PAGE_MASK) == 0);
701 err = _mm_aspace_map(aspace, &varea, addr, length, attr, flags, mem_obj, offset);
702 }
703
704 if (err != RT_EOK)
705 {
706 *addr = NULL;
707 }
708
709 return err;
710 }
711
712 int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
713 rt_size_t length, rt_size_t attr, mm_flag_t flags,
714 rt_mem_obj_t mem_obj, rt_size_t offset)
715 {
716 int err;
717
718 if (!aspace || !varea || !addr || !mem_obj || length == 0 ||
719 _not_in_range(flags, *addr, length, aspace->start, aspace->size))
720 {
721 err = -RT_EINVAL;
722 LOG_W("%s: Invalid input", __func__);
723 }
724 else if (_not_support(flags))
725 {
726 LOG_W("%s: no support flags", __func__);
727 err = -RT_ENOSYS;
728 }
729 else
730 {
731 varea->size = length;
732 varea->start = *addr;
733 flags |= MMF_STATIC_ALLOC;
734
735 /**
736 * TODO: if the mapping is expanded, the static varea is not used at all
737 */
738 err = _mm_aspace_map(aspace, &varea, addr, length, attr, flags, mem_obj, offset);
739 }
740
741 if (err != RT_EOK)
742 {
743 *addr = NULL;
744 }
745 else
746 {
747 *addr = varea->start;
748 }
749 return err;
750 }
751
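/**
 * Map a physical region given by the page offset `pa_off` according to the
 * hint: validate alignment and ranges, install a varea and program the MMU
 * immediately (no demand paging). *ret_va receives the mapped virtual
 * address on success, or RT_NULL on failure.
 */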
752 int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
753 rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
754 void **ret_va)
755 {
756 int err;
757 void *vaddr;
758
759 if (!aspace || !hint || !hint->limit_range_size || !hint->map_size)
760 {
761 LOG_W("%s: Invalid input", __func__);
762 err = -RT_EINVAL;
763 }
764 else if (_not_align(hint->prefer, hint->map_size, ARCH_PAGE_MASK))
765 {
766 LOG_W("%s: not aligned", __func__);
767 err = -RT_EINVAL;
768 }
769 else if (_not_in_range(hint->flags, hint->limit_start, hint->limit_range_size, aspace->start,
770 aspace->size) ||
771 _not_in_range(hint->flags, hint->prefer, hint->map_size, aspace->start,
772 aspace->size))
773 {
774 LOG_W("%s: not in range", __func__);
775 err = -RT_EINVAL;
776 }
777 else
778 {
779 struct _mapping_property prop = INIT_PROP(0, pa_off, hint->flags, attr);
780
781 WR_LOCK(aspace);
782 err = _varea_install(aspace, &varea, hint, &prop, &vaddr);
783 if (err == RT_EOK)
784 {
785 err = _do_named_map(aspace, varea, varea->start, varea->size,
786 (rt_size_t)pa_off, attr);
787
788 if (err != RT_EOK)
789 {
790 _varea_uninstall_locked(varea);
791 }
792 }
793
794 WR_UNLOCK(aspace);
795 }
796
797 if (ret_va)
798 {
799 if (err == RT_EOK)
800 *ret_va = vaddr;
801 else
802 *ret_va = RT_NULL;
803 }
804
805 return err;
806 }
807
808 int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
809 rt_size_t pa_off, void **ret_va)
810 {
811 int err;
812
813 if (hint)
814 {
815 rt_varea_t varea = _varea_create(hint->prefer, hint->map_size);
816 if (varea)
817 {
818 hint->flags &= ~MMF_STATIC_ALLOC;
819 err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
820 if (err != RT_EOK)
821 {
822 rt_free(varea);
823 }
824 }
825 else
826 {
827 err = -RT_ENOMEM;
828 }
829 }
830 else
831 {
832 err = -RT_EINVAL;
833 }
834
835 return err;
836 }
837
838 int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
839 rt_mm_va_hint_t hint, rt_size_t attr,
840 rt_size_t pa_off, void **ret_va)
841 {
842 int err;
843
844 if (varea && hint)
845 {
846 varea->start = hint->prefer;
847 varea->size = hint->map_size;
848 hint->flags |= (MMF_STATIC_ALLOC);
849 LOG_D("%s: start %p size %p phy at %p", __func__, varea->start, varea->size, pa_off << MM_PAGE_SHIFT);
850 err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
851 }
852 else
853 {
854 err = -RT_EINVAL;
855 }
856
857 return err;
858 }
859
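/* Uninstall the single varea that covers `addr` and free it unless it was
 * statically allocated; returns -RT_ENOENT if no varea is found there. */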
860 int _aspace_unmap(rt_aspace_t aspace, void *addr)
861 {
862 int error;
863 rt_varea_t varea;
864
865 WR_LOCK(aspace);
866 varea = _aspace_bst_search(aspace, addr);
867
868 if (varea == RT_NULL)
869 {
870 LOG_D("%s: No such entry found at %p\n", __func__, addr);
871 error = -RT_ENOENT;
872 }
873 else
874 {
875 _varea_uninstall_locked(varea);
876 if (!(varea->flag & MMF_STATIC_ALLOC))
877 {
878 rt_free(varea);
879 }
880 error = RT_EOK;
881 }
882
883 WR_UNLOCK(aspace);
884 return error;
885 }
886
887 int rt_aspace_unmap(rt_aspace_t aspace, void *addr)
888 {
889 int error;
890 if (!aspace)
891 {
892 LOG_I("%s: Invalid input", __func__);
893 error = -RT_EINVAL;
894 }
895 else if (_not_in_range(MMF_MAP_FIXED, addr, 1, aspace->start, aspace->size))
896 {
897 LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
898 aspace->start, (char *)aspace->start + aspace->size);
899 error = -RT_EINVAL;
900 }
901 else
902 {
903 error = _aspace_unmap(aspace, addr);
904 }
905
906 return error;
907 }
908
909 /**
910 * modify the property of an existing varea by shrinking its size. The mem_obj is
911 * notified to release the resource.
912 */
913 static rt_err_t _shrink_varea(rt_varea_t varea, void *new_va, rt_size_t size)
914 {
915 rt_err_t error;
916 rt_aspace_t aspace;
917 void *old_va;
918
919 if (varea->mem_obj && varea->mem_obj->on_varea_shrink)
920 error = varea->mem_obj->on_varea_shrink(varea, new_va, size);
921 else
922 error = -RT_EPERM;
923
924 if (error == RT_EOK)
925 {
926 aspace = varea->aspace;
927 old_va = varea->start;
928 varea->size = size;
929
930 if (old_va != new_va)
931 {
932 varea->start = new_va;
933 varea->offset += ((long)new_va - (long)old_va) >> MM_PAGE_SHIFT;
934 _aspace_bst_remove(aspace, varea);
935 _aspace_bst_insert(aspace, varea);
936 }
937 }
938 return error;
939 }
940
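/**
 * Split an existing varea that fully contains the unmapped hole: a new
 * subset varea is created for the part after the hole, the memory object is
 * asked to perform the split, and the existing varea is shrunk to the part
 * before the hole.
 */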
941 static rt_err_t _split_varea(rt_varea_t existed, char *ex_end, char *unmap_start, char *unmap_end, rt_size_t unmap_len)
942 {
943 int error;
944 size_t rela_offset;
945 rt_varea_t subset;
946 char *subset_start;
947 size_t subset_size;
948
949 if (existed->mem_obj && existed->mem_obj->on_varea_split)
950 {
951 subset_start = unmap_end;
952 subset_size = ex_end - subset_start;
953 subset = _varea_create(subset_start, subset_size);
954 if (subset)
955 {
956 rela_offset = MM_PA_TO_OFF(subset_start - (char *)existed->start);
957 subset->aspace = existed->aspace;
958 subset->attr = existed->attr;
959 subset->mem_obj = existed->mem_obj;
960 subset->flag = existed->flag & ~MMF_STATIC_ALLOC;
961 subset->offset = existed->offset + rela_offset;
962
963 error = existed->mem_obj->on_varea_split(existed, unmap_start, unmap_len, subset);
964 if (error == RT_EOK)
965 {
966 existed->size = unmap_start - (char *)existed->start;
967 _aspace_bst_insert(existed->aspace, subset);
968 }
969
970 if (error != RT_EOK)
971 rt_free(subset);
972 }
973 else
974 error = -RT_ENOMEM;
975 }
976 else
977 error = -RT_EPERM;
978
979 return error;
980 }
981
982 /* remove overlapped pages from varea */
983 static int _remove_overlapped_varea(rt_varea_t existed, char *unmap_start, rt_size_t unmap_len)
984 {
985 int error;
986 char *ex_start = existed->start;
987 char *ex_end = ex_start + existed->size;
988 char *unmap_end = unmap_start + unmap_len;
989
990 if (ex_start < unmap_start)
991 {
992 if (ex_end > unmap_end)
993 error = _split_varea(existed, ex_end, unmap_start, unmap_end, unmap_len);
994 else
995 error = _shrink_varea(existed, ex_start, unmap_start - ex_start);
996 }
997 else if (ex_end > unmap_end)
998 error = _shrink_varea(existed, unmap_end, ex_end - unmap_end);
999 else
1000 {
1001 _varea_uninstall_locked(existed);
1002 if (VAREA_NOT_STATIC(existed))
1003 {
1004 rt_free(existed);
1005 }
1006 error = RT_EOK;
1007 }
1008
1009 return error;
1010 }
1011
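/* Remove every varea overlapping [addr, addr + length) by shrinking,
 * splitting or uninstalling it, until no overlap remains or an error occurs. */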
1012 static int _unmap_range_locked(rt_aspace_t aspace, void *addr, size_t length)
1013 {
1014 int error = RT_EOK;
1015 rt_varea_t existed;
1016 struct _mm_range unmap_range;
1017
1018 unmap_range.start = addr;
1019 unmap_range.end = addr + length - 1;
1020
1021 existed = _aspace_bst_search_overlap(aspace, unmap_range);
1022 while (existed)
1023 {
1024 error = _remove_overlapped_varea(existed, addr, length);
1025
1026 if (error == RT_EOK)
1027 existed = _aspace_bst_search_overlap(aspace, unmap_range);
1028 else
1029 break;
1030 }
1031
1032 return error;
1033 }
1034
1035 int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length)
1036 {
1037 int error;
1038
1039 if (!aspace)
1040 {
1041 LOG_I("%s: Invalid input", __func__);
1042 error = -RT_EINVAL;
1043 }
1044 else if (_not_in_range(MMF_MAP_FIXED, addr, length, aspace->start, aspace->size))
1045 {
1046 LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
1047 aspace->start, (char *)aspace->start + aspace->size);
1048 error = -RT_EINVAL;
1049 }
1050 else if (!ALIGNED(addr))
1051 {
1052 LOG_I("%s(addr=%p): Unaligned address", __func__, addr);
1053 error = -RT_EINVAL;
1054 }
1055 else
1056 {
1057 /**
1058 * Brief: re-arrange the address space to remove existing page mappings
1059 * in [unmap_start, unmap_start + unmap_len)
1060 */
1061 length = RT_ALIGN(length, ARCH_PAGE_SIZE);
1062
1063 WR_LOCK(aspace);
1064 error = _unmap_range_locked(aspace, addr, length);
1065 WR_UNLOCK(aspace);
1066 }
1067
1068 return error;
1069 }
1070
1071 void *rt_aspace_mremap_range(rt_aspace_t aspace, void *old_address, size_t old_size,
1072 size_t new_size, int flags, void *new_address)
1073 {
1074 void *ret = RT_NULL;
1075
1076 if (!aspace)
1077 {
1078 LOG_I("%s: Invalid input", __func__);
1079 }
1080 else if (_not_in_range(MMF_MAP_FIXED, old_address, old_size,
1081 aspace->start, aspace->size))
1082 {
1083 LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, old_address,
1084 aspace->start, (char *)aspace->start + aspace->size);
1085 }
1086 else if (!ALIGNED(old_address))
1087 {
1088 LOG_I("%s(old_address=%p): Unaligned address", __func__, old_address);
1089 }
1090 else
1091 {
1092 /**
1093 * Brief: re-arrange the address space to remove existing page mappings
1094 * in [unmap_start, unmap_start + unmap_len)
1095 */
1096 old_size = RT_ALIGN(old_size, ARCH_PAGE_SIZE);
1097
1098 WR_LOCK(aspace);
1099 {
1100 rt_varea_t existed;
1101 struct _mm_range unmap_range;
1102
1103 unmap_range.start = old_address;
1104 unmap_range.end = old_address + old_size - 1;
1105
1106 existed = _aspace_bst_search_overlap(aspace, unmap_range);
1107 if (existed && existed->mem_obj && existed->mem_obj->on_varea_mremap)
1108 {
1109 ret = existed->mem_obj->on_varea_mremap(existed, new_size, flags, new_address);
1110 }
1111 }
1112 WR_UNLOCK(aspace);
1113
1114 if (ret)
1115 {
1116 int error = rt_aspace_unmap_range(aspace, old_address, old_size);
1117 if (error != RT_EOK)
1118 {
1119 LOG_I("%s: unmap old failed, addr %p size %p", __func__, old_address, old_size);
1120 }
1121 }
1122 }
1123
1124 return ret;
1125 }
1126
1127 static inline void *_lower(void *a, void *b)
1128 {
1129 return a < b ? a : b;
1130 }
1131
1132 static inline void *_align(void *va, rt_ubase_t align_mask)
1133 {
1134 return (void *)((rt_ubase_t)((char *)va + ~align_mask) & align_mask);
1135 }
1136
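/**
 * Walk the vareas in ascending address order starting from `varea` and
 * return the first aligned gap of at least `req_size` bytes inside `limit`,
 * or RT_NULL if none is found.
 */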
1137 static void *_ascending_search(rt_varea_t varea, rt_size_t req_size,
1138 rt_ubase_t align_mask, struct _mm_range limit)
1139 {
1140 void *ret = RT_NULL;
1141 while (varea && varea->start < limit.end)
1142 {
1143 char *candidate = (char *)varea->start + varea->size;
1144 candidate = _align(candidate, align_mask);
1145
1146 if (candidate > (char *)limit.end || (char *)limit.end - candidate + 1 < req_size)
1147 break;
1148
1149 rt_varea_t nx_va = ASPACE_VAREA_NEXT(varea);
1150 if (nx_va)
1151 {
1152 if (candidate < (char *)nx_va->start)
1153 {
1154 rt_size_t gap_size =
1155 (char *)_lower(limit.end, (char *)nx_va->start - 1) -
1156 candidate + 1;
1157
1158 if (gap_size >= req_size)
1159 {
1160 ret = candidate;
1161 break;
1162 }
1163 }
1164 }
1165 else
1166 {
1167 ret = candidate;
1168 }
1169 varea = nx_va;
1170 }
1171 return ret;
1172 }
1173
1174 /** find a suitable place in [limit_start, limit_end] */
1175 static void *_find_head_and_asc_search(rt_aspace_t aspace, rt_size_t req_size,
1176 rt_ubase_t align_mask,
1177 struct _mm_range limit)
1178 {
1179 void *va = RT_NULL;
1180 char *candidate = _align(limit.start, align_mask);
1181
1182 rt_varea_t varea = _aspace_bst_search_exceed(aspace, candidate);
1183 if (varea)
1184 {
1185 rt_size_t gap_size = (char *)varea->start - candidate;
1186 if (gap_size >= req_size)
1187 {
1188 /* try previous memory region of varea if possible */
1189 rt_varea_t former = ASPACE_VAREA_PREV(varea);
1190 if (former)
1191 {
1192 candidate = _align((char *)former->start + former->size, align_mask);
1193 gap_size = (char *)varea->start - candidate;
1194
1195 if (gap_size >= req_size)
1196 va = candidate;
1197 else
1198 va = _ascending_search(varea, req_size, align_mask, limit);
1199 }
1200 else
1201 {
1202 va = candidate;
1203 }
1204 }
1205 else
1206 {
1207 va = _ascending_search(varea, req_size, align_mask, limit);
1208 }
1209 }
1210 else
1211 {
1212 rt_size_t gap_size = (char *)limit.end - candidate + 1;
1213
1214 if (gap_size >= req_size)
1215 va = candidate;
1216 }
1217
1218 return va;
1219 }
1220
1221 /**
1222 * Find a memory region that:
1223 * - is free
1224 * - sits inside the limit range
1225 * - meets the alignment requirement
1226 */
1227 static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
1228 void *limit_start, rt_size_t limit_size,
1229 mm_flag_t flags)
1230 {
1231 rt_varea_t varea = NULL;
1232 void *va = RT_NULL;
1233 struct _mm_range limit = {limit_start, (char *)limit_start + limit_size - 1};
1234
1235 rt_ubase_t align_mask = ~0ul;
1236 if (flags & MMF_REQUEST_ALIGN)
1237 {
1238 align_mask = ~((1 << MMF_GET_ALIGN(flags)) - 1);
1239 }
1240
1241 if (flags & MMF_MAP_FIXED)
1242 {
1243 struct _mm_range range = {prefer, (char *)prefer + req_size - 1};
1244
1245 /* the caller should guarantee that the requested region is legal */
1246 RT_ASSERT(!_not_in_range(flags, prefer, req_size, limit_start, limit_size));
1247
1248 varea = _aspace_bst_search_overlap(aspace, range);
1249 if (!varea)
1250 {
1251 va = prefer;
1252 }
1253 else
1254 {
1255 /* region not freed */
1256 }
1257 }
1258 else if (prefer != RT_NULL)
1259 {
1260 struct _mm_range range;
1261
1262 /* round the preferred address up to the requested alignment */
1263 prefer = _align(prefer, align_mask);
1264 if (_not_in_range(flags, prefer, req_size, limit_start, limit_size))
1265 {
1266 prefer = limit_start;
1267 }
1268
1269 range.start = prefer;
1270 range.end = (char *)prefer + req_size - 1;
1271 varea = _aspace_bst_search_overlap(aspace, range);
1272
1273 if (!varea)
1274 {
1275 /* if preferred and free, just return the prefer region */
1276 va = prefer;
1277 }
1278 else
1279 {
1280 /* search from `varea` in ascending order */
1281 va = _ascending_search(varea, req_size, align_mask, limit);
1282 if (va == RT_NULL)
1283 {
1284 /* rewind to first range */
1285 limit.end = (char *)varea->start - 1;
1286 va = _find_head_and_asc_search(aspace, req_size, align_mask,
1287 limit);
1288 }
1289 }
1290 }
1291 else
1292 {
1293 va = _find_head_and_asc_search(aspace, req_size, align_mask, limit);
1294 }
1295
1296 return va;
1297 }
1298
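/**
 * Populate (prefetch) `npage` pages starting at `addr`; the range must be
 * page aligned and lie entirely inside a single existing varea.
 */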
1299 int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
1300 {
1301 int err = RT_EOK;
1302 rt_varea_t varea;
1303 char *end = (char *)addr + (npage << ARCH_PAGE_SHIFT);
1304
1305 WR_LOCK(aspace);
1306 varea = _aspace_bst_search(aspace, addr);
1307 WR_UNLOCK(aspace);
1308
1309 if (!varea)
1310 {
1311 LOG_W("%s: varea not exist(addr=%p)", __func__, addr);
1312 err = -RT_ENOENT;
1313 }
1314 else if ((char *)addr >= end || (rt_size_t)addr & ARCH_PAGE_MASK ||
1315 _not_in_range(MMF_MAP_FIXED, addr, npage << ARCH_PAGE_SHIFT,
1316 varea->start, varea->size))
1317 {
1318 LOG_W("%s: Unaligned parameter or out of range", __func__);
1319 err = -RT_EINVAL;
1320 }
1321 else
1322 {
1323 err = _do_prefetch(aspace, varea, addr, npage << ARCH_PAGE_SHIFT);
1324 }
1325 return err;
1326 }
1327
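/* Map one kernel page (given by its kernel virtual address `page`) at
 * `vaddr` inside the varea, using the varea's attribute. */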
1328 int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page)
1329 {
1330 int err = RT_EOK;
1331 void *page_pa = rt_kmem_v2p(page);
1332
1333 if (!varea || !vaddr || !page)
1334 {
1335 LOG_W("%s(%p,%p,%p): invalid input", __func__, varea, vaddr, page);
1336 err = -RT_EINVAL;
1337 }
1338 else if (page_pa == ARCH_MAP_FAILED)
1339 {
1340 LOG_W("%s: page is not in kernel space", __func__);
1341 err = -RT_ERROR;
1342 }
1343 else if (_not_in_range(MMF_MAP_FIXED, vaddr, ARCH_PAGE_SIZE,
1344 varea->start, varea->size))
1345 {
1346 LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
1347 vaddr, ARCH_PAGE_SIZE, varea->start, varea->size);
1348 err = -RT_EINVAL;
1349 }
1350 else
1351 {
1352 err = _do_named_map(
1353 varea->aspace,
1354 varea,
1355 vaddr,
1356 ARCH_PAGE_SIZE,
1357 MM_PA_TO_OFF(page_pa),
1358 varea->attr
1359 );
1360 }
1361
1362 return err;
1363 }
1364
1365 int rt_varea_unmap_page(rt_varea_t varea, void *vaddr)
1366 {
1367 void *va_aligned = (void *)RT_ALIGN_DOWN((rt_base_t)vaddr, ARCH_PAGE_SIZE);
1368 return rt_varea_unmap_range(varea, va_aligned, ARCH_PAGE_SIZE);
1369 }
1370
1371 /**
1372 * @note Caller should take care of synchronization of its varea among all the map/unmap operation
1373 */
1374 int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length)
1375 {
1376 int err;
1377 if (!varea || !vaddr || !paddr || !length ||
1378 !ALIGNED(vaddr) || !ALIGNED(paddr) || !(ALIGNED(length)))
1379 {
1380 LOG_W("%s(%p,%p,%p,%lx): invalid input", __func__, varea, vaddr, paddr, length);
1381 err = -RT_EINVAL;
1382 }
1383 else if (_not_in_range(MMF_MAP_FIXED, vaddr, length,
1384 varea->start, varea->size))
1385 {
1386 LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
1387 vaddr, length, varea->start, varea->size);
1388 err = -RT_EINVAL;
1389 }
1390 else
1391 {
1392 err = _do_named_map(
1393 varea->aspace,
1394 varea,
1395 vaddr,
1396 length,
1397 MM_PA_TO_OFF(paddr),
1398 varea->attr
1399 );
1400 }
1401 return err;
1402 }
1403
1404 /**
1405 * @note Caller should take care of synchronization of its varea among all the map/unmap operation
1406 */
1407 int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length)
1408 {
1409 int err;
1410 rt_base_t va_align;
1411
1412 if (!varea || !vaddr || !length)
1413 {
1414 LOG_W("%s(%p,%p,%lx): invalid input", __func__, varea, vaddr, length);
1415 err = -RT_EINVAL;
1416 }
1417 else if (_not_in_range(MMF_MAP_FIXED, vaddr, length,
1418 varea->start, varea->size))
1419 {
1420 LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
1421 vaddr, length, varea->start, varea->size);
1422 err = -RT_EINVAL;
1423 }
1424 else
1425 {
1426 va_align = RT_ALIGN_DOWN((rt_base_t)vaddr, ARCH_PAGE_SIZE);
1427 rt_hw_mmu_unmap(varea->aspace, (void *)va_align, length);
1428 rt_hw_tlb_invalidate_range(varea->aspace, (void *)va_align, length, ARCH_PAGE_SIZE);
1429 err = RT_EOK;
1430 }
1431 return err;
1432 }
1433
1434 int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
1435 {
1436 return -RT_ENOSYS;
1437 }
1438
1439 int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd)
1440 {
1441 int err;
1442 rt_varea_t varea;
1443
1444 WR_LOCK(aspace);
1445 varea = _aspace_bst_search(aspace, addr);
1446 WR_UNLOCK(aspace);
1447
1448 if (varea)
1449 {
1450 err = rt_hw_mmu_control(aspace, varea->start, varea->size, cmd);
1451 if (err == RT_EOK)
1452 {
1453 rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
1454 }
1455 }
1456 else
1457 {
1458 err = -RT_ENOENT;
1459 }
1460
1461 return err;
1462 }
1463
1464 int rt_aspace_traversal(rt_aspace_t aspace,
1465 int (*fn)(rt_varea_t varea, void *arg), void *arg)
1466 {
1467 rt_varea_t varea;
1468 rt_varea_t next;
1469 WR_LOCK(aspace);
1470 varea = ASPACE_VAREA_FIRST(aspace);
1471 while (varea)
1472 {
1473 next = ASPACE_VAREA_NEXT(varea);
1474 fn(varea, arg);
1475 varea = next;
1476 }
1477 WR_UNLOCK(aspace);
1478
1479 return 0;
1480 }
1481
1482 static int _dump(rt_varea_t varea, void *arg)
1483 {
1484 if (varea->mem_obj && varea->mem_obj->get_name)
1485 {
1486 rt_kprintf("[%p - %p] %s\n", varea->start, (char *)varea->start + varea->size,
1487 varea->mem_obj->get_name(varea));
1488 }
1489 else
1490 {
1491 rt_kprintf("[%p - %p] phy-map\n", varea->start, (char *)varea->start + varea->size);
1492 rt_kprintf("\t\\_ paddr = %p\n", varea->offset << MM_PAGE_SHIFT);
1493 }
1494 return 0;
1495 }
1496
1497 void rt_aspace_print_all(rt_aspace_t aspace)
1498 {
1499 rt_aspace_traversal(aspace, _dump, NULL);
1500 }
1501
1502 static int _count_vsz(rt_varea_t varea, void *arg)
1503 {
1504 rt_base_t *pvsz = arg;
1505 RT_ASSERT(varea);
1506 *pvsz = *pvsz + varea->size;
1507 return 0;
1508 }
1509
1510 rt_base_t rt_aspace_count_vsz(rt_aspace_t aspace)
1511 {
1512 rt_base_t vsz = 0;
1513 rt_aspace_traversal(aspace, _count_vsz, &vsz);
1514 return vsz;
1515 }
1516
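/**
 * Traversal callback that replays one varea of the source aspace into the
 * destination aspace (passed as arg): physical mappings are re-mapped with
 * rt_aspace_map_phy(), object-backed mappings with rt_aspace_map() at a
 * fixed address. Returns non-zero if the copy did not land at the same
 * virtual address.
 */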
1517 static int _dup_varea(rt_varea_t src_varea, void *arg)
1518 {
1519 int err;
1520 rt_aspace_t dst = arg;
1521 rt_aspace_t src = src_varea->aspace;
1522
1523 void *pa = RT_NULL;
1524 void *va = RT_NULL;
1525 rt_mem_obj_t mem_obj = src_varea->mem_obj;
1526
1527 if (!mem_obj)
1528 {
1529 /* duplicate a physical mapping */
1530 pa = rt_hw_mmu_v2p(src, (void *)src_varea->start);
1531 RT_ASSERT(pa != ARCH_MAP_FAILED);
1532 struct rt_mm_va_hint hint = {.flags = src_varea->flag,
1533 .limit_range_size = dst->size,
1534 .limit_start = dst->start,
1535 .prefer = src_varea->start,
1536 .map_size = src_varea->size};
1537 err = rt_aspace_map_phy(dst, &hint, src_varea->attr,
1538 MM_PA_TO_OFF(pa), &va);
1539 if (err != RT_EOK)
1540 {
1541 LOG_W("%s: aspace map failed at %p with size %p", __func__,
1542 src_varea->start, src_varea->size);
1543 }
1544 }
1545 else
1546 {
1547 /* duplicate a mem_obj backing mapping */
1548 rt_base_t flags = src_varea->flag | MMF_MAP_FIXED;
1549 flags &= ~MMF_STATIC_ALLOC;
1550 flags &= ~MMF_PREFETCH;
1551 va = src_varea->start;
1552
1553 err = rt_aspace_map(dst, &va, src_varea->size, src_varea->attr,
1554 flags, mem_obj, src_varea->offset);
1555 if (err != RT_EOK)
1556 {
1557 LOG_W("%s: aspace map failed at %p with size %p", __func__,
1558 src_varea->start, src_varea->size);
1559 }
1560 }
1561
1562 if (va != (void *)src_varea->start)
1563 {
1564 return -1;
1565 }
1566 return 0;
1567 }
1568
1569 struct _compare_param {
1570 rt_aspace_t dst;
1571 int rc;
1572 };
1573
1574 rt_err_t rt_aspace_duplicate_locked(rt_aspace_t src, rt_aspace_t dst)
1575 {
1576 return rt_aspace_traversal(src, _dup_varea, dst);
1577 }
1578
1579 rt_inline int _varea_same(rt_varea_t a, rt_varea_t b)
1580 {
1581 return a->attr == b->attr && a->flag == b->flag && a->mem_obj == b->mem_obj;
1582 }
1583
1584 rt_inline void _dump_varea(rt_varea_t varea)
1585 {
1586 LOG_W("%s(attr=0x%lx, flags=0x%lx, start=0x%lx, size=0x%lx, mem_obj=%p)", VAREA_NAME(varea), varea->attr, varea->flag, varea->start, varea->size, varea->mem_obj);
1587 }
1588
1589 static int _compare_varea(rt_varea_t src_varea, void *arg)
1590 {
1591 struct _compare_param *param = arg;
1592 rt_varea_t dst_varea;
1593 rt_aspace_t dst = param->dst;
1594 rt_aspace_t src = src_varea->aspace;
1595
1596 dst_varea = _aspace_bst_search(dst, src_varea->start);
1597 if (dst_varea)
1598 {
1599 char *buf1 = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
1600 char *buf2 = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
1601 char *vend = src_varea->start + src_varea->size;
1602 for (char *i = src_varea->start; i < vend; i += ARCH_PAGE_SIZE)
1603 {
1604 int rc;
1605 rt_aspace_page_get(src, i, buf1);
1606 rt_aspace_page_get(dst, i, buf2);
1607 rc = memcmp(buf1, buf2, ARCH_PAGE_SIZE);
1608 if (rc)
1609 {
1610 if (param->rc == 0)
1611 param->rc = rc;
1612 LOG_E("%s(a_varea=%s, b_varea=%s)", __func__, VAREA_NAME(src_varea), VAREA_NAME(dst_varea));
1613 _dump_varea(src_varea);
1614 _dump_varea(dst_varea);
1615 RT_ASSERT(0);
1616 }
1617 }
1618
1619 rt_pages_free(buf1, 0);
1620 rt_pages_free(buf2, 0);
1621 }
1622 else
1623 {
1624 param->rc = -RT_ENOENT;
1625 }
1626
1627 return 0;
1628 }
1629
1630 rt_err_t rt_aspace_compare(rt_aspace_t src, rt_aspace_t dst)
1631 {
1632 struct _compare_param param = {.rc = 0, .dst = dst};
1633 rt_aspace_traversal(src, _compare_varea, &param);
1634 return param.rc;
1635 }
1636
1637 /* dst is page aligned */
1638 rt_inline rt_err_t _page_put(rt_varea_t varea, void *page_va, void *buffer)
1639 {
1640 struct rt_aspace_io_msg iomsg;
1641 rt_err_t rc;
1642
1643 rt_mm_io_msg_init(&iomsg, VAREA_VA_TO_OFFSET(varea, page_va), page_va, buffer);
1644 varea->mem_obj->page_write(varea, &iomsg);
1645
1646 if (iomsg.response.status == MM_FAULT_STATUS_UNRECOVERABLE)
1647 rc = -RT_ERROR;
1648 else
1649 rc = RT_EOK;
1650 return rc;
1651 }
1652
1653 /* dst is page aligned */
1654 rt_inline rt_err_t _page_get(rt_varea_t varea, void *page_va, void *buffer)
1655 {
1656 struct rt_aspace_io_msg iomsg;
1657 rt_err_t rc;
1658
1659 rt_mm_io_msg_init(&iomsg, VAREA_VA_TO_OFFSET(varea, page_va), page_va, buffer);
1660 varea->mem_obj->page_read(varea, &iomsg);
1661
1662 if (iomsg.response.status == MM_FAULT_STATUS_UNRECOVERABLE)
1663 rc = -RT_ERROR;
1664 else
1665 rc = RT_EOK;
1666 return rc;
1667 }
1668
1669 #ifdef RT_USING_SMART
1670 #include "lwp.h"
1671 rt_inline rt_aspace_t _current_uspace(void)
1672 {
1673 rt_lwp_t this_proc = lwp_self();
1674 return this_proc ? this_proc->aspace : RT_NULL;
1675 }
1676 #else
1677 rt_inline rt_aspace_t _current_uspace(void)
1678 {
1679 return RT_NULL;
1680 }
1681 #endif
1682
1683 rt_err_t rt_aspace_page_get_phy(rt_aspace_t aspace, void *page_va, void *buffer)
1684 {
1685 rt_err_t rc = -RT_ERROR;
1686
1687 char *frame_ka = rt_hw_mmu_v2p(aspace, page_va);
1688 if (frame_ka != ARCH_MAP_FAILED)
1689 {
1690 frame_ka = rt_kmem_p2v(frame_ka);
1691 if (frame_ka)
1692 {
1693 rt_memcpy(buffer, frame_ka, ARCH_PAGE_SIZE);
1694 rc = RT_EOK;
1695 }
1696 else if (aspace == _current_uspace() || aspace == &rt_kernel_space)
1697 {
1698 /* direct IO */
1699 rt_memcpy(buffer, page_va, ARCH_PAGE_SIZE);
1700 rc = RT_EOK;
1701 }
1702 else
1703 {
1704 /* user memory region remap ? */
1705 LOG_W("%s(aspace=0x%lx,va=%p): Operation not support",
1706 __func__, aspace, page_va);
1707 rc = -RT_ENOSYS;
1708 }
1709 }
1710 else
1711 {
1712 LOG_W("%s(aspace=0x%lx,va=%p): PTE not existed",
1713 __func__, aspace, page_va);
1714 rc = -RT_ENOENT;
1715 }
1716 return rc;
1717 }
1718
1719 rt_err_t rt_aspace_page_put_phy(rt_aspace_t aspace, void *page_va, void *buffer)
1720 {
1721 rt_err_t rc = -RT_ERROR;
1722
1723 char *frame_ka = rt_hw_mmu_v2p(aspace, page_va);
1724 if (frame_ka != ARCH_MAP_FAILED)
1725 {
1726 frame_ka = rt_kmem_p2v(frame_ka);
1727 if (frame_ka)
1728 {
1729 rt_memcpy(frame_ka, buffer, ARCH_PAGE_SIZE);
1730 rc = RT_EOK;
1731 }
1732 else if (aspace == _current_uspace() || aspace == &rt_kernel_space)
1733 {
1734 /* direct IO */
1735 rt_memcpy(page_va, buffer, ARCH_PAGE_SIZE);
1736 rc = RT_EOK;
1737 }
1738 else
1739 {
1740 /* user memory region remap ? */
1741 LOG_W("%s(aspace=0x%lx,va=%p): Operation not support",
1742 __func__, aspace, page_va);
1743 rc = -RT_ENOSYS;
1744 }
1745 }
1746 else
1747 {
1748 LOG_W("%s(aspace=0x%lx,va=%p): PTE not existed",
1749 __func__, aspace, page_va);
1750 rc = -RT_ENOENT;
1751 }
1752
1753 return rc;
1754 }
1755
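/**
 * Copy one page from `buffer` into the mapping at `page_va`. For a private
 * (copy-on-write) mapping the page is first made writable via
 * rt_varea_fix_private_locked() before the write is forwarded to the memory
 * object; mappings without a memory object fall back to
 * rt_aspace_page_put_phy().
 */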
1756 rt_err_t rt_aspace_page_put(rt_aspace_t aspace, void *page_va, void *buffer)
1757 {
1758 rt_err_t rc = -RT_ERROR;
1759 rt_varea_t varea;
1760
1761 RT_ASSERT(aspace);
1762 RD_LOCK(aspace);
1763 varea = _aspace_bst_search(aspace, page_va);
1764 if (varea && ALIGNED(page_va))
1765 {
1766 if (varea->mem_obj)
1767 {
1768 if (varea->mem_obj->page_write)
1769 {
1770 if (rt_varea_is_private_locked(varea))
1771 {
1772 RDWR_LOCK(aspace);
1773 struct rt_aspace_fault_msg msg;
1774 msg.fault_op = MM_FAULT_OP_WRITE;
1775 msg.fault_type = MM_FAULT_TYPE_GENERIC_MMU;
1776 msg.fault_vaddr = page_va;
1777 rc = rt_varea_fix_private_locked(varea, rt_hw_mmu_v2p(aspace, page_va),
1778 &msg, RT_TRUE);
1779 RDWR_UNLOCK(aspace);
1780 if (rc == MM_FAULT_FIXABLE_TRUE)
1781 {
1782 varea = _aspace_bst_search(aspace, page_va);
1783 rc = _page_put(varea, page_va, buffer);
1784 }
1785 else
1786 rc = -RT_ERROR;
1787 }
1788 else
1789 rc = _page_put(varea, page_va, buffer);
1790 }
1791 else
1792 {
1793 rc = -RT_EINVAL;
1794 LOG_I("%s: Operation not allowed", __func__);
1795 }
1796 }
1797 else
1798 {
1799 rc = rt_aspace_page_put_phy(aspace, page_va, buffer);
1800 }
1801 }
1802 else
1803 rc = -RT_EINVAL;
1804 RD_UNLOCK(aspace);
1805
1806 return rc;
1807 }
1808
1809 rt_err_t rt_aspace_page_get(rt_aspace_t aspace, void *page_va, void *buffer)
1810 {
1811 rt_err_t rc = -RT_ERROR;
1812 rt_varea_t varea;
1813
1814 /* TODO: cache the last search item */
1815 RT_ASSERT(aspace);
1816 RD_LOCK(aspace);
1817 varea = _aspace_bst_search(aspace, page_va);
1818 if (varea && ALIGNED(page_va))
1819 {
1820 if (varea->mem_obj)
1821 {
1822 if (varea->mem_obj->page_read)
1823 {
1824 rc = _page_get(varea, page_va, buffer);
1825 }
1826 else
1827 {
1828 LOG_I("%s: Operation not allowed", __func__);
1829 }
1830 }
1831 else
1832 {
1833 rc = rt_aspace_page_get_phy(aspace, page_va, buffer);
1834 }
1835 }
1836 else
1837 {
1838 rc = -RT_EINVAL;
1839 LOG_D("%s(va=%p,varea=0x%lx): Invalid address",
1840 __func__, page_va, varea);
1841 }
1842 RD_UNLOCK(aspace);
1843
1844 return rc;
1845 }
1846
1847 rt_varea_t rt_aspace_query(rt_aspace_t aspace, void *vaddr)
1848 {
1849 return _aspace_bst_search(aspace, vaddr);
1850 }
1851