/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-08-19     Shell        Support PRIVATE mapping and COW
 */

#define DBG_TAG "mm.anon"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <string.h>
#include "mm_private.h"
#include <mmu.h>

/**
 * An anonymous object directly represents mappings that have no backing file in
 * the aspace. Their only backup is the aspace->pgtbl.
 */

typedef struct rt_private_ctx {
    struct rt_mem_obj mem_obj;
    rt_aspace_t backup_aspace;
    /* both the varea and the aspace can hold a reference */
    rt_atomic_t reference;
    /* a read-only `private` object is a shared object */
    long readonly;
} *rt_private_ctx_t;

rt_inline rt_aspace_t _anon_obj_get_backup(rt_mem_obj_t mobj)
{
    rt_private_ctx_t pctx;
    rt_aspace_t backup;
    pctx = rt_container_of(mobj, struct rt_private_ctx, mem_obj);
    backup = pctx->backup_aspace;
    return backup;
}

rt_inline rt_atomic_t *_anon_obj_get_reference(rt_mem_obj_t mobj)
{
    rt_private_ctx_t pctx;
    pctx = rt_container_of(mobj, struct rt_private_ctx, mem_obj);
    return &pctx->reference;
}

rt_inline rt_private_ctx_t _anon_mobj_to_pctx(rt_mem_obj_t mobj)
{
    return rt_container_of(mobj, struct rt_private_ctx, mem_obj);
}

static long rt_aspace_anon_ref_inc(rt_mem_obj_t aobj)
{
    long rc;
    if (aobj)
    {
        rc = rt_atomic_add(_anon_obj_get_reference(aobj), 1);
        LOG_D("%s(aobj=%p) Cur %ld", __func__, aobj, rc + 1);
    }
    else
        rc = -1;
    return rc;
}

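/**
 * Drop one reference on the anonymous object. For a read-only (post-fork)
 * backup, falling back to the base reference count also deletes the backup
 * aspace and its page table; otherwise the context is freed once the last
 * reference is gone.
 */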
rt_err_t rt_aspace_anon_ref_dec(rt_mem_obj_t aobj)
{
    rt_err_t rc;
    rt_aspace_t aspace;
    rt_private_ctx_t pctx;
    long former_reference;

    if (aobj)
    {
        pctx = _anon_mobj_to_pctx(aobj);
        RT_ASSERT(pctx);

        former_reference = rt_atomic_add(_anon_obj_get_reference(aobj), -1);
        LOG_D("%s(aobj=%p) Cur %ld", __func__, aobj, former_reference - 1);
        if (pctx->readonly)
        {
            if (former_reference - 1 <= pctx->readonly)
            {
                void *pgtbl;
                RT_ASSERT(former_reference - 1 == pctx->readonly);
                aspace = _anon_obj_get_backup(aobj);

                pctx->readonly = 0;
                pgtbl = aspace->page_table;
                rt_aspace_delete(aspace);
                rt_hw_mmu_pgtbl_delete(pgtbl);
            }
        }
        else if (former_reference < 2)
        {
            aspace = _anon_obj_get_backup(aobj);
            aspace->private_object = RT_NULL;

            rt_free(pctx);
        }
        rc = RT_EOK;
    }
    else
    {
        rc = -RT_EINVAL;
    }

    return rc;
}

void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
{
    /* each mapping of a page frame in the varea holds one reference on it */
    rt_page_ref_inc(page_addr, 0);
}

/**
 * Private unmapping of address space
 */
static void _pgmgr_pop_all(rt_varea_t varea)
{
    rt_aspace_t aspace = varea->aspace;
    char *iter = varea->start;
    char *end_addr = iter + varea->size;

    RT_ASSERT(iter < end_addr);
    RT_ASSERT(!((long)iter & ARCH_PAGE_MASK));
    RT_ASSERT(!((long)end_addr & ARCH_PAGE_MASK));

    for (; iter != end_addr; iter += ARCH_PAGE_SIZE)
    {
        void *page_pa = rt_hw_mmu_v2p(aspace, iter);
        char *page_va = rt_kmem_p2v(page_pa);
        if (page_pa != ARCH_MAP_FAILED && page_va)
        {
            rt_varea_unmap_page(varea, iter);
            rt_pages_free(page_va, 0);
        }
    }
}

static void _pgmgr_pop_range(rt_varea_t varea, void *rm_start, void *rm_end)
{
    void *page_va;

    RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
    RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
    while (rm_start != rm_end)
    {
        page_va = rt_hw_mmu_v2p(varea->aspace, rm_start);

        if (page_va != ARCH_MAP_FAILED)
        {
            page_va -= PV_OFFSET;
            LOG_D("%s: free page %p", __func__, page_va);
            rt_varea_unmap_page(varea, rm_start);
            rt_pages_free(page_va, 0);
        }
        rm_start += ARCH_PAGE_SIZE;
    }
}

static const char *_anon_get_name(rt_varea_t varea)
{
    return varea->aspace == _anon_obj_get_backup(varea->mem_obj) ? "anonymous" : "reference";
}

/**
 * Migration handler on varea re-construction
 */

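/**
 * Open a varea on the anonymous object: take one reference. For a varea that
 * lives in the backup aspace itself, its object offset is derived from its
 * start address.
 */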
static void _anon_varea_open(struct rt_varea *varea)
{
    rt_aspace_anon_ref_inc(varea->mem_obj);

    if (varea->aspace == _anon_obj_get_backup(varea->mem_obj))
        varea->offset = MM_PA_TO_OFF(varea->start);

    varea->data = NULL;
}

static void _anon_varea_close(struct rt_varea *varea)
{
    rt_aspace_anon_ref_dec(varea->mem_obj);

    /* unmap and dereference page frames in the varea region */
    _pgmgr_pop_all(varea);
}

static rt_err_t _anon_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}

static rt_err_t _anon_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    char *varea_start = varea->start;
    void *rm_start;
    void *rm_end;

    if (varea_start == (char *)new_start)
    {
        rm_start = varea_start + size;
        rm_end = varea_start + varea->size;
    }
    else /* if (varea_start < (char *)new_start) */
    {
        RT_ASSERT(varea_start < (char *)new_start);
        rm_start = varea_start;
        rm_end = new_start;
    }

    _pgmgr_pop_range(varea, rm_start, rm_end);
    return RT_EOK;
}

static rt_err_t _anon_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    /* remove the resource in the unmap region, and do nothing for the subset */
    _pgmgr_pop_range(existed, unmap_start, (char *)unmap_start + unmap_len);

    _anon_varea_open(subset);
    return RT_EOK;
}

static rt_err_t _anon_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    /* do nothing for the varea merge */
    return RT_EOK;
}

/**
 * Private mapping of address space
 */

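/**
 * Map the page frame carried in `msg` at `fault_addr` inside `varea`, taking a
 * reference on the page when the mapping succeeds.
 */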
rt_inline void _map_page_in_varea(rt_aspace_t aspace, rt_varea_t varea,
                                  struct rt_aspace_fault_msg *msg, char *fault_addr)
{
    char *page_va = msg->response.vaddr;
    if (rt_varea_map_page(varea, fault_addr, page_va) == RT_EOK)
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
        rt_varea_pgmgr_insert(varea, page_va);
    }
    else
    {
        msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
        LOG_W("%s: failed to map page into varea", __func__);
    }
}

/* page frame inquiry or allocation in the backup address space */
static void *_get_page_from_backup(rt_aspace_t backup, rt_base_t offset_in_mobj)
{
    void *frame_pa;
    char *backup_addr;
    rt_varea_t backup_varea;
    void *rc = RT_NULL;

    backup_addr = (char *)(offset_in_mobj << MM_PAGE_SHIFT);
    backup_varea = rt_aspace_query(backup, backup_addr);

    if (backup_varea)
    {
        /* synchronize multiple requests through the aspace lock of the backup */
        WR_LOCK(backup);

        frame_pa = rt_hw_mmu_v2p(backup, backup_addr);
        if (frame_pa == ARCH_MAP_FAILED)
        {
            /* provide the page in the backup varea */
            struct rt_aspace_fault_msg msg;
            msg.fault_op = MM_FAULT_OP_WRITE;
            msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
            msg.fault_vaddr = backup_addr;
            msg.off = offset_in_mobj;
            rt_mm_fault_res_init(&msg.response);

            rt_mm_dummy_mapper.on_page_fault(backup_varea, &msg);
            if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
            {
                _map_page_in_varea(backup, backup_varea, &msg, backup_addr);
                if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
                {
                    rc = msg.response.vaddr;
                }
                rt_pages_free(msg.response.vaddr, 0);
            }
        }
        else
        {
            rc = rt_kmem_p2v(frame_pa);
            if (!rc)
                RT_ASSERT(0 && "No kernel address of target page frame");
        }
        WR_UNLOCK(backup);
    }
    else
    {
        /* out of range error */
        LOG_E("(backup_addr=%p): Page request out of range", backup_addr);
    }

    return rc;
}

/* get the backup page in kernel space for an address in user space */
static void _fetch_page_for_varea(struct rt_varea *varea, struct rt_aspace_fault_msg *msg, rt_bool_t need_map)
{
    void *paddr;
    char *frame_ka;
    rt_aspace_t curr_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    RDWR_LOCK(curr_aspace);

    /**
     * If the page is already mapped (another thread may have won the race,
     * taken the lock and mapped the page first), just report success.
     */
    paddr = rt_hw_mmu_v2p(curr_aspace, msg->fault_vaddr);
    if (paddr == ARCH_MAP_FAILED)
    {
        if (backup == curr_aspace)
        {
            rt_mm_dummy_mapper.on_page_fault(varea, msg);
            if (msg->response.status != MM_FAULT_STATUS_UNRECOVERABLE)
            {
                /* when backup == curr_aspace, a page fetch is always paired with a PTE fill */
                _map_page_in_varea(backup, varea, msg, msg->fault_vaddr);
                if (msg->response.status != MM_FAULT_STATUS_UNRECOVERABLE)
                {
                    rt_pages_free(msg->response.vaddr, 0);
                }
            }
        }
        else
        {
            frame_ka = _get_page_from_backup(backup, msg->off);
            if (frame_ka)
            {
                msg->response.vaddr = frame_ka;
                msg->response.size = ARCH_PAGE_SIZE;
                if (!need_map)
                {
                    msg->response.status = MM_FAULT_STATUS_OK;
                }
                else
                {
                    _map_page_in_varea(curr_aspace, varea, msg, msg->fault_vaddr);
                }
            }
        }
    }
    else
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
    }
    RDWR_UNLOCK(curr_aspace);
}

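/* page-fault handler of the anonymous object: fetch the page and map it eagerly */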
static void _anon_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    _fetch_page_for_varea(varea, msg, RT_TRUE);
}

static void read_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)
{
    if (rt_aspace_page_get_phy(aspace, iomsg->fault_vaddr, iomsg->buffer_vaddr) == RT_EOK)
        iomsg->response.status = MM_FAULT_STATUS_OK;
}

static void _anon_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
    rt_aspace_t curr_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    if (rt_hw_mmu_v2p(curr_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    {
        struct rt_aspace_fault_msg msg;
        msg.fault_op = MM_FAULT_OP_READ;
        msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
        msg.fault_vaddr = iomsg->fault_vaddr;
        msg.off = iomsg->off;
        rt_mm_fault_res_init(&msg.response);

        _fetch_page_for_varea(varea, &msg, RT_FALSE);
        if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
        {
            void *saved_fault_va = iomsg->fault_vaddr;
            iomsg->fault_vaddr = (void *)(iomsg->off << MM_PAGE_SHIFT);
            read_by_mte(backup, iomsg);
            iomsg->fault_vaddr = saved_fault_va;
        }
    }
    else
    {
        read_by_mte(curr_aspace, iomsg);
    }
}

static void write_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)
{
    if (rt_aspace_page_put_phy(aspace, iomsg->fault_vaddr, iomsg->buffer_vaddr) == RT_EOK)
        iomsg->response.status = MM_FAULT_STATUS_OK;
}

static void _anon_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
    rt_aspace_t from_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    if (from_aspace != backup)
    {
        /* varea in guest aspace cannot modify the page */
        iomsg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
    }
    else if (rt_hw_mmu_v2p(from_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    {
        struct rt_aspace_fault_msg msg;
        msg.fault_op = MM_FAULT_OP_WRITE;
        msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
        msg.fault_vaddr = iomsg->fault_vaddr;
        msg.off = iomsg->off;
        rt_mm_fault_res_init(&msg.response);

        _fetch_page_for_varea(varea, &msg, RT_TRUE);
        if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
        {
            write_by_mte(backup, iomsg);
        }
        else
        {
            /* mapping failed, report an error */
            iomsg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
        }
    }
    else
    {
        write_by_mte(backup, iomsg);
    }
}

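/* operation table shared by every private (anonymous) memory object */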
static struct rt_private_ctx _priv_obj = {
    .mem_obj.get_name = _anon_get_name,
    .mem_obj.on_page_fault = _anon_page_fault,
    .mem_obj.hint_free = NULL,
    .mem_obj.on_varea_open = _anon_varea_open,
    .mem_obj.on_varea_close = _anon_varea_close,
    .mem_obj.on_varea_shrink = _anon_varea_shrink,
    .mem_obj.on_varea_split = _anon_varea_split,
    .mem_obj.on_varea_expand = _anon_varea_expand,
    .mem_obj.on_varea_merge = _anon_varea_merge,
    .mem_obj.page_read = _anon_page_read,
    .mem_obj.page_write = _anon_page_write,
};

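/**
 * Allocate a private context for `aspace`, bind it as the aspace's private
 * object, and hand the initial reference to the backup aspace.
 */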
rt_inline rt_private_ctx_t rt_private_obj_create_n_bind(rt_aspace_t aspace)
{
    rt_private_ctx_t private_object;
    private_object = rt_malloc(sizeof(struct rt_private_ctx));
    if (private_object)
    {
        memcpy(&private_object->mem_obj, &_priv_obj, sizeof(_priv_obj));

        /* hold an initial reference on behalf of the backup aspace */
        rt_atomic_store(&private_object->reference, 1);

        private_object->readonly = RT_FALSE;
        private_object->backup_aspace = aspace;
        aspace->private_object = &private_object->mem_obj;
    }

    return private_object;
}

rt_inline rt_mem_obj_t _get_private_obj(rt_aspace_t aspace)
{
    rt_private_ctx_t priv;
    rt_mem_obj_t rc;
    rc = aspace->private_object;
    if (!aspace->private_object)
    {
        priv = rt_private_obj_create_n_bind(aspace);
        if (priv)
        {
            rc = &priv->mem_obj;
            aspace->private_object = rc;
        }
    }
    return rc;
}

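/**
 * Override the existing mapping at `fault_vaddr` with a one-page, fixed,
 * writable mapping backed by the private object, then install `page` there.
 */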
static int _override_map(rt_varea_t varea, rt_aspace_t aspace, void *fault_vaddr, struct rt_aspace_fault_msg *msg, void *page)
{
    int rc = MM_FAULT_FIXABLE_FALSE;
    rt_mem_obj_t private_object;
    rt_varea_t map_varea = RT_NULL;
    rt_err_t error;
    rt_size_t flags;
    rt_size_t attr;

    LOG_D("%s", __func__);

    private_object = _get_private_obj(aspace);

    if (private_object)
    {
        flags = varea->flag | MMF_MAP_FIXED;
        /* don't prefetch; do it later on demand */
        flags &= ~MMF_PREFETCH;
        attr = rt_hw_mmu_attr_add_perm(varea->attr, RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE);

        /* override the existing mapping at fault_vaddr */
        error = _mm_aspace_map(
            aspace, &map_varea, &fault_vaddr, ARCH_PAGE_SIZE, attr,
            flags, private_object, MM_PA_TO_OFF(fault_vaddr));

        if (error == RT_EOK)
        {
            msg->response.status = MM_FAULT_STATUS_OK;
            msg->response.vaddr = page;
            msg->response.size = ARCH_PAGE_SIZE;
            if (rt_varea_map_with_msg(map_varea, msg) != RT_EOK)
            {
                LOG_E("%s: fault_va=%p,(priv_va=%p,priv_sz=0x%lx) at %s", __func__, msg->fault_vaddr, map_varea->start, map_varea->size, VAREA_NAME(map_varea));
                RT_ASSERT(0 && "should never fail");
            }
            RT_ASSERT(rt_hw_mmu_v2p(aspace, msg->fault_vaddr) == (page + PV_OFFSET));
            rc = MM_FAULT_FIXABLE_TRUE;
            rt_varea_pgmgr_insert(map_varea, page);
            rt_pages_free(page, 0);
        }
        else
        {
            /* the private object will be released on destruction of the aspace */
            rt_free(map_varea);
        }
    }
    else
    {
        LOG_I("%s: out of memory", __func__);
        rc = MM_FAULT_FIXABLE_FALSE;
    }

    return rc;
}

/**
 * Replace an existing mapping with a private one. This is equivalent to:
 * => aspace_unmap(ex_varea, ...)
 * => aspace_map(...)
 */
int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
                                struct rt_aspace_fault_msg *msg,
                                rt_bool_t dont_copy)
{
    /**
     * todo: READ -> WRITE lock here
     */
    void *page;
    void *fault_vaddr;
    rt_aspace_t aspace;
    rt_mem_obj_t ex_obj;
    int rc = MM_FAULT_FIXABLE_FALSE;
    ex_obj = ex_varea->mem_obj;

    if (ex_obj)
    {
        fault_vaddr = msg->fault_vaddr;
        aspace = ex_varea->aspace;
        RT_ASSERT(!!aspace);

        /**
         * todo: what if multiple pages are required?
         */
        if (aspace->private_object == ex_obj)
        {
            RT_ASSERT(0 && "recursion");
        }
        else if (ex_obj->page_read)
        {
            page = rt_pages_alloc_tagged(0, RT_PAGE_PICK_AFFID(fault_vaddr), PAGE_ANY_AVAILABLE);
            if (page)
            {
                /** set up the message & fetch the data from the source object */
                if (!dont_copy)
                {
                    struct rt_aspace_io_msg io_msg;
                    rt_mm_io_msg_init(&io_msg, msg->off, msg->fault_vaddr, page);
                    ex_obj->page_read(ex_varea, &io_msg);
                    /**
                     * Note: even if ex_obj has already mapped the page into the
                     * varea, it's still okay since we will override it later
                     */
                    if (io_msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
                    {
                        rc = _override_map(ex_varea, aspace, fault_vaddr, msg, page);
                    }
                    else
                    {
                        rt_pages_free(page, 0);
                        LOG_I("%s: page read(va=%p) fault from %s(start=%p,size=%p)", __func__,
                            msg->fault_vaddr, VAREA_NAME(ex_varea), ex_varea->start, ex_varea->size);
                    }
                }
                else
                {
                    rc = _override_map(ex_varea, aspace, fault_vaddr, msg, page);
                }
            }
            else
            {
                LOG_I("%s: pages allocation failed", __func__);
            }
        }
        else
        {
            LOG_I("%s: no page read method provided from %s", __func__, VAREA_NAME(ex_varea));
        }
    }
    else
    {
        LOG_I("%s: unavailable memory object", __func__);
    }

    return rc;
}

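/**
 * Create an anonymous private mapping in `aspace`. Static allocation is not
 * supported here, and prefetching is disabled because pages are provided
 * lazily on page fault.
 *
 * Illustrative sketch only (attr/flags values are placeholders, not taken
 * from this file):
 *
 *   void *vaddr = RT_NULL;
 *   rc = rt_aspace_map_private(aspace, &vaddr, 4 * ARCH_PAGE_SIZE, attr, flags);
 *   // on RT_EOK, vaddr refers to 4 pages of demand-paged private memory
 */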
int rt_aspace_map_private(rt_aspace_t aspace, void **addr, rt_size_t length,
                          rt_size_t attr, mm_flag_t flags)
{
    int rc;
    rt_mem_obj_t priv_obj;

    if (flags & MMF_STATIC_ALLOC)
    {
        rc = -RT_EINVAL;
    }
    else
    {
        priv_obj = _get_private_obj(aspace);
        if (priv_obj)
        {
            flags |= MMF_MAP_PRIVATE;
            flags &= ~MMF_PREFETCH;
            rc = rt_aspace_map(aspace, addr, length, attr, flags, priv_obj, 0);
        }
        else
        {
            rc = -RT_ENOMEM;
        }
    }
    return rc;
}

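/**
 * Traversal callback: uninstall every varea that is not backed by the private
 * object of its aspace, so that only private (anonymous) mappings remain.
 */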
static int _release_shared(rt_varea_t varea, void *arg)
{
    rt_aspace_t src = varea->aspace;
    rt_mem_obj_t mem_obj = varea->mem_obj;

    if (mem_obj != _get_private_obj(src))
    {
        _varea_uninstall_locked(varea);
        if (VAREA_NOT_STATIC(varea))
        {
            rt_free(varea);
        }
    }

    return 0;
}

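/**
 * Convert `aspace` into a read-only backup: release all shared (non-private)
 * vareas and record `base_reference` so the backup can be reclaimed once the
 * last descendant reference is dropped.
 */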
static rt_err_t _convert_readonly(rt_aspace_t aspace, long base_reference)
{
    rt_mem_obj_t aobj;
    rt_private_ctx_t pctx;
    aobj = _get_private_obj(aspace);
    pctx = _anon_mobj_to_pctx(aobj);

    LOG_D("Ref(cur=%d,base=%d)", pctx->reference, base_reference);
    rt_aspace_traversal(aspace, _release_shared, 0);
    pctx->readonly = base_reference;
    return 0;
}

rt_inline void _switch_aspace(rt_aspace_t *pa, rt_aspace_t *pb)
{
    rt_aspace_t temp;
    temp = *pa;
    *pa = *pb;
    *pb = temp;
}

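/**
 * Fork the address space in `*psrc` into `*pdst`. A fresh aspace is created,
 * both it and `*pdst` are populated as duplicates of the source, the fresh
 * aspace then replaces `*psrc`, and the original source aspace is converted
 * into the read-only backup shared by the duplicates.
 */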
rt_err_t rt_aspace_fork(rt_aspace_t *psrc, rt_aspace_t *pdst)
{
    rt_err_t rc;
    void *pgtbl;
    rt_aspace_t backup;
    rt_aspace_t src = *psrc;
    rt_aspace_t dst = *pdst;
    long base_reference;

    pgtbl = rt_hw_mmu_pgtbl_create();
    if (pgtbl)
    {
        backup = rt_aspace_create(src->start, src->size, pgtbl);
        if (backup)
        {
            WR_LOCK(src);
            base_reference = rt_atomic_load(_anon_obj_get_reference(src->private_object));
            rc = rt_aspace_duplicate_locked(src, dst);
            WR_UNLOCK(src);

            if (!rc)
            {
                /* WR_LOCK(dst) is not necessary since dst is not available currently */
                rc = rt_aspace_duplicate_locked(dst, backup);
                if (!rc)
                {
                    _switch_aspace(psrc, &backup);
                    _convert_readonly(backup, base_reference);
                }
            }
        }
        else
        {
            rc = -RT_ENOMEM;
        }
    }
    else
    {
        rc = -RT_ENOMEM;
    }

    return rc;
}