/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2021-02-06     lizhirui     fixed vtable size problem
 * 2021-02-12     lizhirui     add 64-bit support for lwp_brk
 * 2021-02-19     lizhirui     add riscv64 support for lwp_user_accessable and lwp_get_from_user
 * 2021-06-07     lizhirui     modify user space bound check
 * 2022-12-25     wangxiaoyao  adapt to new mm
 * 2023-08-12     Shell        Fix parameter passing of lwp_mmap()/lwp_munmap()
 * 2023-08-29     Shell        Add API accessible()/data_get()/data_set()/data_put()
 * 2023-09-13     Shell        Add lwp_memcpy and support run-time choice of memcpy based on memory attr
 * 2023-09-19     Shell        add lwp_user_memory_remap_to_kernel
 */

#include <rtthread.h>
#include <rthw.h>
#include <string.h>

#ifdef ARCH_MM_MMU

#include "lwp_internal.h"

#include <mm_aspace.h>
#include <mm_fault.h>
#include <mm_flag.h>
#include <mm_page.h>
#include <mmu.h>
#include <page.h>

#ifdef RT_USING_MUSLLIBC
#include "libc_musl.h"
#endif

#define DBG_TAG "LwP.mman"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <stdlib.h>

#define STACK_OBJ _null_object

static const char *_null_get_name(rt_varea_t varea)
{
    return "null";
}

static void _null_page_fault(struct rt_varea *varea,
                             struct rt_aspace_fault_msg *msg)
{
    static void *null_page;

    if (!null_page)
    {
        null_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
        if (null_page)
            memset(null_page, 0, ARCH_PAGE_SIZE);
        else
            return;
    }

    msg->response.status = MM_FAULT_STATUS_OK;
    msg->response.size = ARCH_PAGE_SIZE;
    msg->response.vaddr = null_page;
}

static rt_err_t _null_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    char *varea_start = varea->start;
    void *rm_start;
    void *rm_end;

    if (varea_start == (char *)new_start)
    {
        rm_start = varea_start + size;
        rm_end = varea_start + varea->size;
    }
    else /* if (varea_start < (char *)new_start) */
    {
        RT_ASSERT(varea_start < (char *)new_start);
        rm_start = varea_start;
        rm_end = new_start;
    }

    rt_varea_unmap_range(varea, rm_start, rm_end - rm_start);
    return RT_EOK;
}

static rt_err_t _null_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    rt_varea_unmap_range(existed, unmap_start, unmap_len);
    return RT_EOK;
}

static rt_err_t _null_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}

static void _null_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    void *dest = msg->buffer_vaddr;
    memset(dest, 0, ARCH_PAGE_SIZE);

    msg->response.status = MM_FAULT_STATUS_OK;
}

static void _null_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    /* write operation is not allowed */
    msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
}

static struct rt_mem_obj _null_object = {
    .get_name = _null_get_name,
    .hint_free = RT_NULL,
    .on_page_fault = _null_page_fault,

    .page_read = _null_page_read,
    .page_write = _null_page_write,

    .on_varea_expand = _null_expand,
    .on_varea_shrink = _null_shrink,
    .on_varea_split = _null_split,
};

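/*
 * The null object is the rt_mem_obj backend for anonymous mappings in this
 * file (user stacks, the brk heap, anonymous mmap): a page fault is served
 * from a shared zero-filled page, page_read returns zeros, and a direct
 * page_write is refused.
 *
 * Illustrative sketch (hypothetical caller, not compiled here): mapping a
 * zero-filled private region backed by this object.
 *
 *     void *va = RT_NULL;
 *     rt_err_t err = rt_aspace_map(lwp->aspace, &va, 4 * ARCH_PAGE_SIZE,
 *                                  MMU_MAP_U_RWCB, MMF_MAP_PRIVATE,
 *                                  &_null_object, 0);
 *     // on success, every page of [va, va + 4 * ARCH_PAGE_SIZE) reads
 *     // back as zero until the process writes to it
 */
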
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
{
    void *stk_addr;
    int err = -RT_ENOMEM;
    const size_t flags = MMF_MAP_PRIVATE;

    err = arch_user_space_init(lwp);
    if (err == RT_EOK)
    {
        if (!is_fork)
        {
            stk_addr = (void *)USER_STACK_VSTART;
            err = rt_aspace_map(lwp->aspace, &stk_addr,
                                USER_STACK_VEND - USER_STACK_VSTART,
                                MMU_MAP_U_RWCB, flags, &STACK_OBJ, 0);
        }
    }

    return err;
}

void lwp_aspace_switch(struct rt_thread *thread)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_aspace_t to_aspace;
    void *from_tbl, *to_table;

    if (thread->lwp)
    {
        lwp = (struct rt_lwp *)thread->lwp;
        to_aspace = lwp->aspace;
        to_table = to_aspace->page_table;
    }
    else
    {
        to_aspace = &rt_kernel_space;
        /* the page table is arch dependent but not aspace->page_table */
        to_table = arch_kernel_mmu_table_get();
    }

    /* must fetch the page table currently in effect to avoid a hot update */
    from_tbl = rt_hw_mmu_tbl_get();
    if (to_table != from_tbl)
    {
        rt_hw_aspace_switch(to_aspace);
    }
}

void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    if (lwp->aspace)
        arch_user_space_free(lwp);
}

static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
                           int text)
{
    void *va = map_va;
    int ret = 0;
    rt_size_t flags = MMF_PREFETCH;

    if (text)
        flags |= MMF_TEXT;
    if (va != RT_NULL)
        flags |= MMF_MAP_FIXED;

    ret = rt_aspace_map_private(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags);
    if (ret != RT_EOK)
    {
        va = RT_NULL;
        LOG_I("lwp_map_user: failed to map %lx with size %lx, errno %d", map_va,
              map_size, ret);
    }

    return va;
}

int lwp_unmap_user(struct rt_lwp *lwp, void *va)
{
    int err = rt_aspace_unmap(lwp->aspace, va);

    return err;
}

/** fork src_lwp->aspace into dest_lwp, performed in the current context */
int lwp_fork_aspace(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp)
{
    int err;
    err = rt_aspace_fork(&src_lwp->aspace, &dest_lwp->aspace);
    if (!err)
    {
        /* do an explicit aspace switch if the page table is changed */
        lwp_aspace_switch(rt_thread_self());
    }
    return err;
}

int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}

void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    ret = _lwp_map_user(lwp, map_va, map_size, text);

    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}

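/*
 * lwp_map_user() tolerates an unaligned map_va/map_size pair: the request
 * is widened to page granularity and the in-page offset is added back to
 * the address returned by the underlying mapping call.
 *
 * Illustrative sketch (hypothetical addresses, not compiled here), with a
 * 4 KiB page size:
 *
 *     // request 0x10 bytes starting 0x30 bytes past a page boundary
 *     void *va = lwp_map_user(lwp, (void *)0x100030, 0x10, 0);
 *     // internally maps one whole page fixed at 0x100000; on success
 *     // the returned va is 0x100030 again
 */
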
static inline size_t _flags_to_attr(size_t flags)
{
    size_t attr;

    if (flags & LWP_MAP_FLAG_NOCACHE)
    {
        attr = MMU_MAP_U_RW;
    }
    else
    {
        attr = MMU_MAP_U_RWCB;
    }

    return attr;
}

static inline mm_flag_t _flags_to_aspace_flag(size_t flags)
{
    mm_flag_t mm_flag = 0;
    if (flags & LWP_MAP_FLAG_MAP_FIXED)
        mm_flag |= MMF_MAP_FIXED;
    if (flags & LWP_MAP_FLAG_PREFETCH)
        mm_flag |= MMF_PREFETCH;

    return mm_flag;
}

static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    void *va = map_va;
    int ret = 0;
    rt_varea_t varea = RT_NULL;
    mm_flag_t mm_flags;
    size_t attr;

    attr = _flags_to_attr(flags);
    mm_flags = _flags_to_aspace_flag(flags);
    ret = rt_aspace_map_private(lwp->aspace, &va, map_size,
                                attr, mm_flags);
    if (ret == RT_EOK)
    {
        varea = rt_aspace_query(lwp->aspace, va);
    }
    else
    {
        LOG_I("%s: failed to map %lx with size %lx, errno %d", __func__,
              map_va, map_size, ret);
    }

    return varea;
}

static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    return _lwp_map_user_varea(lwp, map_va, map_size, flags);
}

rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    return _map_user_varea_ext(lwp, map_va, map_size, flags);
}

rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size)
{
    return _map_user_varea_ext(lwp, map_va, map_size, LWP_MAP_FLAG_NONE);
}

void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
                       size_t map_size, int cached)
{
    int err;
    char *va;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        if (((size_t)map_va & ARCH_PAGE_MASK) !=
            ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            return 0;
        }
    }
    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);

    struct rt_mm_va_hint hint = {.flags = 0,
                                 .limit_range_size = lwp->aspace->size,
                                 .limit_start = lwp->aspace->start,
                                 .prefer = map_va,
                                 .map_size = map_size};
    if (map_va != RT_NULL)
        hint.flags |= MMF_MAP_FIXED;

    rt_size_t attr = cached ? MMU_MAP_U_RWCB : MMU_MAP_U_RW;

    err =
        rt_aspace_map_phy(lwp->aspace, &hint, attr, MM_PA_TO_OFF(map_pa), (void **)&va);
    if (err != RT_EOK)
    {
        va = RT_NULL;
        LOG_W("%s: failed to map physical memory, errno %d", __func__, err);
    }
    else
    {
        va += offset;
    }

    return va;
}

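/*
 * Illustrative sketch (hypothetical device address, not compiled here):
 * exposing an MMIO region to a user process as an uncached mapping.
 *
 *     // cached = 0 selects the uncached MMU_MAP_U_RW attribute
 *     void *uva = lwp_map_user_phy(lwp, RT_NULL, (void *)0x50000000,
 *                                  0x1000, 0);
 *     // the low bits of map_pa survive the page rounding: passing
 *     // pa 0x50000004 would return uva + 4 within the same page
 */
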
rt_base_t lwp_brk(void *addr)
{
    rt_base_t ret = -1;
    rt_varea_t varea = RT_NULL;
    struct rt_lwp *lwp = RT_NULL;
    size_t size = 0;

    lwp = lwp_self();

    if (addr == RT_NULL)
    {
        addr = (char *)lwp->end_heap + 1;
    }

    if ((size_t)addr <= lwp->end_heap && (size_t)addr > USER_HEAP_VADDR)
    {
        ret = (size_t)addr;
    }
    else if ((size_t)addr <= USER_HEAP_VEND)
    {
        size = RT_ALIGN((size_t)addr - lwp->end_heap, ARCH_PAGE_SIZE);
        varea = lwp_map_user_varea_ext(lwp, (void *)lwp->end_heap, size, LWP_MAP_FLAG_PREFETCH);
        if (varea)
        {
            lwp->end_heap = (long)((char *)varea->start + varea->size);
            ret = lwp->end_heap;
        }
    }

    return ret;
}

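/*
 * lwp_brk() implements sbrk-style heap growth: a request at or below the
 * current break simply returns the requested address (no memory is
 * released), while growth maps prefetched anonymous pages at the old break.
 *
 * Illustrative sketch of the call-level behavior (not compiled here):
 *
 *     // RT_NULL is treated as "one byte past the current break", so it
 *     // grows the heap by (at most) one page and returns the new break
 *     rt_base_t brk = lwp_brk(RT_NULL);
 *     // asking for the current break again is a no-op
 *     rt_base_t same = lwp_brk((void *)brk);
 */
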
rt_inline rt_mem_obj_t _get_mmap_obj(struct rt_lwp *lwp)
{
    return &_null_object;
}

rt_inline rt_bool_t _memory_threshold_ok(void)
{
    #define GUARDIAN_BITS (10)
    size_t total, free;

    rt_page_get_info(&total, &free);
    /* refuse when the free pages (4 KiB each) amount to less than 1 MiB */
    if (free * (0x1000) < 0x100000)
    {
        LOG_I("%s: low on system memory", __func__);
        return RT_FALSE;
    }

    return RT_TRUE;
}

rt_inline long _uflag_to_kernel(long flag)
{
    flag &= ~MMF_MAP_FIXED;
    flag &= ~MMF_MAP_PRIVATE;
    flag &= ~MMF_MAP_PRIVATE_DONT_SYNC;
    return flag;
}

rt_inline long _uattr_to_kernel(long attr)
{
    /* Warning: be careful with the case where the user attribute is not writable */
    return attr;
}

static void _prefetch_mmap(rt_aspace_t aspace, void *addr, long size)
{
    struct rt_aspace_fault_msg msg;

    msg.fault_op = MM_FAULT_OP_WRITE;
    msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;

    for (char *base = addr; size > 0; base += ARCH_PAGE_SIZE, size -= ARCH_PAGE_SIZE)
    {
        msg.fault_vaddr = base;
        msg.off = (long)base >> MM_PAGE_SHIFT;
        rt_aspace_fault_try_fix(aspace, &msg);
    }
}

void *lwp_user_memory_remap_to_kernel(rt_lwp_t lwp, void *uaddr, size_t length)
{
    long kattr;
    long kflag;
    long offset_in_mobj;
    long offset_in_page;
    rt_err_t error;
    rt_varea_t uarea;
    rt_mem_obj_t mobj;
    void *kaddr = 0;

    uarea = rt_aspace_query(lwp->aspace, uaddr);
    if (uarea)
    {
        /* setup the identical mapping, and align up for address & length */
        kattr = _uattr_to_kernel(uarea->attr);
        kflag = _uflag_to_kernel(uarea->flag);
        offset_in_mobj = uarea->offset + ((long)uaddr - (long)uarea->start) / ARCH_PAGE_SIZE;
        mobj = uarea->mem_obj;
        offset_in_page = (long)uaddr & ARCH_PAGE_MASK;
        length = RT_ALIGN(length + offset_in_page, ARCH_PAGE_SIZE);
        error = rt_aspace_map(&rt_kernel_space, &kaddr, length, kattr, kflag, mobj, offset_in_mobj);
        if (error)
        {
            LOG_I("%s(length=0x%lx,attr=0x%lx,flags=0x%lx): do map failed", __func__, length, kattr, kflag);
            kaddr = 0;
        }
        else
        {
            /* TODO: {make a memory lock?} */
            LOG_D("%s(length=0x%lx,attr=0x%lx,flags=0x%lx,offset=0x%lx) => %p", __func__, length, kattr, kflag, offset_in_mobj, kaddr);
            _prefetch_mmap(&rt_kernel_space, kaddr, length);
            kaddr = (char *)kaddr + offset_in_page;
        }
    }

    return kaddr;
}

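/*
 * Illustrative sketch (hypothetical buffer, not compiled here): giving the
 * kernel a stable alias of a user buffer for the duration of a request,
 * e.g. so a driver can fill it without switching address spaces. The
 * cleanup call is an assumption of this sketch; note the TODO above, as
 * the pages are not otherwise locked.
 *
 *     void *kbuf = lwp_user_memory_remap_to_kernel(lwp, ubuf, len);
 *     if (kbuf)
 *     {
 *         memcpy(kbuf, payload, len);  // same physical pages as ubuf
 *         rt_aspace_unmap(&rt_kernel_space, kbuf); // drop the alias
 *     }
 */
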
#include <dfs_dentry.h>
#define _AFFBLK_PGOFFSET (RT_PAGE_AFFINITY_BLOCK_SIZE >> MM_PAGE_SHIFT)

static rt_base_t _aligned_for_weak_mapping(off_t *ppgoff, rt_size_t *plen, rt_size_t *palign)
{
    off_t aligned_pgoffset, pgoffset = *ppgoff;
    rt_size_t length = *plen;
    rt_size_t min_align_size = *palign;
    rt_base_t aligned_size = 0;

    if (pgoffset >= 0)
    {
        /* force an alignment */
        aligned_pgoffset =
            RT_ALIGN_DOWN(pgoffset, RT_PAGE_AFFINITY_BLOCK_SIZE >> MM_PAGE_SHIFT);
        aligned_size = (pgoffset - aligned_pgoffset) << MM_PAGE_SHIFT;

        if (aligned_pgoffset != pgoffset)
        {
            /**
             * If the requested pgoffset is not sitting on an aligned page
             * offset, expand the requested mapping to force an alignment.
             */
            length += aligned_size;
            pgoffset = aligned_pgoffset;
        }

        /**
         * As this is a weak mapping, we can pick any reasonable address for
         * our requirement.
         */
        min_align_size = RT_PAGE_AFFINITY_BLOCK_SIZE;
    }
    else
    {
        RT_ASSERT(0 && "Unexpected input");
    }

    *ppgoff = pgoffset;
    *plen = length;
    *palign = min_align_size;

    return aligned_size;
}

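/*
 * Worked example (assuming 4 KiB pages, i.e. MM_PAGE_SHIFT == 12, and a
 * 64 KiB affinity block, so _AFFBLK_PGOFFSET == 16): for a weak request
 * with pgoffset = 18 and length = 0x3000,
 *
 *     aligned_pgoffset = RT_ALIGN_DOWN(18, 16) = 16
 *     aligned_size     = (18 - 16) << 12       = 0x2000
 *
 * so the mapping is widened to pgoffset = 16, length = 0x5000, and the
 * caller (lwp_mmap2) adds aligned_size back to the address it returns.
 */
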
void *lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot,
                int flags, int fd, off_t pgoffset)
{
    rt_err_t rc;
    rt_size_t k_attr, k_flags, k_offset, aligned_size = 0;
    rt_size_t min_align_size = 1 << MM_PAGE_SHIFT;
    rt_aspace_t uspace;
    rt_mem_obj_t mem_obj;
    void *ret = 0;
    LOG_D("%s(addr=0x%lx,length=0x%lx,fd=%d,pgoff=0x%lx)", __func__, addr, length, fd, pgoffset);

    /* alignment for affinity page block */
    if (flags & MAP_FIXED)
    {
        if (fd != -1)
        {
            /* requested mapping address */
            rt_base_t va_affid = RT_PAGE_PICK_AFFID(addr);
            rt_base_t pgoff_affid = RT_PAGE_PICK_AFFID(pgoffset << MM_PAGE_SHIFT);

            /* warn on addresses that break the affinity alignment */
            if (va_affid != pgoff_affid)
            {
                LOG_W("Unaligned mapping address %p(pgoff=0x%lx) from fd=%d",
                    addr, pgoffset, fd);
            }
        }
        else
        {
            /* an anonymous mapping can always be aligned */
        }
    }
    else
    {
        /* weak address selection */
        aligned_size = _aligned_for_weak_mapping(&pgoffset, &length, &min_align_size);
    }

    if (fd == -1)
    {
    #ifdef RT_DEBUGGING_PAGE_THRESHOLD
        if (!_memory_threshold_ok())
            return (void *)-ENOMEM;
    #endif /* RT_DEBUGGING_PAGE_THRESHOLD */

        k_offset = MM_PA_TO_OFF(addr);
        k_flags = MMF_CREATE(lwp_user_mm_flag_to_kernel(flags) | MMF_MAP_PRIVATE,
                             min_align_size);
        k_attr = lwp_user_mm_attr_to_kernel(prot);

        uspace = lwp->aspace;
        length = RT_ALIGN(length, ARCH_PAGE_SIZE);
        mem_obj = _get_mmap_obj(lwp);

        rc = rt_aspace_map(uspace, &addr, length, k_attr, k_flags, mem_obj, k_offset);
        if (rc == RT_EOK)
        {
            ret = addr;
        }
        else
        {
            ret = (void *)lwp_errno_to_posix(rc);
        }
    }
    else
    {
        struct dfs_file *d;

        d = fd_get(fd);
        if (d)
        {
            struct dfs_mmap2_args mmap2;

            mmap2.addr = addr;
            mmap2.length = length;
            mmap2.min_align_size = min_align_size;
            mmap2.prot = prot;
            mmap2.flags = flags;
            mmap2.pgoffset = pgoffset;
            mmap2.ret = (void *)-1;
            mmap2.lwp = lwp;

            rc = dfs_file_mmap2(d, &mmap2);
            if (rc == RT_EOK)
            {
                ret = mmap2.ret;
            }
            else
            {
                ret = (void *)lwp_errno_to_posix(rc);
            }
        }
    }

    if ((long)ret <= 0)
    {
        LOG_D("%s() => %ld", __func__, ret);
    }
    else
    {
        ret = (char *)ret + aligned_size;
        LOG_D("%s() => 0x%lx", __func__, ret);
    }

    return ret;
}

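/*
 * Illustrative sketch (hypothetical sizes, not compiled here): the
 * anonymous path above (fd == -1) backs the region with the null object,
 * so the pages appear zero-filled on first touch.
 *
 *     void *p = lwp_mmap2(lwp, RT_NULL, 0x4000,
 *                         PROT_READ | PROT_WRITE,
 *                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     // errors are reported POSIX-style as small negative values
 *     if ((long)p <= 0)
 *         rt_kprintf("mmap failed: %ld\n", (long)p);
 */
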
int lwp_munmap(struct rt_lwp *lwp, void *addr, size_t length)
{
    int ret;

    RT_ASSERT(lwp);
    ret = rt_aspace_unmap_range(lwp->aspace, addr, length);
    return lwp_errno_to_posix(ret);
}

void *lwp_mremap(struct rt_lwp *lwp, void *old_address, size_t old_size,
                    size_t new_size, int flags, void *new_address)
{
    RT_ASSERT(lwp);

    return rt_aspace_mremap_range(lwp->aspace, old_address, old_size, new_size, flags, new_address);
}

size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check src */
    if (src < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (src >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_get(lwp, dst, src, size);
}

size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check dst */
    if (dst < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)dst + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_put(lwp, dst, src, size);
}

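/*
 * Illustrative sketch (hypothetical syscall handler, not compiled here):
 * the usual copy-in/copy-out pattern. A short return value means the user
 * range failed the bounds check or was not (fully) mapped.
 *
 *     struct req kreq;
 *     if (lwp_get_from_user(&kreq, (void *)user_ptr, sizeof(kreq)) != sizeof(kreq))
 *         return -EFAULT;
 *     // ... handle kreq, fill kresp ...
 *     if (lwp_put_to_user((void *)user_ptr, &kresp, sizeof(kresp)) != sizeof(kresp))
 *         return -EFAULT;
 */
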
/* unaligned access is safe only for directly-mapped (linear) kernel memory */
rt_inline rt_bool_t _can_unaligned_access(const char *addr)
{
    return rt_kmem_v2p((char *)addr) - PV_OFFSET == addr;
}

void *lwp_memcpy(void * __restrict dst, const void * __restrict src, size_t size)
{
    void *rc = dst;
    long len;

    if (lwp_in_user_space(dst))
    {
        if (!lwp_in_user_space(src))
        {
            len = lwp_put_to_user(dst, (void *)src, size);
            if (!len)
            {
                LOG_E("lwp_put_to_user(lwp=%p, dst=%p,src=%p,size=0x%lx) failed", lwp_self(), dst, src, size);
            }
        }
        else
        {
            /* user-to-user copy is not supported yet */
            LOG_W("%s(dst=%p,src=%p,size=0x%lx): operation not supported", __func__, dst, src, size);
        }
    }
    else
    {
        if (lwp_in_user_space(src))
        {
            len = lwp_get_from_user(dst, (void *)src, size);
            if (!len)
            {
                LOG_E("lwp_get_from_user(lwp=%p, dst=%p,src=%p,size=0x%lx) failed", lwp_self(), dst, src, size);
            }
        }
        else
        {
            if (_can_unaligned_access(dst) && _can_unaligned_access(src))
            {
                rc = memcpy(dst, src, size);
            }
            else
            {
                rt_memcpy(dst, src, size);
            }
        }
    }

    return rc;
}

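/*
 * lwp_memcpy() dispatches on where dst and src live: user<->kernel copies
 * go through the checked lwp_put_to_user()/lwp_get_from_user() paths, and
 * kernel-to-kernel copies use the libc memcpy only when both ends sit in
 * the direct mapping (see _can_unaligned_access() above), falling back to
 * rt_memcpy() for memory that may not tolerate unaligned access. A caller
 * can therefore pass a pointer without first checking which space it
 * belongs to, e.g. lwp_memcpy(kbuf, maybe_user_buf, n).
 */
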
int lwp_user_accessible_ext(struct rt_lwp *lwp, void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;

    if (!lwp)
    {
        return RT_FALSE;
    }
    if (!size || !addr)
    {
        return RT_FALSE;
    }
    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    if (addr_start < (void *)USER_VADDR_START)
    {
        return RT_FALSE;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return RT_FALSE;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return RT_FALSE;
    }
#endif

    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_addr = lwp_v2p(lwp, addr_start);
        if (tmp_addr == ARCH_MAP_FAILED &&
            !rt_aspace_query(lwp->aspace, addr_start))
        {
            return RT_FALSE;
        }
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);
    return RT_TRUE;
}

int lwp_user_accessable(void *addr, size_t size)
{
    return lwp_user_accessible_ext(lwp_self(), addr, size);
}

#define ALIGNED(addr) (!((rt_size_t)(addr) & ARCH_PAGE_MASK))

/* src is in lwp address space, dst is in current thread space */
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    char *temp_page = 0;
    char *dst_iter, *src_next_page;
    char *src_copy_end, *src_iter, *src_iter_aligned;

    if (!size || !dst)
    {
        return 0;
    }
    dst_iter = dst;
    src_iter = src;
    src_copy_end = (char *)src + size;
    src_next_page =
        (char *)(((size_t)src_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t bytes_to_copy = (char *)src_next_page - (char *)src_iter;
        if (bytes_to_copy > size)
        {
            bytes_to_copy = size;
        }

        if (ALIGNED(src_iter) && bytes_to_copy == ARCH_PAGE_SIZE)
        {
            /* get page to kernel buffer */
            if (rt_aspace_page_get(lwp->aspace, src_iter, dst_iter))
                break;
        }
        else
        {
            if (!temp_page)
                temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!temp_page)
                break;

            src_iter_aligned = (char *)((long)src_iter & ~ARCH_PAGE_MASK);
            if (rt_aspace_page_get(lwp->aspace, src_iter_aligned, temp_page))
                break;
            memcpy(dst_iter, temp_page + (src_iter - src_iter_aligned), bytes_to_copy);
        }

        dst_iter = dst_iter + bytes_to_copy;
        src_iter = src_iter + bytes_to_copy;
        size -= bytes_to_copy;
        src_next_page = src_next_page + ARCH_PAGE_SIZE;
        copy_len += bytes_to_copy;
    } while (src_iter < src_copy_end);

    if (temp_page)
        rt_pages_free(temp_page, 0);
    return copy_len;
}

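/*
 * Illustrative sketch (hypothetical addresses, not compiled here): reading
 * a structure that straddles a page boundary in another process.
 *
 *     struct config cfg;
 *     size_t got = lwp_data_get(target_lwp, &cfg, user_cfg_ptr, sizeof(cfg));
 *     if (got != sizeof(cfg))
 *         return -EFAULT; // part of the user range was unmapped
 *
 * The copy walks the source page by page: whole aligned pages go straight
 * into the destination, while partial pages bounce through one temporary
 * kernel page.
 */
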
/* dst is in lwp address space, src is in current thread space */
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    char *temp_page = 0;
    char *dst_iter, *dst_iter_aligned, *dst_next_page;
    char *dst_put_end, *src_iter;

    if (!size || !dst)
    {
        return 0;
    }

    src_iter = src;
    dst_iter = dst;
    dst_put_end = (char *)dst + size;
    dst_next_page =
        (char *)(((size_t)dst_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t bytes_to_put = (char *)dst_next_page - (char *)dst_iter;
        if (bytes_to_put > size)
        {
            bytes_to_put = size;
        }

        if (ALIGNED(dst_iter) && bytes_to_put == ARCH_PAGE_SIZE)
        {
            /* write to page in kernel */
            if (rt_aspace_page_put(lwp->aspace, dst_iter, src_iter))
                break;
        }
        else
        {
            if (!temp_page)
                temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!temp_page)
                break;

            dst_iter_aligned = (char *)((long)dst_iter & ~ARCH_PAGE_MASK);
            if (rt_aspace_page_get(lwp->aspace, dst_iter_aligned, temp_page))
                break;
            memcpy(temp_page + (dst_iter - dst_iter_aligned), src_iter, bytes_to_put);
            if (rt_aspace_page_put(lwp->aspace, dst_iter_aligned, temp_page))
                break;
        }

        src_iter = src_iter + bytes_to_put;
        dst_iter = dst_iter + bytes_to_put;
        size -= bytes_to_put;
        dst_next_page = dst_next_page + ARCH_PAGE_SIZE;
        copy_len += bytes_to_put;
    } while (dst_iter < dst_put_end);

    if (temp_page)
        rt_pages_free(temp_page, 0);
    return copy_len;
}

/* set 'size' bytes starting at 'dst' in the lwp address space to 'byte' */
size_t lwp_data_set(struct rt_lwp *lwp, void *dst, int byte, size_t size)
{
    size_t copy_len = 0;
    char *temp_page = 0;
    char *dst_iter, *dst_iter_aligned, *dst_next_page;
    char *dst_put_end;

    if (!size || !dst)
    {
        return 0;
    }

    dst_iter = dst;
    dst_put_end = (char *)dst + size;
    dst_next_page =
        (char *)(((size_t)dst_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (temp_page)
    {
        do
        {
            size_t bytes_to_put = (char *)dst_next_page - (char *)dst_iter;
            if (bytes_to_put > size)
            {
                bytes_to_put = size;
            }

            dst_iter_aligned = (char *)((long)dst_iter & ~ARCH_PAGE_MASK);
            if (!ALIGNED(dst_iter) || bytes_to_put != ARCH_PAGE_SIZE)
                if (rt_aspace_page_get(lwp->aspace, dst_iter_aligned, temp_page))
                    break;

            memset(temp_page + (dst_iter - dst_iter_aligned), byte, bytes_to_put);
            if (rt_aspace_page_put(lwp->aspace, dst_iter_aligned, temp_page))
                break;

            dst_iter = dst_iter + bytes_to_put;
            size -= bytes_to_put;
            dst_next_page = dst_next_page + ARCH_PAGE_SIZE;
            copy_len += bytes_to_put;
        } while (dst_iter < dst_put_end);

        rt_pages_free(temp_page, 0);
    }

    return copy_len;
}

size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s)
{
    int len = 0;
    char *temp_buf = RT_NULL;
    void *addr_start = RT_NULL;
    int get_bytes = 0;
    int index = 0;

    if (s == RT_NULL)
        return 0;

    if (lwp == RT_NULL)
    {
        LOG_W("%s: lwp is NULL", __func__);
        return -1;
    }

    addr_start = (void *)s;
    temp_buf = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!temp_buf)
    {
        LOG_W("%s: No memory", __func__);
        return -1;
    }

    get_bytes = lwp_data_get(lwp, temp_buf, addr_start, ARCH_PAGE_SIZE);
    if (get_bytes == 0)
    {
        LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000) failed", lwp, temp_buf, addr_start);
        rt_pages_free(temp_buf, 0);
        return -1;
    }

    while (temp_buf[index] != '\0')
    {
        len++;
        index++;
        if (index == get_bytes)
        {
            if (get_bytes == ARCH_PAGE_SIZE)
            {
                get_bytes = lwp_data_get(lwp, temp_buf, (char *)addr_start + len, ARCH_PAGE_SIZE);
                if (get_bytes == 0)
                {
                    LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000): user data inaccessible",
                        lwp, temp_buf, addr_start);
                    len = -1;
                    break;
                }
                index = 0;
            }
            else
            {
                LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000): user data inaccessible",
                    lwp, temp_buf, addr_start);
                len = -1;
                break;
            }
        }
    }

    rt_pages_free(temp_buf, 0);
    return len;
}

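/*
 * Illustrative sketch (hypothetical syscall argument, not compiled here):
 * duplicating a NUL-terminated path from user space. lwp_user_strlen_ext()
 * returns (size_t)-1 when the string is unreadable, so check before use.
 *
 *     size_t len = lwp_strlen(lwp, upath);
 *     if (len != (size_t)-1)
 *     {
 *         char *kpath = rt_malloc(len + 1);
 *         if (kpath)
 *         {
 *             if (lwp_data_get(lwp, kpath, (void *)upath, len + 1) == len + 1)
 *             {
 *                 // kpath now holds a NUL-terminated kernel copy
 *             }
 *             rt_free(kpath);
 *         }
 *     }
 */
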
size_t lwp_user_strlen(const char *s)
{
    struct rt_lwp *lwp = RT_NULL;

    lwp = lwp_self();
    RT_ASSERT(lwp != RT_NULL);

    return lwp_user_strlen_ext(lwp, s);
}

size_t lwp_strlen(struct rt_lwp *lwp, const char *s)
{
    if (lwp_in_user_space(s))
        return lwp_user_strlen_ext(lwp, s);
    else
        return strlen(s);
}

#endif /* ARCH_MM_MMU */