/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-11-18     Jesven       first version
 * 2021-02-03     lizhirui     port to riscv64
 * 2021-02-06     lizhirui     add thread filter
 * 2021-02-19     lizhirui     port to new version of rt-smart
 * 2021-03-02     lizhirui     add an auxiliary function for interrupt
 * 2021-03-04     lizhirui     delete thread filter
 * 2021-03-04     lizhirui     modify for new version of rt-smart
 * 2021-11-22     JasonHu      add lwp_set_thread_context
 * 2021-11-30     JasonHu      add clone/fork support
 * 2023-07-16     Shell        Move part of the code from asm to C in signal handling
 * 2023-10-16     Shell        Support a new backtrace framework
 */
#include <rthw.h>
#include <rtthread.h>

#include <stddef.h>

#ifdef ARCH_MM_MMU

#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp_internal.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
#include <page.h>

#include <cpuport.h>
#include <encoding.h>
#include <stack.h>
#include <cache.h>

extern rt_ubase_t MMUTable[];

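/**
 * Copy the `lwp_thread_return` trampoline to the top of the current
 * thread's user stack and return its new address, or RT_NULL if the
 * thread has no user stack.
 */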
void *lwp_copy_return_code_to_user_stack(void)
{
    void lwp_thread_return(void);
    void lwp_thread_return_end(void);
    rt_thread_t tid = rt_thread_self();

    if (tid->user_stack != RT_NULL)
    {
        rt_size_t size = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;
        rt_size_t userstack = (rt_size_t)tid->user_stack + tid->user_stack_size - size;
        lwp_memcpy((void *)userstack, lwp_thread_return, size);
        return (void *)userstack;
    }

    return RT_NULL;
}

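/**
 * Adjust a user stack pointer downwards by the size of the return-code
 * trampoline, reserving room for the copy placed there by
 * lwp_copy_return_code_to_user_stack().
 */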
rt_ubase_t lwp_fix_sp(rt_ubase_t cursp)
{
    void lwp_thread_return(void);
    void lwp_thread_return_end(void);

    if (cursp == 0)
    {
        return 0;
    }

    return cursp - ((rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return);
}

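/* recover the thread object from the address of its `sp` member */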
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
{
    return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
}

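/* return the top (highest address) of a thread's kernel stack */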
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
    return (void *)(((rt_size_t)thread->stack_addr) + ((rt_size_t)thread->stack_size));
}

void *arch_get_user_sp(void)
{
    /* the user sp is saved in the exception frame at the top of the kernel stack */
    rt_thread_t self = rt_thread_self();
    rt_uint8_t *stack_top = (rt_uint8_t *)self->stack_addr + self->stack_size;
    struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)(stack_top - sizeof(struct rt_hw_stack_frame));

    return (void *)frame->user_sp_exc_stack;
}

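/**
 * Create the address space and page table for a new user process (lwp).
 * Returns 0 on success, a negative error code otherwise.
 */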
int arch_user_space_init(struct rt_lwp *lwp)
{
    rt_ubase_t *mmu_table;

    mmu_table = rt_hw_mmu_pgtbl_create();
    if (!mmu_table)
    {
        return -RT_ENOMEM;
    }

    lwp->end_heap = USER_HEAP_VADDR;
    lwp->aspace = rt_aspace_create(
        (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        /* avoid leaking the page table on failure */
        rt_hw_mmu_pgtbl_delete(mmu_table);
        return -RT_ERROR;
    }

    return 0;
}

/* return the kernel MMU table */
void *arch_kernel_mmu_table_get(void)
{
    return (void *)MMUTable;
}

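/* tear down the address space of an lwp, then release its page table */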
void arch_user_space_free(struct rt_lwp *lwp)
{
    if (lwp)
    {
        RT_ASSERT(lwp->aspace);

        void *pgtbl = lwp->aspace->page_table;
        rt_aspace_delete(lwp->aspace);

        /* must be freed after the aspace is deleted; pgtbl is required for unmapping */
        rt_hw_mmu_pgtbl_delete(pgtbl);
        lwp->aspace = RT_NULL;
    }
    else
    {
        LOG_W("%s: NULL lwp as parameter", __func__);
        RT_ASSERT(0);
    }
}

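/* thin wrappers over the arch-specific clone/fork entries implemented elsewhere in this port */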
long _sys_clone(void *arg[]);
long sys_clone(void *arg[])
{
    return _sys_clone(arg);
}

long _sys_fork(void);
long sys_fork(void)
{
    return _sys_fork();
}

long _sys_vfork(void);
long sys_vfork(void)
{
    /* vfork falls back to plain fork semantics on this port */
    return _sys_fork();
}

/**
 * Set the execution context of the child for fork/clone.
 * `new_thread_stack` is the child's kernel stack top, `user_stack` is the
 * child's user stack pointer, `exit` is the routine the new thread returns
 * to (arch_clone_exit/arch_fork_exit), and the resulting kernel stack top
 * is stored through `thread_sp`.
 */
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
                            void *user_stack, void **thread_sp)
{
    RT_ASSERT(exit != RT_NULL);
    RT_ASSERT(user_stack != RT_NULL);
    RT_ASSERT(new_thread_stack != RT_NULL);
    RT_ASSERT(thread_sp != RT_NULL);
    struct rt_hw_stack_frame *syscall_frame;
    struct rt_hw_stack_frame *thread_frame;

    rt_uint8_t *stk;
    rt_uint8_t *syscall_stk;

    stk = (rt_uint8_t *)new_thread_stack;
    /* reserve the syscall context; all registers are copied from the parent */
    stk -= CTX_REG_NR * REGBYTES;
    syscall_stk = stk;

    syscall_frame = (struct rt_hw_stack_frame *)stk;

    /* modify the user sp */
    syscall_frame->user_sp_exc_stack = (rt_ubase_t)user_stack;

    /* skip the 4-byte ecall instruction so the child resumes after the syscall */
    syscall_frame->epc += 4;

    /* the child's return value is 0 */
    syscall_frame->a0 = 0;
    syscall_frame->a1 = 0;

    /* reset the thread-local storage pointer (tp) */
    rt_thread_t thread = rt_container_of((unsigned long)thread_sp, struct rt_thread, sp);
    syscall_frame->tp = (rt_ubase_t)thread->thread_idr;

#ifdef ARCH_USING_NEW_CTX_SWITCH
    extern void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus);
    rt_ubase_t sstatus = read_csr(sstatus) | SSTATUS_SPP;
    sstatus &= ~SSTATUS_SIE;

    /* compatible with RESTORE_CONTEXT */
    stk = (void *)_rt_hw_stack_init((rt_ubase_t *)stk, (rt_ubase_t)exit, sstatus);
#else
    /* build a temporary thread context */
    stk -= sizeof(struct rt_hw_stack_frame);

    thread_frame = (struct rt_hw_stack_frame *)stk;

    int i;
    for (i = 0; i < sizeof(struct rt_hw_stack_frame) / sizeof(rt_ubase_t); i++)
    {
        /* fill with a sentinel value to catch use of uninitialized registers */
        ((rt_ubase_t *)thread_frame)[i] = 0xdeadbeef;
    }

    /* set the pc for the thread */
    thread_frame->epc     = (rt_ubase_t)exit;

    /* set the previous privilege mode to supervisor, since we are in the kernel */
    thread_frame->sstatus = read_csr(sstatus) | SSTATUS_SPP;
    thread_frame->sstatus &= ~SSTATUS_SIE; /* interrupts must stay disabled */

    /* set the stack to the reserved syscall stack */
    thread_frame->user_sp_exc_stack = (rt_ubase_t)syscall_stk;

#endif /* ARCH_USING_NEW_CTX_SWITCH */
    /* save the new stack top */
    *thread_sp = (void *)stk;

    /**
     * The kernel stack layout of the child thread:
     *
     * +------------------------+ --> kernel stack top
     * | syscall stack          |
     * |                        |
     * | @sp                    | --> `user_stack`
     * | @epc                   | --> user ecall addr + 4 (skip ecall)
     * | @a0&a1                 | --> 0 (so the child returns 0)
     * |                        |
     * +------------------------+ --> temp thread stack top
     * | temp thread stack      |           ^
     * |                        |           |
     * | @sp                    | ---------/
     * | @epc                   | --> `exit` (arch_clone_exit/arch_fork_exit)
     * |                        |
     * +------------------------+ --> thread sp
     */
    return 0;
}

#define ALIGN_BYTES (16)

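/**
 * Layout of the signal frame pushed onto the user stack: the sigreturn
 * trampoline, the saved signal mask, the siginfo, and the interrupted
 * user context.
 */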
struct signal_ucontext
{
    rt_int64_t sigreturn;
    lwp_sigset_t save_sigmask;

    siginfo_t si;

    rt_align(ALIGN_BYTES)
    struct rt_hw_stack_frame frame;
};

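/**
 * Validate the signal frame on sigreturn, restore the saved signal mask,
 * and hand back the interrupted user context to resume from.
 */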
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)user_sp;

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
    }
    else
    {
        LOG_I("User frame corrupted during signal handling\nexiting...");
        sys_exit_group(EXIT_FAILURE);
    }

    return (void *)&new_sp->frame;
}

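/**
 * Push a signal frame onto the user stack before delivering a signal:
 * save the siginfo, the interrupted context, the current signal mask and
 * a copy of the sigreturn trampoline; return the new user stack pointer.
 */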
void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
                         struct rt_hw_stack_frame *exp_frame, rt_base_t user_sp,
                         lwp_sigset_t *save_sig_mask)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        /* push psiginfo */
        if (psiginfo)
        {
            lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
        }

        lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));

        /* copy the save_sig_mask */
        lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));

        /* copy lwp_sigreturn */
        const size_t lwp_sigreturn_bytes = 8;
        extern void lwp_sigreturn(void);
        /* ensure that the sigreturn code starts at the outermost boundary */
        lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);

        /**
         * synchronize dcache & icache if the target is a Harvard
         * architecture machine; otherwise do nothing
         */
        rt_hw_sync_cache_local(&new_sp->sigreturn, lwp_sigreturn_bytes);
    }
    else
    {
        LOG_I("%s: User stack overflow", __func__);
        sys_exit_group(EXIT_FAILURE);
    }

    return new_sp;
}


void arch_syscall_set_errno(void *eframe, int expected, int code)
{
    /* not supported on this architecture */
    return;
}

/**
 * void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
 */
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
    arch_start_umode(args, user_entry, (void *)USER_STACK_VEND, kernel_stack);
}

#endif /* ARCH_MM_MMU */

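/**
 * Backtrace a user thread: if its saved user context lies within the
 * kernel stack, walk the user frames starting from the saved epc/fp.
 * Returns 0 on success, -1 otherwise.
 */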
int arch_backtrace_uthread(rt_thread_t thread)
{
    struct rt_hw_backtrace_frame frame;
    struct rt_hw_stack_frame *stack;

    if (thread && thread->lwp)
    {
        stack = thread->user_ctx.ctx;
        if ((rt_ubase_t)stack > (rt_ubase_t)thread->stack_addr
            && (rt_ubase_t)stack < (rt_ubase_t)thread->stack_addr + thread->stack_size)
        {
            frame.pc = stack->epc;
            frame.fp = stack->s0_fp;
            lwp_backtrace_frame(thread, &frame);
            return 0;
        }
        else
        {
            return -1;
        }
    }
    return -1;
}