/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-18     Jesven       first version
 * 2023-07-16     Shell        Move part of the codes to C from asm in signal handling
 * 2023-10-16     Shell        Support a new backtrace framework
 * 2023-08-03     Shell        Support of syscall restart (SA_RESTART)
 */

#include <armv8.h>
#include <rthw.h>
#include <rtthread.h>
#include <stdlib.h>
#include <string.h>
#include <lwp_internal.h>

#ifdef ARCH_MM_MMU

#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp_arch.h>
#include <lwp_user_mm.h>

extern size_t MMUTable[];

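/* Create the user address space of an lwp: allocate a page table and build an
 * aspace covering [USER_VADDR_START, USER_VADDR_TOP). Returns 0 on success,
 * -RT_ENOMEM if the page table cannot be allocated, and -RT_ERROR if the
 * aspace cannot be created. */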
int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

    mmu_table = rt_hw_mmu_pgtbl_create();
    if (mmu_table)
    {
        lwp->end_heap = USER_HEAP_VADDR;
        lwp->aspace = rt_aspace_create(
            (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
        if (!lwp->aspace)
        {
            /* the aspace was never created, so release the page table here */
            rt_pages_free(mmu_table, 0);
            return -RT_ERROR;
        }
    }
    else
    {
        return -RT_ENOMEM;
    }

    return 0;
}

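/* This port does not export the kernel MMU table; callers always get NULL. */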
void *arch_kernel_mmu_table_get(void)
{
    return (void *)NULL;
}

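/* Tear down the user address space of an lwp: delete the aspace first, then
 * release the page that backed its page table. */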
void arch_user_space_free(struct rt_lwp *lwp)
{
    if (lwp)
    {
        RT_ASSERT(lwp->aspace);
        void *pgtbl = lwp->aspace->page_table;
        rt_aspace_delete(lwp->aspace);

        /* must be freed after the aspace is deleted, since the pgtbl is required for unmapping */
        rt_pages_free(pgtbl, 0);
        lwp->aspace = NULL;
    }
    else
    {
        LOG_W("%s: NULL lwp as parameter", __func__);
        RT_ASSERT(0);
    }
}

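/* Grow the user stack on demand, typically from the access-fault path: if addr
 * lies inside [USER_STACK_VSTART, USER_STACK_VEND), map one page at the
 * page-aligned address. Returns 1 if the address is accessible afterwards,
 * 0 otherwise. */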
int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    stack_addr &= ~ARCH_PAGE_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) &&
        (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map =
            lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);

        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1;
        }
    }
    return ret;
}

#endif /* ARCH_MM_MMU */

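/* Build the initial kernel stack of a newly cloned user thread: copy the
 * parent's syscall exception frame onto new_thread_stack, point sp_el0 at the
 * child's user stack and force x0 = 0 so the child observes a zero syscall
 * return value, then place a regular thread frame (entry `exit`) below it and
 * hand the resulting stack pointer back through thread_sp. */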
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
                            void *user_stack, void **thread_sp)
{
    struct rt_hw_exp_stack *syscall_frame;
    struct rt_hw_exp_stack *thread_frame;
    struct rt_hw_exp_stack *ori_syscall = rt_thread_self()->user_ctx.ctx;
    RT_ASSERT(ori_syscall != RT_NULL);

    new_thread_stack = (rt_ubase_t *)RT_ALIGN_DOWN((rt_ubase_t)new_thread_stack, 16);

    syscall_frame = (void *)((long)new_thread_stack - sizeof(struct rt_hw_exp_stack));
    memcpy(syscall_frame, ori_syscall, sizeof(*syscall_frame));
    syscall_frame->sp_el0 = (long)user_stack;
    syscall_frame->x0 = 0;

    thread_frame = (void *)rt_hw_stack_init(exit, RT_NULL, (void *)syscall_frame, RT_NULL);

    *thread_sp = thread_frame;

    return 0;
}

#define ALGIN_BYTES (16)

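/* Layout of the signal ucontext that arch_signal_ucontext_save() pushes onto
 * the user stack: the sigreturn trampoline sits at the lowest address,
 * followed by the saved signal mask, the siginfo and the 16-byte aligned
 * exception frame. */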
/* the layout is part of the ABI, don't change it */
struct signal_ucontext
{
    rt_int64_t sigreturn;
    lwp_sigset_t save_sigmask;

    siginfo_t si;

    rt_align(ALGIN_BYTES)
    struct rt_hw_exp_stack frame;
};

RT_STATIC_ASSERT(abi_offset_compatible, offsetof(struct signal_ucontext, si) == UCTX_ABI_OFFSET_TO_SI);

void *arch_signal_ucontext_get_frame(struct signal_ucontext *uctx)
{
    return &uctx->frame;
}

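/* Record what a later syscall restart needs before a signal handler runs: the
 * syscall return code goes into x0, and the original first argument is parked
 * in x7 so that arch_signal_check_erestart() can put it back. */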
/* internal use only */
void arch_syscall_prepare_signal(rt_base_t rc, struct rt_hw_exp_stack *exp_frame)
{
    long x0 = exp_frame->x0;
    exp_frame->x0 = rc;
    exp_frame->x7 = x0;
    return ;
}

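/* Replay the interrupted syscall from the saved exception frame; the
 * implementation lives in the low-level syscall handling code, not in this
 * file. */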
void arch_syscall_restart(void *sp, void *ksp);

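/* Patch the syscall return value kept in the exception frame: if it equals
 * -expected, replace it with -code (e.g. turning -ERESTART into -EINTR for a
 * handler without SA_RESTART). */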
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
    struct rt_hw_exp_stack *exp_frame = eframe;
    if (exp_frame->x0 == -expected)
        exp_frame->x0 = -code;
    return ;
}

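/* Check whether the interrupted syscall asked to be restarted: if the frame
 * still carries -ERESTART, restore the original first argument from x7 (saved
 * by arch_syscall_prepare_signal()) and replay the syscall through
 * arch_syscall_restart(). */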
void arch_signal_check_erestart(void *eframe, void *ksp)
{
    struct rt_hw_exp_stack *exp_frame = eframe;
    long rc = exp_frame->x0;
    long sys_id = exp_frame->x8;
    (void)sys_id;

    if (rc == -ERESTART)
    {
        LOG_D("%s(rc=%ld,sys_id=%ld,pid=%d)", __func__, rc, sys_id, lwp_self()->pid);
        LOG_D("%s: restart rc = %ld", lwp_get_syscall_name(sys_id), rc);
        exp_frame->x0 = exp_frame->x7;
        arch_syscall_restart(eframe, ksp);
    }

    return ;
}

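/* Runs right before returning to the interrupted user context; currently it
 * only performs the SA_RESTART check. */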
static void arch_signal_post_action(struct signal_ucontext *new_sp, rt_base_t kernel_sp)
{
    arch_signal_check_erestart(&new_sp->frame, (void *)kernel_sp);

    return ;
}

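/* sigreturn path: validate the ucontext the handler ran on, restore the saved
 * signal mask, handle a pending syscall restart, and return the address just
 * past the saved exception frame. A corrupted user frame terminates the
 * process. */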
void *arch_signal_ucontext_restore(rt_base_t user_sp, rt_base_t kernel_sp)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)user_sp;

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
        arch_signal_post_action(new_sp, kernel_sp);
    }
    else
    {
        LOG_I("User frame corrupted during signal handling\nexiting...");
        sys_exit_group(EXIT_FAILURE);
    }

    return (char *)&new_sp->frame + sizeof(struct rt_hw_exp_stack);
}

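/* Push a struct signal_ucontext onto the user stack (16-byte aligned),
 * carrying the siginfo (if any), the interrupted exception frame, the previous
 * signal mask and the first 8 bytes of lwp_sigreturn, which act as the
 * sigreturn trampoline. Overflowing the user stack terminates the process. */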
void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
                                struct rt_hw_exp_stack *exp_frame,
                                lwp_sigset_t *save_sig_mask)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)((user_sp - sizeof(struct signal_ucontext)) & ~0xf);

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        /* push psiginfo */
        if (psiginfo)
        {
            lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
        }

        /* the exp frame is already aligned as AAPCS64 requires */
        lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));

        /* copy the save_sig_mask */
        lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));

        /* copy lwp_sigreturn */
        const size_t lwp_sigreturn_bytes = 8;
        extern void lwp_sigreturn(void);
        /* -> ensure that the sigreturn starts at the outermost boundary of the context */
        lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
    }
    else
    {
        LOG_I("%s: User stack overflow", __func__);
        sys_exit_group(EXIT_FAILURE);
    }

    return new_sp;
}

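/* Backtrace a user thread from its saved exception frame: the frame must lie
 * within the thread's kernel stack, and its pc/x29 seed the unwinder. Returns
 * 0 on success, -1 if the thread has no lwp or no usable frame. */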
int arch_backtrace_uthread(rt_thread_t thread)
{
    struct rt_hw_backtrace_frame frame;
    struct rt_hw_exp_stack *stack;

    if (thread && thread->lwp)
    {
        stack = thread->user_ctx.ctx;
        if ((long)stack > (unsigned long)thread->stack_addr
            && (long)stack < (unsigned long)thread->stack_addr + thread->stack_size)
        {
            frame.pc = stack->pc;
            frame.fp = stack->x29;
            lwp_backtrace_frame(thread, &frame);
            return 0;
        }
        else
            return -1;
    }
    return -1;
}