/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2023-07-16     Shell        Move part of the codes to C from asm in signal handling
 */

#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h> /* memset() is used by the ASID allocator below */
#ifdef ARCH_MM_MMU

#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp_arch.h>
#include <lwp_user_mm.h>

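/*
 * Create the user address space for an lwp: allocate a fresh page table
 * and build an aspace that covers [USER_VADDR_START, USER_VADDR_TOP).
 * Returns 0 on success, -RT_ENOMEM or -RT_ERROR on failure.
 */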
int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

    mmu_table = rt_hw_mmu_pgtbl_create();
    if (!mmu_table)
    {
        return -RT_ENOMEM;
    }

    lwp->end_heap = USER_HEAP_VADDR;

    lwp->aspace = rt_aspace_create((void *)USER_VADDR_START,
                                   USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        /* avoid leaking the page table when the aspace cannot be created */
        rt_hw_mmu_pgtbl_delete(mmu_table);
        return -RT_ERROR;
    }

    return 0;
}

static struct rt_varea kuser_varea;

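/*
 * Map the kuser helper page read-only into the given aspace at the vector
 * address and copy the helper code to the top of that page, then flush the
 * data cache and invalidate the instruction cache so user space executes
 * the fresh code.
 */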
void arch_kuser_init(rt_aspace_t aspace, void *vectors)
{
    int err;
    const size_t kuser_size = 0x1000;
    extern char __kuser_helper_start[];
    extern char __kuser_helper_end[];
    rt_base_t start = (rt_base_t)__kuser_helper_start;
    rt_base_t end = (rt_base_t)__kuser_helper_end;
    int kuser_sz = end - start;

    err = rt_aspace_map_static(aspace, &kuser_varea, &vectors, kuser_size,
                               MMU_MAP_U_RO, MMF_MAP_FIXED | MMF_PREFETCH,
                               &rt_mm_dummy_mapper, 0);
    if (err != 0)
        while (1)
            ; /* early failure: the kuser helper page is mandatory */

    lwp_memcpy((void *)((char *)vectors + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz);
    /*
     * vectors + 0xfe0 = __kuser_get_tls
     * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
     */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
    rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
}

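/*
 * Tear down the user address space of an lwp. The aspace must be deleted
 * first because unmapping still walks the page table; only then can the
 * page table itself be released.
 */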
void arch_user_space_free(struct rt_lwp *lwp)
{
    if (lwp)
    {
        RT_ASSERT(lwp->aspace);
        void *pgtbl = lwp->aspace->page_table;
        rt_aspace_delete(lwp->aspace);

        /* must be freed after aspace delete, pgtbl is required for unmap */
        rt_hw_mmu_pgtbl_delete(pgtbl);
        lwp->aspace = RT_NULL;
    }
    else
    {
        LOG_W("%s: NULL lwp as parameter", __func__);
        RT_ASSERT(0);
    }
}

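/*
 * Grow the user stack on demand: if the faulting address lies inside the
 * stack region, map one page at the page-aligned address. Returns 1 when
 * the access can be retried, 0 otherwise.
 */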
int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    stack_addr &= ~ARCH_PAGE_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map = lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);

        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1;
        }
    }
    return ret;
}

#define ALIGN_BYTES         8
#define lwp_sigreturn_bytes 8
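/*
 * Register frame exchanged with the assembly signal-entry code; the field
 * order must stay in sync with the order the registers are pushed there.
 */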
struct signal_regs
{
    rt_base_t lr;
    rt_base_t spsr;
    rt_base_t r0_to_r12[13];
    rt_base_t ip;
};

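/*
 * Signal context pushed onto the user stack. The sigreturn trampoline is
 * placed at the lowest address so the handler can return through it,
 * followed by the saved signal mask, the siginfo and the 8-byte aligned
 * register frame.
 */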
struct signal_ucontext
{
    rt_base_t sigreturn[lwp_sigreturn_bytes / sizeof(rt_base_t)];
    lwp_sigset_t save_sigmask;

    siginfo_t si;

    rt_align(8)
    struct signal_regs frame;
};

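/*
 * Called on sigreturn: restore the signal mask saved at delivery time and
 * swap frame.ip with frame.lr, so the kernel resumes at the interrupted pc
 * (taken from frame.lr) while user space reloads its original lr from
 * frame.ip. Kills the process if the frame is inaccessible.
 */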
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
    struct signal_ucontext *new_sp;
    rt_base_t ip;
    new_sp = (void *)user_sp;

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
        ip = new_sp->frame.ip;
        /* let user restore its lr from frame.ip */
        new_sp->frame.ip = new_sp->frame.lr;
        /* kernel will pick the pc from frame.lr */
        new_sp->frame.lr = ip;
    }
    else
    {
        LOG_I("User frame corrupted during signal handling\nexiting...");
        sys_exit_group(EXIT_FAILURE);
    }

    return (void *)&new_sp->frame;
}

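/*
 * Build a struct signal_ucontext on the user stack below user_sp: copy the
 * sigreturn trampoline, the saved signal mask, the siginfo and the exception
 * frame (r0-r12 plus ip from exp_frame, the given lr and the current spsr).
 * Kills the process on user stack overflow. Returns the new context address.
 */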
void *arch_signal_ucontext_save(rt_base_t lr, siginfo_t *psiginfo,
                                struct signal_regs *exp_frame, rt_base_t user_sp,
                                lwp_sigset_t *save_sig_mask)
{
    rt_base_t spsr;
    struct signal_ucontext *new_sp;
    new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        /* push psiginfo */
        if (psiginfo)
        {
            lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
        }

        /* copy r0-r12 plus ip from the exception frame */
        lwp_memcpy(&new_sp->frame.r0_to_r12, exp_frame, sizeof(new_sp->frame.r0_to_r12) + sizeof(rt_base_t));
        new_sp->frame.lr = lr;

        __asm__ volatile("mrs %0, spsr" : "=r"(spsr));
        new_sp->frame.spsr = spsr;

        /* copy the save_sig_mask */
        lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));

        /* copy lwp_sigreturn */
        extern void lwp_sigreturn(void);
        /* ensure that the sigreturn code starts at the lowest address of the frame */
        lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
    }
    else
    {
        LOG_I("%s: User stack overflow", __func__);
        sys_exit_group(EXIT_FAILURE);
    }

    return new_sp;
}

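/* Stub: rewriting errno in the exception frame is not supported on ARM. */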
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
    /* not supported on this architecture */
    return;
}

void *arch_kernel_mmu_table_get(void)
{
    return rt_kernel_space.page_table;
}

#ifdef LWP_ENABLE_ASID
#define MAX_ASID_BITS 8
#define MAX_ASID (1 << MAX_ASID_BITS)
static uint64_t global_generation = 1;
static char asid_valid_bitmap[MAX_ASID];
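/*
 * Generation-based ASID allocator: an lwp's ASID is only valid while its
 * generation matches the global one. When all 8-bit ASIDs are in use, the
 * generation is bumped, the bitmap is reset and the whole TLB is flushed,
 * which implicitly invalidates every ASID handed out before.
 */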
unsigned int arch_get_asid(struct rt_lwp *lwp)
{
    if (lwp == RT_NULL)
    {
        /* the kernel owns ASID 0 */
        return 0;
    }

    if (lwp->generation == global_generation)
    {
        return lwp->asid;
    }

    /* try to reuse the ASID this lwp held in an earlier generation */
    if (lwp->asid && !asid_valid_bitmap[lwp->asid])
    {
        asid_valid_bitmap[lwp->asid] = 1;
        /* keep the generation in sync so a fresh ASID is not allocated later */
        lwp->generation = global_generation;
        return lwp->asid;
    }

    for (unsigned i = 1; i < MAX_ASID; i++)
    {
        if (asid_valid_bitmap[i] == 0)
        {
            asid_valid_bitmap[i] = 1;
            lwp->generation = global_generation;
            lwp->asid = i;
            return lwp->asid;
        }
    }

    /* all ASIDs are taken: start a new generation and flush the TLB */
    global_generation++;
    memset(asid_valid_bitmap, 0, MAX_ASID * sizeof(char));

    asid_valid_bitmap[1] = 1;
    lwp->generation = global_generation;
    lwp->asid = 1;

    /* TLBIALL: invalidate the entire unified TLB (the register value is ignored) */
    asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");

    return lwp->asid;
}
#endif /* LWP_ENABLE_ASID */

#endif /* ARCH_MM_MMU */