/*
 * Copyright (c) 2022, Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <xtensa_asm2_s.h>
#include <zephyr/offsets.h>
#include <offsets_short.h>
#include <zephyr/syscall.h>
#include <zephyr/zsr.h>

#include <xtensa/config/core-isa.h>

/**
 *  syscall number     arg1, arg2, arg3, arg4, arg5, arg6
 *  --------------     ----------------------------------
 *  a2                 a6,   a3,   a4,   a5,   a8,   a9
 *
 **/
.pushsection .text.xtensa_do_syscall, "ax"
.global	xtensa_do_syscall
.align	4
xtensa_do_syscall:
#if XCHAL_HAVE_THREADPTR == 0
	wsr a2, ZSR_SYSCALL_SCRATCH
	rsync

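	/* Without THREADPTR there is no cheap way for code to tell
	 * which ring it is in, so the user context check is routed
	 * through here as a syscall. If EPC1 matches
	 * xtensa_is_user_context_epc (the address of that check's
	 * syscall instruction), answer it directly: skip the syscall
	 * instruction and return whether PS.RING is non-zero.
	 */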
	movi a0, xtensa_is_user_context_epc
	rsr.epc1 a2
	bne a0, a2, _not_checking_user_context

	addi a2, a2, 3
	wsr.epc1 a2

	movi a0, PS_RING_MASK
	rsr.ps a2
	and a2, a2, a0

	/* Need to set the return value to 1 whenever RING != 0,
	 * so we won't leak which ring we are in right now.
	 */
	beqz a2, _is_user_context_return

	movi a2, 1

_is_user_context_return:
	rsr a0, ZSR_A0SAVE

	rfe

_not_checking_user_context:
	rsr a2, ZSR_SYSCALL_SCRATCH
#endif

	/* Disable interrupts while we save register contents
	 * to avoid any interference.
	 */
	rsil a0, 0xf

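	/* Locate the current thread's privileged stack pointer and
	 * carve out a base save area (BSA) on it for this frame.
	 */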
	rsr a0, ZSR_CPU
	l32i a0, a0, ___cpu_t_current_OFFSET
	l32i a0, a0, _thread_offset_to_psp

	addi a0, a0, -___xtensa_irq_bsa_t_SIZEOF

	s32i a1, a0, ___xtensa_irq_bsa_t_scratch_OFFSET
	s32i a2, a0, ___xtensa_irq_bsa_t_a2_OFFSET
	s32i a3, a0, ___xtensa_irq_bsa_t_a3_OFFSET
	rsr a2, ZSR_A0SAVE
	s32i a2, a0, ___xtensa_irq_bsa_t_a0_OFFSET
	rsr.ps a2
	movi a3, ~PS_OWB_MASK & ~PS_EXCM_MASK
	and a2, a2, a3
	s32i a2, a0, ___xtensa_irq_bsa_t_ps_OFFSET

	/* Adjust the PC we will return to after the syscall.
	 * The hardware stashes the PC of the syscall instruction
	 * itself rather than the instruction after it, so we must
	 * advance past it to resume at the next instruction on
	 * return. The syscall instruction is 3 bytes long, so
	 * simply add that.
	 */
	rsr.epc1 a3
	addi a3, a3, 3
	s32i a3, a0, ___xtensa_irq_bsa_t_pc_OFFSET

	/* Need to set up PS so we can spill all registers.
	 * The EXCM and RING bits need to be cleared, as the CPU
	 * has to run in kernel and non-exception modes for
	 * window rotation to work.
	 */
	rsr.ps a3
	movi a2, ~(PS_EXCM | PS_RING_MASK)
	and a3, a3, a2
	wsr.ps a3
	rsync
	l32i a2, a0, ___xtensa_irq_bsa_t_a2_OFFSET
	l32i a3, a0, ___xtensa_irq_bsa_t_a3_OFFSET
	SPILL_ALL_WINDOWS

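	/* Recompute the BSA pointer: the window spill above rotates
	 * through the register files and may not preserve a0.
	 */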
	rsr a0, ZSR_CPU
	l32i a0, a0, ___cpu_t_current_OFFSET
	l32i a0, a0, _thread_offset_to_psp
	addi a0, a0, -___xtensa_irq_bsa_t_SIZEOF

	mov a1, a0

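	/* Save the remaining special registers and spill the high
	 * GPRs. On return, the word at the top of the stack frame
	 * (a1) points back at the BSA.
	 */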
	ODD_REG_SAVE a0, a1

	call0 xtensa_save_high_regs

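	/* Reload the syscall ID (the saved a2) from the BSA and
	 * bounds-check it against the syscall table.
	 */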
	l32i a2, a1, 0
	l32i a2, a2, ___xtensa_irq_bsa_t_a2_OFFSET
	movi a0, K_SYSCALL_LIMIT
	bgeu a2, a0, _bad_syscall

_id_ok:
	/* Find the function handler for the given syscall id. */
	movi a3, _k_syscall_table
	addx4 a2, a2, a3
	l32i a2, a2, 0

#if XCHAL_HAVE_THREADPTR
	/* Clear the threadptr, because it is used to check
	 * whether a thread is running in user mode. Since we are
	 * handling an exception, we don't want the system to think
	 * it might be running in user mode.
	 */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	movi a0, is_user_mode@tpoff
	rur.THREADPTR a3
	add a0, a3, a0

	movi a3, 0
	s32i a3, a0, 0
#else
	movi a0, 0
	wur.THREADPTR a0
#endif
#endif /* XCHAL_HAVE_THREADPTR */

	/* Set the syscall parameters by moving them into place
	 * before we do a call4 to the syscall function itself.
	 * arg1 = a6
	 * arg2 = a3 (clobbered above, so we need to reload it)
	 * arg3 = a4
	 * arg4 = a5
	 * arg5 = a8
	 * arg6 = a9
	 */
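	/* call4 rotates the register window by four, so the callee
	 * sees our a6..a11 as its a2..a7. Note that a10/a11 must be
	 * staged before a8/a9 are overwritten below.
	 */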
	mov a10, a8
	mov a11, a9
	mov a8, a4
	mov a9, a5

	/* The stack frame pointer is the 7th argument to the
	 * z_mrsh_*() functions (ssf), and must be passed on the
	 * stack to be consumed.
	 *
	 * Subtract 16 bytes, as the stack needs to be 16-byte aligned.
	 */
	mov a3, a1
	addi a1, a1, -16
	s32i a3, a1, 0

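	/* Reload arg2 into a7: the word at a1+16 is the first word
	 * of the high-register frame, which points at the BSA
	 * holding the saved a3.
	 */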
	l32i a3, a1, 16
	l32i a7, a3, ___xtensa_irq_bsa_t_a3_OFFSET

	/* Since we are unmasking EXCM, we need to set RING bits to kernel
	 * mode, otherwise we won't be able to run the exception handler in C.
	 */
	movi a0, PS_WOE|PS_CALLINC(0)|PS_UM|PS_INTLEVEL(0)
	wsr.ps a0
	rsync

	callx4 a2

	/* Move the stack pointer back past the stashed frame pointer
	 * to the actual stack frame, so that register restoration can
	 * be done properly when finishing the syscall.
	 */
	addi a1, a1, 16

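	/* After call4, the callee's return value (its a2) shows up
	 * in our a6, which is why a6 is stored below.
	 */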
	/* Copy the return value into the saved frame (via the frame
	 * pointer at the top of the stack), because live registers
	 * will be clobbered in xtensa_restore_high_regs.
	 */
	l32i a3, a1, 0
	s32i a6, a3, ___xtensa_irq_bsa_t_a2_OFFSET

_syscall_returned:
	/* Disable interrupts as we are restoring context. */
	rsil a0, 0xf

	call0 xtensa_restore_high_regs

	ODD_REG_RESTORE a3, a1

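	/* If using TLS, set the is_user_mode flag back to 1 via the
	 * threadptr saved in the BSA, since we are returning to
	 * user mode.
	 */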
#if XCHAL_HAVE_THREADPTR
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	l32i a3, a1, ___xtensa_irq_bsa_t_threadptr_OFFSET
	movi a0, is_user_mode@tpoff
	add a0, a3, a0
	movi a3, 1
	s32i a3, a0, 0
#endif
#endif /* XCHAL_HAVE_THREADPTR */

	l32i a3, a1, ___xtensa_irq_bsa_t_ps_OFFSET
	wsr.ZSR_EPS a3

	l32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET
	wsr.ZSR_EPC a3

	l32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET
	l32i a2, a1, ___xtensa_irq_bsa_t_a2_OFFSET
	l32i a3, a1, ___xtensa_irq_bsa_t_a3_OFFSET

	l32i a1, a1, ___xtensa_irq_bsa_t_scratch_OFFSET
	rsync

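	/* Return through the EPC/EPS pair at ZSR_RFI_LEVEL written
	 * above, restoring the caller's PC and PS in one step.
	 */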
	rfi ZSR_RFI_LEVEL

_bad_syscall:
	movi a2, K_SYSCALL_BAD
	j _id_ok

.popsection

/* FUNC_NORETURN void xtensa_userspace_enter(k_thread_entry_t user_entry,
 *					   void *p1, void *p2, void *p3,
 *					   uint32_t stack_end,
 *					   uint32_t stack_start)
 *
 * A one-way trip to userspace.
 */
.global xtensa_userspace_enter
.type xtensa_userspace_enter, @function
.align 4
xtensa_userspace_enter:
	/* Execute entry to set a bit in WINDOWSTART and do the
	 * window rotation, but we are going to set up our own
	 * stack.
	 */
	entry a1, 16

	SPILL_ALL_WINDOWS

	/* We have to switch to the kernel stack before spilling
	 * kernel data onto it and erasing the user stack, to avoid
	 * leaking data from the previous context.
	 */
	mov a1, a7 /* stack start (low address) */

	rsr a0, ZSR_CPU
	l32i a0, a0, ___cpu_t_current_OFFSET

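	/* Scratch frame layout on the kernel stack: current thread
	 * at offset 24, user_entry at 20, p1/p2/p3 at 16/12/8,
	 * stack_end at 4 and stack_start at 0.
	 */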
	addi a1, a1, -28
	s32i a0, a1, 24
	s32i a2, a1, 20
	s32i a3, a1, 16
	s32i a4, a1, 12
	s32i a5, a1, 8
	s32i a6, a1, 4
	s32i a7, a1, 0

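	/* Erase the user stack and set up its permissions, passing
	 * the current thread (saved at offset 24) in a6.
	 */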
	l32i a6, a1, 24
	call4 xtensa_user_stack_perms

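	/* Switch the page tables (MMU) or write the map (MPU) so the
	 * thread's memory domain is in effect once we drop to user
	 * mode.
	 */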
	l32i a6, a1, 24
#ifdef CONFIG_XTENSA_MMU
#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
	call4 xtensa_swap_update_page_tables
#else
	SWAP_PAGE_TABLE a6, a3, a7
#endif
#endif
#ifdef CONFIG_XTENSA_MPU
	call4 xtensa_mpu_map_write
#endif

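	/* Mark the thread as running in user mode: with TLS this is
	 * the is_user_mode flag; otherwise THREADPTR itself, pointing
	 * at the current thread, serves as the marker.
	 */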
#if XCHAL_HAVE_THREADPTR
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	rur.THREADPTR a3
	movi a0, is_user_mode@tpoff
	add a0, a3, a0
	movi a3, 1
	s32i a3, a0, 0
#else
	rsr a3, ZSR_CPU
	l32i a3, a3, ___cpu_t_current_OFFSET
	wur.THREADPTR a3
#endif
#endif /* XCHAL_HAVE_THREADPTR */

	/* Now set the z_thread_entry parameters. We are simulating
	 * a call4, so the parameters start at a6, a7, ...
	 */
	l32i a6, a1, 20
	l32i a7, a1, 16
	l32i a8, a1, 12
	l32i a9, a1, 8

	/* Go back to user stack */
	l32i a1, a1, 4

	/* Disabling interrupts as we need to use ZSR_EPC and ZSR_EPS */
	rsil a0, 0xf

	movi a0, z_thread_entry
	wsr.ZSR_EPC a0

	/* Configuring the PS register.
	 * We have to set CALLINC as well, since the called
	 * function will do an "entry".
	 */
#ifdef CONFIG_XTENSA_MMU
	movi a0, PS_WOE|PS_CALLINC(1)|PS_UM|PS_RING(2)
#endif
#ifdef CONFIG_XTENSA_MPU
	/* MPU only has RING 0 and 1. */
	movi a0, PS_WOE|PS_CALLINC(1)|PS_UM|PS_RING(1)
#endif

	wsr.ZSR_EPS a0

	/* Wipe out a0 (there is no return from this function). */
	movi a0, 0

	rfi ZSR_RFI_LEVEL