/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include "tee_syscall_numbers.h"
#include "trace_levels.h"
#include <arm64.h>
#include <arm64_macros.S>
#include <asm.S>
#include <generated/asm-defines.h>
#include <kernel/thread.h>
#include <tee_api_defines.h>

#if 0
struct sc_rec {
	uint64_t x0;
	uint64_t x1;
	uint64_t x19;
	uint64_t x30;
};
#endif
#define SC_REC_X0		(8 * 0)
#define SC_REC_X1		(8 * 1)
#define SC_REC_X19		(8 * 2)
#define SC_REC_X30		(8 * 3)
#define SC_REC_SIZE		(SC_REC_X30 + 8)
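
/*
 * Note: SC_REC_SIZE is 32 bytes, a multiple of 16, so the sub/add of
 * SC_REC_SIZE below keeps sp 16-byte aligned as AArch64 requires.
 */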

/*
 * uint32_t tee_svc_do_call(struct thread_svc_regs *regs, tee_svc_func func);
 *
 * Called from user_ta_handle_svc()
 */
FUNC tee_svc_do_call , :
	sub	sp, sp, #SC_REC_SIZE
	stp	x0, x1, [sp, #SC_REC_X0]
	stp	x19, x30, [sp, #SC_REC_X19]
	mov	x19, sp

	ldr	x2, [x0, #THREAD_SVC_REG_SPSR]
	tst	x2, #(SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT)
	b.eq	.Lcall_a64	/* Caller is AArch64 */

	ldp	x5, x6, [x0, #THREAD_SVC_REG_X5]
	cmp	x6, #0
	b.eq	.Lno_args_a32	/* No arguments passed on the user stack */

	/*
	 * Calculate the stack space needed to copy the AArch32
	 * arguments and to transform them into AArch64 arguments.
	 * x6 = nargs_on_stack
	 * n64 = (nargs_on_stack - 4) * 8
	 * n32 = nargs_on_stack * 4
	 * sp -= ROUNDUP(MAX(n32, n64), 16)
	 */
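	/*
	 * Worked example (illustrative only): with nargs_on_stack = 6,
	 * n64 = (6 - 4) * 8 = 16 and n32 = 6 * 4 = 24, so the stack
	 * pointer is moved down by ROUNDUP(24, 16) = 32 bytes.
	 */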
	/* n64 = (nargs_on_stack - 4) * 8 */
	sub	x1, x6, #0x4
	lsl	x1, x1, #3
	/* n32 = nargs_on_stack * 4 */
	lsl	x0, x6, #2
	/* sp -= ROUNDUP(MAX(n32, n64), 16) */
	cmp	x1, x0
	csel	x0, x1, x0, ge
	add	x0, x0, #0xf
	and	x0, x0, #0xfffffffffffffff0
	sub	sp, sp, x0

	/*
	 * Find the location on the stack where to copy the AArch32
	 * arguments and do the copy:
	 * copy_from_user(sp, x5, nargs_on_stack * 4)
	 */
	mov	x0, sp
	mov	x1, x5
	add	x2, xzr, x6, lsl #2	/* x2 = nargs_on_stack * 4 */
	bl	copy_from_user
	/* If the copy failed, return the error */
	cmp	x0, #0
	b.ne	.Lret

	/*
	 * Load the arguments into w4..w7. We may load junk into unused
	 * registers, but that is quicker than computing exactly how
	 * many registers to load.
	 */
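	/*
	 * Note: load_wregs below fills w4..w7 from the first 16 bytes
	 * at sp, and writing a W register also zero-extends into the
	 * corresponding X register.
	 */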
	/* x0 = nargs_on_stack */
	ldr	x0, [x19, #SC_REC_X0]
	ldr	x0, [x0, #THREAD_SVC_REG_X6]
	load_wregs sp, 0, 4, 7

	/*
	 * Convert the remaining AArch32 parameters passed on the stack
	 * into AArch64 parameters on the stack.
	 *
	 * nargs_on_stack is initialized in x0 above
	 * n64 = (nargs_on_stack - 4) * 8
	 * if n64 <= 0 goto .Lno_args_a32
	 * x0 = x2 = x19 - n64
	 * x1 points to next argument
	 * while (x2 != x19) {
	 *	w3 = *x1
	 *	x1 += 4
	 *	*x2 = x3
	 *	x2 += 8
	 * }
	 * sp = x0
	 */
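	/*
	 * Note that "ldr w3" zero-extends into x3, so each 32-bit
	 * argument is widened into a zero-extended 64-bit stack slot.
	 * The widened slots end exactly at x19 (the sc_rec frame), and
	 * sp is finally moved down to the first of them.
	 */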
	/* n64 = (nargs_on_stack - 4) * 8 */
	subs	x2, x0, #0x4
	b.le	.Lno_args_a32
	lsl	x2, x2, #3
	/* x0 = x2 = x19 - n64 */
	sub	x0, x19, x2
	mov	x2, x0
	/* x1 points to next argument, skipping the 4 already in w4..w7 */
	add	x1, sp, #(4 * 4)
.Lcpy_to_stack:
	ldr	w3, [x1], #4
	str	x3, [x2], #8
	cmp	x2, x19
	b.ne	.Lcpy_to_stack
	mov	sp, x0

.Lno_args_a32: /* Load the first 4 arguments to the function */
	ldr	x9, [x19, #SC_REC_X0]
	load_xregs x9, THREAD_SVC_REG_X0, 0, 3
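	/*
	 * Clear the upper 32 bits of the arguments: the 32-bit caller
	 * may have left garbage there, and writing a W register
	 * zero-extends into the full X register.
	 */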
	mov	w0, w0
	mov	w1, w1
	mov	w2, w2
	mov	w3, w3

	/* Call the svc function */
	ldr	x16, [x19, #SC_REC_X1]
	blr	x16
	b	.Lret

.Lcall_a64: /* Load the first 8 arguments to the function */
	ldr	x9, [x19, #SC_REC_X0]
	load_xregs x9, THREAD_SVC_REG_X0, 0, 7

	/* Call the svc function */
	ldr	x16, [x19, #SC_REC_X1]
	blr	x16

.Lret:
	mov	sp, x19
	ldp	x19, x30, [sp, #SC_REC_X19]
	add	sp, sp, #SC_REC_SIZE
	ret
END_FUNC tee_svc_do_call
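
#if 0
/*
 * Illustrative C-like sketch (not compiled) of the flow implemented by
 * tee_svc_do_call() above. caller_is_aarch32() and
 * copy_and_widen_stack_args() are hypothetical names for the inline
 * code paths, not real functions.
 */
uint32_t tee_svc_do_call(struct thread_svc_regs *regs, tee_svc_func func)
{
	if (caller_is_aarch32(regs) && regs->x6 /* nargs_on_stack */) {
		/*
		 * Copy the 32-bit stack arguments from user space,
		 * load the first four of them into w4..w7 and widen
		 * any remainder into 64-bit stack slots.
		 */
		uint32_t res = copy_and_widen_stack_args(regs);

		if (res)
			return res;
	}
	/* AArch32: w0..w3 from regs; AArch64: x0..x7 from regs */
	return func(regs->x0, regs->x1, regs->x2, regs->x3,
		    regs->x4, regs->x5, regs->x6, regs->x7);
}
#endif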

/*
 * syscall_sys_return() and syscall_panic() are special cases among
 * syscalls in that they do not return to the TA; instead, execution is
 * resumed as if __thread_enter_user_mode() had returned to
 * thread_enter_user_mode().
 *
 * In order to do this the functions need a way to get hold of a
 * pointer to the struct thread_svc_regs, which is provided by storing
 * the relevant registers on the stack in el0_svc() and loading them
 * into registers again when el0_svc() returns.
 *
 * tee_svc_do_call() is supplied the pointer to struct thread_svc_regs
 * in x0. This pointer can later be retrieved by chasing x19.
 */
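
/*
 * Illustration: inside a syscall function invoked through
 * tee_svc_do_call(), x19 still points at the sc_rec frame, so the
 * saved regs pointer can be recovered as (C-like pseudocode):
 *
 *	regs = *(struct thread_svc_regs **)(x19 + SC_REC_X0);
 *
 * which is exactly what the "ldr x3, [x19, #SC_REC_X0]" below does.
 */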

/*
 * User space sees this function as:
 * void syscall_sys_return(uint32_t ret) __noreturn;
 *
 * But internally the function depends on being called from
 * tee_svc_do_call() so that it can chase x19 to get hold of a pointer
 * to struct thread_svc_regs.
 *
 * The argument ret is already in x0, so we leave it untouched and let
 * it propagate as the return value of the called
 * tee_svc_unwind_enter_user_mode().
 */
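/*
 * The register assignments in syscall_sys_return() and syscall_panic()
 * below match a helper that is presumably declared as:
 *
 *	uint32_t tee_svc_sys_return_helper(uint32_t ret, bool panic,
 *					   uint32_t panic_code,
 *					   struct thread_svc_regs *regs);
 */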
FUNC syscall_sys_return , :
	mov	x1, #0  /* panic = false */
	mov	x2, #0  /* panic_code = 0 */
	ldr	x3, [x19, #SC_REC_X0] /* pointer to struct thread_svc_regs */
	b	tee_svc_sys_return_helper
END_FUNC syscall_sys_return

/*
 * User space sees this function as:
 * void syscall_panic(uint32_t code) __noreturn;
 *
 * But internally the function depends on being called from
 * tee_svc_do_call() so that it can chase x19 to get hold of a pointer
 * to struct thread_svc_regs.
 */
FUNC syscall_panic , :
	mov	x1, #1  /* panic = true */
	mov	x2, x0  /* code */
	ldr	w0, =TEE_ERROR_TARGET_DEAD
	ldr	x3, [x19, #SC_REC_X0] /* pointer to struct thread_svc_regs */
	b	tee_svc_sys_return_helper
END_FUNC syscall_panic

BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)