1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2019, Linaro Limited
4 */
5
6#include <arm32_macros.S>
7#include <arm.h>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/thread.h>
12#include <sm/optee_smc.h>
13#include <sm/teesmc_opteed.h>
14#include <sm/teesmc_opteed_macros.h>
15
16.arch_extension sec
17
18/*
19 * If ASLR is configured the identity mapped code may be mapped at two
20 * locations, the identity location where virtual and physical address is
21 * the same and at the runtime selected location to which OP-TEE has been
 * relocated.  Code executing at a location different from the runtime
 * selected location works OK as long as it doesn't do relative
24 * addressing outside the identity mapped range. To allow relative
25 * addressing this macro jumps to the runtime selected location.
26 *
27 * Note that the identity mapped range and the runtime selected range can
28 * only differ if ASLR is configured.
29 */
	/*
	 * With CFG_CORE_ASLR the absolute address of the local label is
	 * fetched from the literal pool, i.e. the runtime selected
	 * (relocated) address rather than a PC-relative one, and jumped
	 * to, leaving the identity mapping.  r12 (ip) is caller-clobbered
	 * in the AAPCS so it's safe to use as scratch here.
	 */
	.macro readjust_pc
#ifdef CFG_CORE_ASLR
	ldr	r12, =1111f	/* Absolute address from literal pool */
	bx	r12		/* Continue at the runtime selected mapping */
1111:
#endif
	.endm
37
/*
 * Entered from the secure monitor for a standard (yielding) SMC.
 * Hands the call over to thread_handle_std_smc() and reports the result
 * back to the monitor with TEESMC_OPTEED_RETURN_CALL_DONE.
 */
FUNC vector_std_smc_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	/*
	 * SMC arguments a0..a7 arrive in r0-r7.  AAPCS passes function
	 * arguments five to eight on the stack, so push r4-r7 (a4..a7);
	 * a0..a3 are already in r0-r3.
	 */
	push	{r4-r7}
	bl	thread_handle_std_smc
	add	sp, sp, #(4 * 4)	/* Drop the four stack arguments */
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit(), thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	mov	r1, r0		/* Return value becomes arg1 of the SMC */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_std_smc_entry
56
/*
 * Entered from the secure monitor for a fast SMC.  The SMC arguments
 * a0..a7 (r0-r7) are stored on the stack and a pointer to them is
 * passed to thread_handle_fast_smc(), which may update them in place.
 * The (possibly updated) values are then handed back to the monitor in
 * r1-r8 with TEESMC_OPTEED_RETURN_CALL_DONE in r0.
 */
FUNC vector_fast_smc_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	push	{r0-r7}		/* Eight argument words on the stack */
	mov	r0, sp		/* Pointer to the argument struct */
	bl	thread_handle_fast_smc
	pop	{r1-r8}		/* Results a0..a7 -> SMC args 1..8 */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_fast_smc_entry
69
/*
 * Entered from the secure monitor when a FIQ is to be handled by
 * OP-TEE.  Reports completion with TEESMC_OPTEED_RETURN_FIQ_DONE.
 */
FUNC vector_fiq_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	bl	itr_core_handler
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_fiq_entry
81
82#if defined(CFG_WITH_ARM_TRUSTED_FW)
/*
 * Entered from the secure monitor when a secondary CPU is powered on.
 * Note: no readjust_pc here — the MMU isn't enabled until
 * cpu_on_handler() has run, so execution must stay at the identity
 * mapped location until then.
 */
LOCAL_FUNC vector_cpu_on_entry , : , .identity_map
UNWIND(	.cantunwind)
	bl	cpu_on_handler
	/* When cpu_on_handler() returns mmu is enabled */
	mov	r1, r0		/* Return value becomes arg1 of the SMC */
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_cpu_on_entry
93
/*
 * Entered from the secure monitor on the CPU off path.  Calls
 * thread_cpu_off_handler() and reports its return value to the monitor
 * with TEESMC_OPTEED_RETURN_OFF_DONE.
 */
LOCAL_FUNC vector_cpu_off_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_cpu_off_handler
	mov	r1, r0		/* Return value becomes arg1 of the SMC */
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_cpu_off_entry
104
/*
 * Entered from the secure monitor on the CPU suspend path.  Calls
 * thread_cpu_suspend_handler() and reports its return value to the
 * monitor with TEESMC_OPTEED_RETURN_SUSPEND_DONE.
 */
LOCAL_FUNC vector_cpu_suspend_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_cpu_suspend_handler
	mov	r1, r0		/* Return value becomes arg1 of the SMC */
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_cpu_suspend_entry
115
/*
 * Entered from the secure monitor on the CPU resume path.  Calls
 * thread_cpu_resume_handler() and reports its return value to the
 * monitor with TEESMC_OPTEED_RETURN_RESUME_DONE.
 */
LOCAL_FUNC vector_cpu_resume_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_cpu_resume_handler
	mov	r1, r0		/* Return value becomes arg1 of the SMC */
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_cpu_resume_entry
126
/*
 * Entered from the secure monitor on the system off path.  Calls
 * thread_system_off_handler() and reports its return value to the
 * monitor with TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE.
 */
LOCAL_FUNC vector_system_off_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_system_off_handler
	mov	r1, r0		/* Return value becomes arg1 of the SMC */
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_system_off_entry
137
/*
 * Entered from the secure monitor on the system reset path.  Calls
 * thread_system_reset_handler() and reports its return value to the
 * monitor with TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE.
 */
LOCAL_FUNC vector_system_reset_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_system_reset_handler
	mov	r1, r0		/* Return value becomes arg1 of the SMC */
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC vector_system_reset_entry
148
149/*
150 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
151 * initialization.  Also used when compiled with the internal monitor, but
152 * the cpu_*_entry and system_*_entry are not used then.
153 *
154 * Note that ARM-TF depends on the layout of this vector table, any change
155 * in layout has to be synced with ARM-TF.
156 */
FUNC thread_vector_table , : , .identity_map
UNWIND(	.cantunwind)
	/*
	 * Plain branches (not bl) — each entry terminates by issuing an
	 * SMC back to the monitor and never returns here.  The order of
	 * the entries is the contract with ARM-TF, do not reorder.
	 */
	b	vector_std_smc_entry		/* 0: yielding SMC */
	b	vector_fast_smc_entry		/* 1: fast SMC */
	b	vector_cpu_on_entry		/* 2: CPU on */
	b	vector_cpu_off_entry		/* 3: CPU off */
	b	vector_cpu_resume_entry		/* 4: CPU resume */
	b	vector_cpu_suspend_entry	/* 5: CPU suspend */
	b	vector_fiq_entry		/* 6: FIQ */
	b	vector_system_off_entry		/* 7: system off */
	b	vector_system_reset_entry	/* 8: system reset */
END_FUNC thread_vector_table
DECLARE_KEEP_PAGER thread_vector_table
170#endif /*if defined(CFG_WITH_ARM_TRUSTED_FW)*/
171
/*
 * Entry point of a thread serving a standard (yielding) SMC.  Runs
 * __thread_std_smc_entry() on the thread stack, then switches to the
 * temporary stack, frees the thread state and reports the result to
 * the secure monitor with TEESMC_OPTEED_RETURN_CALL_DONE.
 */
FUNC thread_std_smc_entry , :
UNWIND(	.cantunwind)
	/*
	 * __thread_std_smc_entry() takes six arguments: a0..a3 in
	 * r0-r3, a4/a5 (r4/r5) on the stack per the AAPCS.
	 */
	push	{r4, r5} /* Pass these following the arm32 calling convention */
	bl	__thread_std_smc_entry
	add	sp, sp, #8 /* There's nothing to return, just restore the sp */
	mov	r4, r0	/* Save return value for later */

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4		/* Saved return value becomes arg1 */
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0		/* Clear the copy of the return value */
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC thread_std_smc_entry
195
196/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/* Save rv[] pointer and return address across the suspend */
	push	{r0, lr}
UNWIND(	.save	{r0, lr})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return	/* Resume address after the RPC */
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	/* SMC should not return */
	panic_at_smc_return

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the
	 * value it had when thread_save_state() was called above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	{r12, lr}		/* Restore rv[] pointer and lr */
	stm	r12, {r0-r3}		/* Store r0-r3 into rv[] */
	bx	lr
END_FUNC thread_rpc
DECLARE_KEEP_PAGER thread_rpc
237
238/*
239 * void thread_foreign_intr_exit(uint32_t thread_index)
240 *
241 * This function is jumped to at the end of macro foreign_intr_handler().
242 * The current thread as indicated by @thread_index has just been
243 * suspended.  The job here is just to inform normal world the thread id to
244 * resume when returning.
245 */
FUNC thread_foreign_intr_exit , :
	mov	r4, r0		/* Thread index to resume, as arg4 of the SMC */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC thread_foreign_intr_exit
256