/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#ifndef __KERNEL_THREAD_PRIVATE_ARCH_H
#define __KERNEL_THREAD_PRIVATE_ARCH_H

#ifndef __ASSEMBLER__

#include <kernel/thread.h>
#include <kernel/vfp.h>
#include <sm/sm.h>

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA)
#else
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA)
#endif
#define STACK_THREAD_SIZE	(8192 + CFG_STACK_THREAD_EXTRA)

#if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(__clang__) || \
	!defined(CFG_CRYPTO_WITH_CE)
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif

#endif /*ARM32*/

#ifdef ARM64
#if defined(__clang__) && !defined(__OPTIMIZE_SIZE__)
#define STACK_TMP_SIZE		(4096 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA)
#else
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA)
#endif
#define STACK_THREAD_SIZE	(8192 + CFG_STACK_THREAD_EXTRA)

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
/*
 * Extra space added to each stack in order to reliably detect and dump stack
 * overflows. Should cover the maximum expected overflow size caused by any C
 * function (say, 512 bytes; no function should have that many local
 * variables), plus the maximum stack space needed by
 * __cyg_profile_func_exit(): about 1 KB, a large part of which is used to
 * print the call stack. Total: 1.5 KB.
 */
#define STACK_CHECK_EXTRA	1536
#else
#define STACK_CHECK_EXTRA	0
#endif

#ifdef ARM64
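/*
 * Layout note (an inference, not stated elsewhere in this header): the
 * struct below is also accessed from assembly, so its offsets matter.
 * x[31 - 19] gives 12 slots, one per callee-saved register x19..x28
 * plus x29 (FP) and x30 (LR), and the pad member presumably keeps the
 * total size (128 bytes) a multiple of the 16-byte AArch64 stack
 * alignment.
 */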
struct thread_user_mode_rec {
	uint64_t ctx_regs_ptr;
	uint64_t exit_status0_ptr;
	uint64_t exit_status1_ptr;
	uint64_t pad;
	uint64_t x[31 - 19]; /* x19..x30 */
};
#endif /*ARM64*/

#ifdef CFG_WITH_VFP
struct thread_vfp_state {
	bool ns_saved;
	bool sec_saved;
	bool sec_lazy_saved;
	struct vfp_state ns;
	struct vfp_state sec;
	struct thread_user_vfp_state *uvfp;
};

#endif /*CFG_WITH_VFP*/
#endif /*__ASSEMBLER__*/

#ifdef ARM64
#ifdef CFG_WITH_VFP
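/*
 * A hedged breakdown of the constant below, assuming the AArch64
 * struct vfp_state holds 32 128-bit Q-registers plus the FPSR/FPCR
 * pair: 16 bytes for the three bool flags (padded), (16 * 32 + 16)
 * bytes for each of the two struct vfp_state members (ns and sec),
 * and a final 16 bytes for the uvfp pointer (padded).
 */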
#define THREAD_VFP_STATE_SIZE				\
	(16 + (16 * 32 + 16) * 2 + 16)
#else
#define THREAD_VFP_STATE_SIZE				0
#endif
#endif /*ARM64*/

#ifndef __ASSEMBLER__

/*
 * During boot, note the part of code and data that needs to be mapped while
 * in user mode. The provided address and size have to be page aligned.
 * Note that the code and data will be mapped at the lowest possible
 * addresses available for user space (see core_mmu_get_user_va_range()).
 */
extern long thread_user_kcode_offset;

/*
 * Initializes VBAR for the current CPU (called by thread_init_per_cpu())
 */
void thread_init_vbar(vaddr_t addr);

void thread_excp_vect(void);
void thread_excp_vect_wa_spectre_v2(void);
void thread_excp_vect_wa_a15_spectre_v2(void);
void thread_excp_vect_wa_spectre_bhb(void);
void thread_excp_vect_end(void);

/*
 * Assembly function as the first function in a thread. Handles a stdcall,
 * a0-a3 hold the parameters. Hands over to __thread_std_smc_entry() when
 * everything is set up and does some post-processing once
 * __thread_std_smc_entry() returns.
 */
void thread_std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5);
uint32_t __thread_std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2,
				uint32_t a3, uint32_t a4, uint32_t a5);

void thread_sp_alloc_and_run(struct thread_smc_args *args);

/*
 * Resumes execution of currently active thread by restoring context and
 * jumping to the instruction where to continue execution.
 *
 * Arguments supplied by non-secure world will be copied into the saved
 * context of the current thread if THREAD_FLAGS_COPY_ARGS_ON_RETURN is set
 * in the flags field in the thread context.
 */
void thread_resume(struct thread_ctx_regs *regs);

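/*
 * Assembly helper behind thread_enter_user_mode(). A hedged summary:
 * it transfers execution to user mode with the context in @regs and
 * returns once user mode exits; the return value and
 * *exit_status0/*exit_status1 report how the exit happened.
 */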
uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
				  uint32_t *exit_status0,
				  uint32_t *exit_status1);

/*
 * Private functions made available for thread_asm.S
 */

/* Returns the temp stack for current CPU */
void *thread_get_tmp_sp(void);

/*
 * Marks the current thread as suspended and updates the flags of the
 * thread context (see thread_resume() for the use of the flags).
 * Returns the thread index of the thread that was suspended.
 */
int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc);
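
/*
 * A minimal usage sketch of the suspend/resume pair (pseudo-C; the
 * real flow lives in assembly, and the argument values here are only
 * illustrative):
 *
 *	int id = thread_state_suspend(THREAD_FLAGS_COPY_ARGS_ON_RETURN,
 *				      cpsr, pc);
 *	// exit to non-secure world; a later re-entry resumes with:
 *	thread_resume_from_rpc(id, a0, a1, a2, a3);
 */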

/*
 * Marks the current thread as free.
 */
void thread_state_free(void);

/* Returns a pointer to the saved registers in current thread context. */
struct thread_ctx_regs *thread_get_ctx_regs(void);

#ifdef ARM32
/* Sets sp for abort mode */
void thread_set_abt_sp(vaddr_t sp);

/* Sets sp for undefined mode */
void thread_set_und_sp(vaddr_t sp);

/* Sets sp for irq mode */
void thread_set_irq_sp(vaddr_t sp);

/* Sets sp for fiq mode */
void thread_set_fiq_sp(vaddr_t sp);

/* Read usr_sp banked CPU register */
uint32_t thread_get_usr_sp(void);
uint32_t thread_get_usr_lr(void);
void thread_set_usr_lr(uint32_t usr_lr);
#endif /*ARM32*/

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5);
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3);

/*
 * Suspends the current thread and temporarily exits to non-secure world.
 * This function returns later when non-secure world returns.
 *
 * The purpose of this function is to request services from non-secure
 * world.
 */
#define THREAD_RPC_NUM_ARGS     4
#ifdef CFG_CORE_FFA
struct thread_rpc_arg {
	union {
		struct {
			uint32_t w1;
			uint32_t w4;
			uint32_t w5;
			uint32_t w6;
		} call;
		struct {
			uint32_t w4;
			uint32_t w5;
			uint32_t w6;
		} ret;
		uint32_t pad[THREAD_RPC_NUM_ARGS];
	};
};

void thread_rpc(struct thread_rpc_arg *rpc_arg);
#else
void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]);
#endif
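
/*
 * A hedged sketch of a non-FFA thread_rpc() call (OPTEE_SMC_RETURN_RPC_CMD
 * and the argument layout are assumptions based on the SMC ABI, not
 * definitions from this header):
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = {
 *		OPTEE_SMC_RETURN_RPC_CMD, cookie_hi, cookie_lo,
 *	};
 *
 *	thread_rpc(rv);	// suspends; resumes when normal world returns
 */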

/*
 * Called from assembly only, vector_fast_smc_entry(). Handles a fast SMC
 * by dispatching it to the registered fast SMC handler.
 */
void thread_handle_fast_smc(struct thread_smc_args *args);

/*
 * Called from assembly only, vector_std_smc_entry(). Handles a std SMC by
 * dispatching it to the registered std SMC handler.
 */
uint32_t thread_handle_std_smc(uint32_t a0, uint32_t a1, uint32_t a2,
			       uint32_t a3, uint32_t a4, uint32_t a5,
			       uint32_t a6, uint32_t a7);

/* Called from assembly only. Handles an SVC from user mode. */
void thread_svc_handler(struct thread_svc_regs *regs);

void thread_spmc_register_secondary_ep(vaddr_t ep);
#endif /*__ASSEMBLER__*/
#endif /*__KERNEL_THREAD_PRIVATE_ARCH_H*/