/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef __KERNEL_THREAD_ARCH_H
#define __KERNEL_THREAD_ARCH_H

#ifndef __ASSEMBLER__
#include <arm.h>
#include <compiler.h>
#include <kernel/vfp.h>
#include <types_ext.h>
#endif

#ifndef __ASSEMBLER__

#ifdef ARM64
/*
 * struct thread_core_local needs to have alignment suitable for a stack
 * pointer since SP_EL1 points to this
 */
#define THREAD_CORE_LOCAL_ALIGNED __aligned(16)
#else
#define THREAD_CORE_LOCAL_ALIGNED __aligned(8)
#endif

struct mobj;

/*
 * Storage of keys used for pointer authentication. FEAT_PAuth supports a
 * number of keys, of which only the APIA key is currently used, depending
 * on configuration.
 */
struct thread_pauth_keys {
	uint64_t apia_hi;
	uint64_t apia_lo;
};

struct thread_core_local {
#ifdef ARM32
	uint32_t r[2];
	paddr_t sm_pm_ctx_phys;
#endif
#ifdef ARM64
	uint64_t x[4];
#endif
#ifdef CFG_CORE_PAUTH
	struct thread_pauth_keys keys;
#endif
	vaddr_t tmp_stack_va_end;
	long kcode_offset;
	short int curr_thread;
	uint32_t flags;
	vaddr_t abt_stack_va_end;
#ifdef CFG_TEE_CORE_DEBUG
	unsigned int locked_count; /* Number of spinlocks held */
#endif
#if defined(ARM64) && defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	uint8_t bhb_loop_count;
#endif
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
} THREAD_CORE_LOCAL_ALIGNED;

struct thread_vector_table {
	uint32_t std_smc_entry;
	uint32_t fast_smc_entry;
	uint32_t cpu_on_entry;
	uint32_t cpu_off_entry;
	uint32_t cpu_resume_entry;
	uint32_t cpu_suspend_entry;
	uint32_t fiq_entry;
	uint32_t system_off_entry;
	uint32_t system_reset_entry;
};

extern struct thread_vector_table thread_vector_table;

struct thread_user_vfp_state {
	struct vfp_state vfp;
	bool lazy_saved;
	bool saved;
};

#ifdef ARM32
struct thread_smc_args {
	uint32_t a0;	/* SMC function ID */
	uint32_t a1;	/* Parameter */
	uint32_t a2;	/* Parameter */
	uint32_t a3;	/* Thread ID when returning from RPC */
	uint32_t a4;	/* Not used */
	uint32_t a5;	/* Not used */
	uint32_t a6;	/* Not used */
	uint32_t a7;	/* Hypervisor Client ID */
};
#endif /*ARM32*/
#ifdef ARM64
struct thread_smc_args {
	uint64_t a0;	/* SMC function ID */
	uint64_t a1;	/* Parameter */
	uint64_t a2;	/* Parameter */
	uint64_t a3;	/* Thread ID when returning from RPC */
	uint64_t a4;	/* Not used */
	uint64_t a5;	/* Not used */
	uint64_t a6;	/* Not used */
	uint64_t a7;	/* Hypervisor Client ID */
};
#endif /*ARM64*/

#ifdef ARM32
struct thread_abort_regs {
	uint32_t usr_sp;
	uint32_t usr_lr;
	uint32_t pad;
	uint32_t spsr;
	uint32_t elr;
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t r8;
	uint32_t r9;
	uint32_t r10;
	uint32_t r11;
	uint32_t ip;
};
#endif /*ARM32*/
#ifdef ARM64
struct thread_abort_regs {
	uint64_t x0;	/* r0_usr */
	uint64_t x1;	/* r1_usr */
	uint64_t x2;	/* r2_usr */
	uint64_t x3;	/* r3_usr */
	uint64_t x4;	/* r4_usr */
	uint64_t x5;	/* r5_usr */
	uint64_t x6;	/* r6_usr */
	uint64_t x7;	/* r7_usr */
	uint64_t x8;	/* r8_usr */
	uint64_t x9;	/* r9_usr */
	uint64_t x10;	/* r10_usr */
	uint64_t x11;	/* r11_usr */
	uint64_t x12;	/* r12_usr */
	uint64_t x13;	/* r13/sp_usr */
	uint64_t x14;	/* r14/lr_usr */
	uint64_t x15;
	uint64_t x16;
	uint64_t x17;
	uint64_t x18;
	uint64_t x19;
	uint64_t x20;
	uint64_t x21;
	uint64_t x22;
	uint64_t x23;
	uint64_t x24;
	uint64_t x25;
	uint64_t x26;
	uint64_t x27;
	uint64_t x28;
	uint64_t x29;
	uint64_t x30;
	uint64_t elr;
	uint64_t spsr;
	uint64_t sp_el0;
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	uint64_t apiakey_hi;
	uint64_t apiakey_lo;
#endif
};
#endif /*ARM64*/

#ifdef ARM32
struct thread_svc_regs {
	uint32_t spsr;
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t lr;
};
#endif /*ARM32*/
#ifdef ARM64
struct thread_svc_regs {
	uint64_t elr;
	uint64_t spsr;
	uint64_t x0;	/* r0_usr */
	uint64_t x1;	/* r1_usr */
	uint64_t x2;	/* r2_usr */
	uint64_t x3;	/* r3_usr */
	uint64_t x4;	/* r4_usr */
	uint64_t x5;	/* r5_usr */
	uint64_t x6;	/* r6_usr */
	uint64_t x7;	/* r7_usr */
	uint64_t x8;	/* r8_usr */
	uint64_t x9;	/* r9_usr */
	uint64_t x10;	/* r10_usr */
	uint64_t x11;	/* r11_usr */
	uint64_t x12;	/* r12_usr */
	uint64_t x13;	/* r13/sp_usr */
	uint64_t x14;	/* r14/lr_usr */
	uint64_t x30;
	uint64_t sp_el0;
#ifdef CFG_SECURE_PARTITION
	uint64_t x15;
	uint64_t x16;
	uint64_t x17;
	uint64_t x18;
	uint64_t x19;
	uint64_t x20;
	uint64_t x21;
	uint64_t x22;
	uint64_t x23;
	uint64_t x24;
	uint64_t x25;
	uint64_t x26;
	uint64_t x27;
	uint64_t x28;
	uint64_t x29;
#endif
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	uint64_t apiakey_hi;
	uint64_t apiakey_lo;
#endif
	uint64_t pad;
} __aligned(16);
#endif /*ARM64*/

#ifdef ARM32
struct thread_ctx_regs {
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t r8;
	uint32_t r9;
	uint32_t r10;
	uint32_t r11;
	uint32_t r12;
	uint32_t usr_sp;
	uint32_t usr_lr;
	uint32_t svc_spsr;
	uint32_t svc_sp;
	uint32_t svc_lr;
	uint32_t pc;
	uint32_t cpsr;
};
#endif /*ARM32*/

#ifdef ARM64
struct thread_ctx_regs {
	uint64_t sp;
	uint64_t pc;
	uint64_t cpsr;
	uint64_t x[31];
	uint64_t tpidr_el0;
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	uint64_t apiakey_hi;
	uint64_t apiakey_lo;
#endif
};
#endif /*ARM64*/

struct user_mode_ctx;

#ifdef CFG_WITH_ARM_TRUSTED_FW
/*
 * These five functions have a __weak default implementation which does
 * nothing. Platforms are expected to override them if needed.
 */
unsigned long thread_cpu_off_handler(unsigned long a0, unsigned long a1);
unsigned long thread_cpu_suspend_handler(unsigned long a0, unsigned long a1);
unsigned long thread_cpu_resume_handler(unsigned long a0, unsigned long a1);
unsigned long thread_system_off_handler(unsigned long a0, unsigned long a1);
unsigned long thread_system_reset_handler(unsigned long a0, unsigned long a1);
#endif /*CFG_WITH_ARM_TRUSTED_FW*/
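
/*
 * A minimal sketch of how a platform might override one of the __weak
 * handlers above; the body is illustrative only and the platform hook
 * named here is hypothetical:
 *
 *	unsigned long thread_system_reset_handler(unsigned long a0 __unused,
 *						  unsigned long a1 __unused)
 *	{
 *		plat_prepare_reset();	(hypothetical platform hook)
 *		return 0;
 *	}
 */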

/*
 * Defines the bits for the exception mask used by the
 * thread_*_exceptions() functions.
 * These definitions are compatible with both ARM32 and ARM64.
 */
#if defined(CFG_ARM_GICV3)
#define THREAD_EXCP_FOREIGN_INTR	(ARM32_CPSR_F >> ARM32_CPSR_F_SHIFT)
#define THREAD_EXCP_NATIVE_INTR		(ARM32_CPSR_I >> ARM32_CPSR_F_SHIFT)
#else
#define THREAD_EXCP_FOREIGN_INTR	(ARM32_CPSR_I >> ARM32_CPSR_F_SHIFT)
#define THREAD_EXCP_NATIVE_INTR		(ARM32_CPSR_F >> ARM32_CPSR_F_SHIFT)
#endif
#define THREAD_EXCP_ALL			(THREAD_EXCP_FOREIGN_INTR	\
					 | THREAD_EXCP_NATIVE_INTR	\
					 | (ARM32_CPSR_A >> ARM32_CPSR_F_SHIFT))
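
/*
 * A minimal usage sketch: masking foreign interrupts around a critical
 * section with thread_mask_exceptions() and thread_unmask_exceptions(),
 * which are assumed here to be declared in <kernel/thread.h>:
 *
 *	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	(critical section, no foreign interrupts delivered here)
 *
 *	thread_unmask_exceptions(excep);
 */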

#ifdef CFG_WITH_VFP
/*
 * thread_kernel_enable_vfp() - Temporarily enables usage of VFP
 *
 * Foreign interrupts are masked while VFP is enabled. User space must not be
 * entered before thread_kernel_disable_vfp() has been called to disable VFP
 * and restore the foreign interrupt status.
 *
 * This function may only be called from an active thread context and may
 * not be called again before thread_kernel_disable_vfp() has been called.
 *
 * VFP state is saved as needed.
 *
 * Returns a state variable that should be passed to
 * thread_kernel_disable_vfp().
 */
uint32_t thread_kernel_enable_vfp(void);

/*
 * thread_kernel_disable_vfp() - Disables usage of VFP
 * @state: state variable returned by thread_kernel_enable_vfp()
 *
 * Disables usage of VFP and restores foreign interrupt status after a call to
 * thread_kernel_enable_vfp().
 *
 * This function may only be called after a call to
 * thread_kernel_enable_vfp().
 */
void thread_kernel_disable_vfp(uint32_t state);
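
/*
 * A minimal sketch of the required pairing between the two functions
 * above; the VFP work itself is illustrative:
 *
 *	uint32_t vfp_state = thread_kernel_enable_vfp();
 *
 *	(use VFP/SIMD registers here, without entering user space)
 *
 *	thread_kernel_disable_vfp(vfp_state);
 */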

/*
 * thread_kernel_save_vfp() - Saves kernel vfp state if enabled
 */
void thread_kernel_save_vfp(void);

/*
 * thread_kernel_restore_vfp() - Restores kernel vfp state
 */
void thread_kernel_restore_vfp(void);

/*
 * thread_user_enable_vfp() - Enables vfp for user mode usage
 * @uvfp: pointer to where to save the vfp state if needed
 */
void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp);
#else /*CFG_WITH_VFP*/
static inline void thread_kernel_save_vfp(void)
{
}

static inline void thread_kernel_restore_vfp(void)
{
}
#endif /*CFG_WITH_VFP*/

/*
 * thread_user_save_vfp() - Saves the user vfp state if enabled
 */
#ifdef CFG_WITH_VFP
void thread_user_save_vfp(void);
#else
static inline void thread_user_save_vfp(void)
{
}
#endif

/*
 * thread_user_clear_vfp() - Clears the vfp state
 * @uctx: pointer to user mode context containing the saved state to clear
 */
#ifdef CFG_WITH_VFP
void thread_user_clear_vfp(struct user_mode_ctx *uctx);
#else
static inline void thread_user_clear_vfp(struct user_mode_ctx *uctx __unused)
{
}
#endif

#ifdef ARM64
/*
 * thread_get_saved_thread_sp() - Returns the saved sp of current thread
 *
 * When switching away from the thread stack pointer, its value is stored
 * separately in the current thread context. This function returns that
 * saved value.
 *
 * @returns stack pointer
 */
vaddr_t thread_get_saved_thread_sp(void);
#endif /*ARM64*/

/*
 * Provides addresses and size of kernel code that must be mapped while in
 * user mode.
 */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz);
#else
static inline void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
					 vaddr_t *va, size_t *sz)
{
	*mobj = NULL;
	*offset = 0;
	*va = 0;
	*sz = 0;
}
#endif
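
/*
 * A minimal sketch of querying the user-mapped kernel code region; all
 * four output parameters must be supplied, and a NULL *mobj afterwards
 * means there is nothing to map. thread_get_user_kdata() below follows
 * the same pattern:
 *
 *	struct mobj *mobj = NULL;
 *	size_t offset = 0;
 *	vaddr_t va = 0;
 *	size_t sz = 0;
 *
 *	thread_get_user_kcode(&mobj, &offset, &va, &sz);
 *	if (mobj)
 *		(map sz bytes of mobj at offset at user VA va)
 */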

/*
 * Provides addresses and size of kernel (rw) data that must be mapped
 * while in user mode.
 */
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz);
#else
static inline void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
					 vaddr_t *va, size_t *sz)
{
	*mobj = NULL;
	*offset = 0;
	*va = 0;
	*sz = 0;
}
#endif

/*
 * Disables and empties the prealloc RPC cache one reference at a time.
 * If all threads are idle, this function returns true and supplies in
 * *cookie the cookie of one shm object that was removed from the cache.
 * Once the cache is empty, *cookie is set to 0 and the cache is
 * disabled. If any thread isn't idle, this function returns false.
 */
bool thread_disable_prealloc_rpc_cache(uint64_t *cookie);
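
/*
 * A minimal sketch of draining the cache with the function above; the
 * free_shm_cookie() helper is hypothetical and stands in for whatever
 * the caller does with each returned cookie:
 *
 *	uint64_t cookie = 0;
 *
 *	while (thread_disable_prealloc_rpc_cache(&cookie)) {
 *		if (!cookie)
 *			break;
 *		free_shm_cookie(cookie);
 *	}
 */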

/*
 * Enables the prealloc RPC cache. If all threads are idle the cache is
 * enabled and this function returns true. If any thread isn't idle this
 * function returns false.
 */
bool thread_enable_prealloc_rpc_cache(void);

/*
 * thread_smc() - Issues an SMC with the given arguments in the first four
 * registers and returns the value of the first result register.
 */
unsigned long thread_smc(unsigned long func_id, unsigned long a1,
			 unsigned long a2, unsigned long a3);

/*
 * thread_smccc() - Issues an SMC, or an HVC depending on configuration,
 * with the eight values in @arg_res as arguments and stores the eight
 * result registers back into @arg_res.
 */
void thread_smccc(struct thread_smc_args *arg_res);
#endif /*__ASSEMBLER__*/
#endif /*__KERNEL_THREAD_ARCH_H*/