/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef KERNEL_THREAD_H
#define KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <util.h>
#include <kernel/thread_arch.h>

#define THREAD_FLAGS_COPY_ARGS_ON_RETURN	BIT(0)
#define THREAD_FLAGS_FOREIGN_INTR_ENABLE	BIT(1)
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR	BIT(2)

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
#ifdef CFG_FAULT_MITIGATION
	struct ftmn_func_arg *ftmn_arg;
#endif
};

void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);

struct thread_core_local *thread_get_core_local(void);

/*
 * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
 * first stack, THREAD_ID_0 + 1 for the next and so on.
 *
 * Returns true on success and false on errors.
 */
bool thread_init_stack(uint32_t stack_id, vaddr_t sp);

/*
 * Initializes thread contexts. Called in thread_init_boot_thread() if
 * virtualization is disabled. Virtualization subsystem calls it for
 * every new guest otherwise.
 */
void thread_init_threads(void);

/*
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit for
 * the init CPU.
 */
void thread_init_thread_core_local(void);
void thread_init_core_local_stacks(void);

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void);
void thread_init_core_local_pauth_keys(void);
#else
static inline void thread_init_thread_pauth_keys(void) { }
static inline void thread_init_core_local_pauth_keys(void) { }
#endif

/*
 * Initializes a thread to be used during boot
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns current thread id.
 */
short int thread_get_id(void);

/*
 * Returns current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);

/*
 * Sets foreign interrupts status for current thread, must only be called
 * from an active thread context.
 *
 * enable == true -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupts status (in CPSR) for current thread, must
 * only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask, that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions	exceptions to mask
 * @returns	old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
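
/*
 * Illustrative sketch (not part of this header): the usual save/mask/restore
 * pattern for the two functions above, using the THREAD_EXCP_FOREIGN_INTR
 * mask bit provided by the architecture headers included above. The critical
 * section marker is a placeholder.
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	// ... code that must not be preempted by foreign interrupts ...
 *
 *	thread_unmask_exceptions(exceptions);
 */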

static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0: Passed in r/x0 for entry_func
 * @a1: Passed in r/x1 for entry_func
 * @a2: Passed in r/x2 for entry_func
 * @a3: Passed in r/x3 for entry_func
 * @user_sp: Assigned sp value in user mode
 * @entry_func: Function to execute in user mode
 * @is_32bit: True if TA should execute in AArch32, false if AArch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above,
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * @Returns what's passed in "ret" to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1);

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret: Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore the registers saved by thread_enter_user_mode() and, when
 * it returns, make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are stored via the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
		uint32_t exit_status1);
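
/*
 * Illustrative sketch (not part of this header), assuming a caller that has
 * already prepared the user stack and entry point; "user_stack", "user_entry"
 * and "ta_arg0" are hypothetical values set up by a TA loader:
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t ret;
 *
 *	ret = thread_enter_user_mode(ta_arg0, 0, 0, 0, user_stack, user_entry,
 *				     false, &exit_status0, &exit_status1);
 *	// ret is whatever was passed to thread_unwind_user_mode();
 *	// exit_status0/1 carry the opaque exit statuses.
 */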

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);

/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells if the hard or
 * soft limits are queried. The difference between soft and hard is that for
 * the latter, the stack start includes some additional space so that a
 * function can overflow the soft limit and still have room to print a stack
 * dump.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							 vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							 vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}
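
/*
 * Illustrative sketch (not part of this header): estimating how much of the
 * current stack remains, assuming a downward-growing stack so the current
 * stack pointer lies between the returned start (low) and end (high)
 * addresses. Sampling the stack pointer via a local variable's address is a
 * rough placeholder.
 *
 *	vaddr_t start = 0;
 *	vaddr_t end = 0;
 *	vaddr_t sp = (vaddr_t)&start;
 *	size_t room = 0;
 *
 *	if (get_stack_soft_limits(&start, &end))
 *		room = sp - start;	// bytes left before the soft limit
 */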

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception was also in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers.
 *
 * @size: size in bytes of payload buffer
 *
 * @returns mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj: mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);
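
/*
 * Illustrative sketch (not part of this header): allocating a shared payload
 * buffer, getting a virtual address for it and releasing it again. It is
 * assumed that mobj_get_va() from <mm/mobj.h> takes the mobj, an offset and a
 * length; error handling is reduced to the bare minimum.
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(4096);
 *	void *va = NULL;
 *
 *	if (mobj) {
 *		va = mobj_get_va(mobj, 0, 4096);
 *		// ... fill or read the buffer via va ...
 *		thread_rpc_free_payload(mobj);
 *	}
 */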

/**
 * Allocate data for payload buffers only shared with the non-secure kernel
 *
 * @size: size in bytes of payload buffer
 *
 * @returns mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj: mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on these values, so it's important to
 * keep the IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
			.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
			.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd: RPC cmd
 * @num_params: number of parameters
 * @params: RPC parameters
 * @returns RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params);
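
/*
 * Illustrative sketch (not part of this header): issuing an RPC with one
 * value parameter and one memref parameter. OPTEE_RPC_CMD_EXAMPLE is a
 * hypothetical command identifier (real command numbers come from the RPC
 * protocol headers) and "mobj" is assumed to come from one of the
 * thread_rpc_alloc_*() functions above.
 *
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_VALUE(IN, 1, 2, 3),
 *		THREAD_PARAM_MEMREF(INOUT, mobj, 0, 4096),
 *	};
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_EXAMPLE,
 *				      ARRAY_SIZE(params), params);
 *
 *	// res holds the RPC return value
 */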

/**
 * Allocate data for payload buffers.
 * Buffer is exported to user mode applications.
 *
 * @size: size in bytes of payload buffer
 *
 * @returns mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj: mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 *
 * To ensure that the users of the shared memory cache don't interfere with
 * each other, a unique ID per user is used.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
};

/*
 * Returns a pointer to the cached RPC memory. Each thread and @user tuple
 * has a unique cache. The pointer is guaranteed to point to a large enough
 * area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);
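
/*
 * Illustrative sketch (not part of this header): fetching a cached shared
 * memory buffer for a filesystem RPC. Based on the cache semantics described
 * above, the buffer is assumed to be owned by the per-thread cache and is
 * therefore not freed by the caller; the size and usage are placeholders.
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_APPLICATION,
 *					      512, &mobj);
 *
 *	if (va) {
 *		// ... use va/mobj as the buffer for an RPC to normal world ...
 *	}
 */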

#endif /*__ASSEMBLER__*/

#endif /*KERNEL_THREAD_H*/