// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, Linaro Limited
 * Copyright (c) 2020, Arm Limited
 */

#include <arm.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/ldelf_syscalls.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/trace_ta.h>
#include <kernel/user_ta.h>
#include <ldelf.h>
#include <mm/vm.h>
#include <speculation_barrier.h>
#include <string.h>
#include <tee/arch_svc.h>
#include <tee/svc_cache.h>
#include <tee_syscall_numbers.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <util.h>

#include "arch_svc_private.h"

#if (TRACE_LEVEL == TRACE_FLOW) && defined(CFG_TEE_CORE_TA_TRACE)
#define TRACE_SYSCALLS
#endif

struct syscall_entry {
	syscall_t fn;
#ifdef TRACE_SYSCALLS
	const char *name;
#endif
};

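/*
 * SYSCALL_ENTRY() builds one syscall table entry; the handler name is only
 * recorded when syscall tracing (TRACE_SYSCALLS) is compiled in.
 */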
#ifdef TRACE_SYSCALLS
#define SYSCALL_ENTRY(_fn) { .fn = (syscall_t)_fn, .name = #_fn }
#else
#define SYSCALL_ENTRY(_fn) { .fn = (syscall_t)_fn }
#endif

/*
 * This array is ordered according to the SYSCALL ids TEE_SCN_xxx
 */
static const struct syscall_entry tee_svc_syscall_table[] = {
	SYSCALL_ENTRY(syscall_sys_return),
	SYSCALL_ENTRY(syscall_log),
	SYSCALL_ENTRY(syscall_panic),
	SYSCALL_ENTRY(syscall_get_property),
	SYSCALL_ENTRY(syscall_get_property_name_to_index),
	SYSCALL_ENTRY(syscall_open_ta_session),
	SYSCALL_ENTRY(syscall_close_ta_session),
	SYSCALL_ENTRY(syscall_invoke_ta_command),
	SYSCALL_ENTRY(syscall_check_access_rights),
	SYSCALL_ENTRY(syscall_get_cancellation_flag),
	SYSCALL_ENTRY(syscall_unmask_cancellation),
	SYSCALL_ENTRY(syscall_mask_cancellation),
	SYSCALL_ENTRY(syscall_wait),
	SYSCALL_ENTRY(syscall_get_time),
	SYSCALL_ENTRY(syscall_set_ta_time),
	SYSCALL_ENTRY(syscall_cryp_state_alloc),
	SYSCALL_ENTRY(syscall_cryp_state_copy),
	SYSCALL_ENTRY(syscall_cryp_state_free),
	SYSCALL_ENTRY(syscall_hash_init),
	SYSCALL_ENTRY(syscall_hash_update),
	SYSCALL_ENTRY(syscall_hash_final),
	SYSCALL_ENTRY(syscall_cipher_init),
	SYSCALL_ENTRY(syscall_cipher_update),
	SYSCALL_ENTRY(syscall_cipher_final),
	SYSCALL_ENTRY(syscall_cryp_obj_get_info),
	SYSCALL_ENTRY(syscall_cryp_obj_restrict_usage),
	SYSCALL_ENTRY(syscall_cryp_obj_get_attr),
	SYSCALL_ENTRY(syscall_cryp_obj_alloc),
	SYSCALL_ENTRY(syscall_cryp_obj_close),
	SYSCALL_ENTRY(syscall_cryp_obj_reset),
	SYSCALL_ENTRY(syscall_cryp_obj_populate),
	SYSCALL_ENTRY(syscall_cryp_obj_copy),
	SYSCALL_ENTRY(syscall_cryp_derive_key),
	SYSCALL_ENTRY(syscall_cryp_random_number_generate),
	SYSCALL_ENTRY(syscall_authenc_init),
	SYSCALL_ENTRY(syscall_authenc_update_aad),
	SYSCALL_ENTRY(syscall_authenc_update_payload),
	SYSCALL_ENTRY(syscall_authenc_enc_final),
	SYSCALL_ENTRY(syscall_authenc_dec_final),
	SYSCALL_ENTRY(syscall_asymm_operate),
	SYSCALL_ENTRY(syscall_asymm_verify),
	SYSCALL_ENTRY(syscall_storage_obj_open),
	SYSCALL_ENTRY(syscall_storage_obj_create),
	SYSCALL_ENTRY(syscall_storage_obj_del),
	SYSCALL_ENTRY(syscall_storage_obj_rename),
	SYSCALL_ENTRY(syscall_storage_alloc_enum),
	SYSCALL_ENTRY(syscall_storage_free_enum),
	SYSCALL_ENTRY(syscall_storage_reset_enum),
	SYSCALL_ENTRY(syscall_storage_start_enum),
	SYSCALL_ENTRY(syscall_storage_next_enum),
	SYSCALL_ENTRY(syscall_storage_obj_read),
	SYSCALL_ENTRY(syscall_storage_obj_write),
	SYSCALL_ENTRY(syscall_storage_obj_trunc),
	SYSCALL_ENTRY(syscall_storage_obj_seek),
	SYSCALL_ENTRY(syscall_obj_generate_key),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_not_supported),
	SYSCALL_ENTRY(syscall_cache_operation),
};
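
/*
 * The table must contain exactly TEE_SCN_MAX + 1 entries; this is checked
 * at compile time in get_tee_syscall_func() below. Syscall numbers that are
 * reserved or no longer implemented are routed to syscall_not_supported().
 */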

/*
 * The ldelf return, log, panic syscalls have the same functionality and syscall
 * number as the user TAs'. To avoid unnecessary code duplication, the ldelf SVC
 * handler doesn't implement separate functions for these.
 */
static const struct syscall_entry ldelf_syscall_table[] = {
	SYSCALL_ENTRY(syscall_sys_return),
	SYSCALL_ENTRY(syscall_log),
	SYSCALL_ENTRY(syscall_panic),
	SYSCALL_ENTRY(ldelf_syscall_map_zi),
	SYSCALL_ENTRY(ldelf_syscall_unmap),
	SYSCALL_ENTRY(ldelf_syscall_open_bin),
	SYSCALL_ENTRY(ldelf_syscall_close_bin),
	SYSCALL_ENTRY(ldelf_syscall_map_bin),
	SYSCALL_ENTRY(ldelf_syscall_copy_from_bin),
	SYSCALL_ENTRY(ldelf_syscall_set_prot),
	SYSCALL_ENTRY(ldelf_syscall_remap),
	SYSCALL_ENTRY(ldelf_syscall_gen_rnd_num),
};
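
/*
 * This table is indexed by the LDELF_* syscall numbers from <ldelf.h>; its
 * size is checked against LDELF_SCN_MAX + 1 at compile time in
 * get_ldelf_syscall_func() below.
 */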

#ifdef TRACE_SYSCALLS
static void trace_syscall(size_t num)
{
	if (num == TEE_SCN_RETURN || num == TEE_SCN_LOG || num > TEE_SCN_MAX)
		return;
	FMSG("syscall #%zu (%s)", num, tee_svc_syscall_table[num].name);
}
#else
static void trace_syscall(size_t num __unused)
{
}
#endif

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_syscall_enter(size_t num)
{
	struct ts_session *s = NULL;

	/*
	 * Syscalls related to inter-TA communication can't be traced in the
	 * caller TA's ftrace buffer since they involve a context switch to
	 * the callee TA's context. Moreover, the user can enable ftrace for
	 * the callee TA to dump the function trace into the corresponding
	 * ftrace buffer.
	 */
	if (num == TEE_SCN_OPEN_TA_SESSION || num == TEE_SCN_CLOSE_TA_SESSION ||
	    num == TEE_SCN_INVOKE_TA_COMMAND)
		return;

	s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
	if (s && s->fbuf)
		s->fbuf->syscall_trace_enabled = true;
}

static void __noprof ftrace_syscall_leave(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_enabled = false;
}
#else
static void __noprof ftrace_syscall_enter(size_t num __unused)
{
}

static void __noprof ftrace_syscall_leave(void)
{
}
#endif

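/*
 * Syscall register conventions as consumed below: a 32-bit caller (native
 * AArch32, or a 32-bit TA on an AArch64 core, detected via the SPSR RW bit)
 * passes the syscall number in r7 and the argument count in r6, while a
 * 64-bit caller passes the syscall number in x8 with all arguments in
 * registers, so max_args is forced to 0. Illustrative AArch64 caller-side
 * sketch (not the actual libutee stub):
 *
 *	mov	x8, #TEE_SCN_LOG
 *	svc	#0
 */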
#ifdef ARM32
static void get_scn_max_args(struct thread_svc_regs *regs, size_t *scn,
			     size_t *max_args)
{
	*scn = regs->r7;
	*max_args = regs->r6;
}
#endif /*ARM32*/

#ifdef ARM64
static void get_scn_max_args(struct thread_svc_regs *regs, size_t *scn,
			     size_t *max_args)
{
	if (((regs->spsr >> SPSR_MODE_RW_SHIFT) & SPSR_MODE_RW_MASK) ==
	    SPSR_MODE_RW_32) {
		*scn = regs->x7;
		*max_args = regs->x6;
	} else {
		*scn = regs->x8;
		*max_args = 0;
	}
}
#endif /*ARM64*/

#ifdef ARM32
static void set_svc_retval(struct thread_svc_regs *regs, uint32_t ret_val)
{
	regs->r0 = ret_val;
}
#endif /*ARM32*/

#ifdef ARM64
static void set_svc_retval(struct thread_svc_regs *regs, uint64_t ret_val)
{
	regs->x0 = ret_val;
}
#endif /*ARM64*/

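/*
 * Both lookup helpers below bounds-check the syscall number and then load
 * the function pointer with load_no_speculate() (<speculation_barrier.h>)
 * so the table read can't be performed speculatively with an out-of-bounds
 * index (Spectre variant 1 style hardening).
 */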
static syscall_t get_tee_syscall_func(size_t num)
{
	/* Cast away const */
	struct syscall_entry *sc_table = (void *)tee_svc_syscall_table;

	COMPILE_TIME_ASSERT(ARRAY_SIZE(tee_svc_syscall_table) ==
			    (TEE_SCN_MAX + 1));

	if (num > TEE_SCN_MAX)
		return (syscall_t)syscall_not_supported;

	return load_no_speculate(&sc_table[num].fn, &sc_table[0].fn,
				 &sc_table[TEE_SCN_MAX].fn + 1);
}

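/*
 * Entry point for an SVC trapped from a user TA. Argument marshalling and
 * the actual handler invocation happen in tee_svc_do_call(), implemented
 * per architecture and declared in arch_svc_private.h.
 */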
bool user_ta_handle_svc(struct thread_svc_regs *regs)
{
	size_t scn = 0;
	size_t max_args = 0;
	syscall_t scf = NULL;

	get_scn_max_args(regs, &scn, &max_args);

	trace_syscall(scn);

	if (max_args > TEE_SVC_MAX_ARGS) {
		DMSG("Too many arguments for SCN %zu (%zu)", scn, max_args);
		set_svc_retval(regs, TEE_ERROR_GENERIC);
		return true; /* return to user mode */
	}

	scf = get_tee_syscall_func(scn);

	ftrace_syscall_enter(scn);

	set_svc_retval(regs, tee_svc_do_call(regs, scf));

	ftrace_syscall_leave();

	/*
	 * Return true if we're to return to user mode,
	 * thread_svc_handler() will take care of the rest.
	 */
	return scn != TEE_SCN_RETURN && scn != TEE_SCN_PANIC;
}

static syscall_t get_ldelf_syscall_func(size_t num)
{
	/* Cast away const */
	struct syscall_entry *sc_table = (void *)ldelf_syscall_table;

	COMPILE_TIME_ASSERT(ARRAY_SIZE(ldelf_syscall_table) ==
			    (LDELF_SCN_MAX + 1));

	if (num > LDELF_SCN_MAX)
		return (syscall_t)syscall_not_supported;

	return load_no_speculate(&sc_table[num].fn, &sc_table[0].fn,
				 &sc_table[LDELF_SCN_MAX].fn + 1);
}

bool ldelf_handle_svc(struct thread_svc_regs *regs)
{
	size_t scn = 0;
	size_t max_args = 0;
	syscall_t scf = NULL;

	get_scn_max_args(regs, &scn, &max_args);

	trace_syscall(scn);

	if (max_args > TEE_SVC_MAX_ARGS) {
		DMSG("Too many arguments for SCN %zu (%zu)", scn, max_args);
		set_svc_retval(regs, TEE_ERROR_GENERIC);
		return true; /* return to user mode */
	}

	scf = get_ldelf_syscall_func(scn);

	ftrace_syscall_enter(scn);

	set_svc_retval(regs, tee_svc_do_call(regs, scf));

	ftrace_syscall_leave();

	/*
	 * Return true if we're to return to user mode,
	 * thread_svc_handler() will take care of the rest.
	 */
	return scn != LDELF_RETURN && scn != LDELF_PANIC;
}

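/*
 * Sizes of the register frames pushed on the user stack by _utee_panic()
 * and read back by the save_panic_regs_*_ta() helpers below: 14 32-bit
 * words for a 32-bit TA and two 64-bit words (x29 and elr) for a 64-bit TA.
 */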
#define TA32_CONTEXT_MAX_SIZE	(14 * sizeof(uint32_t))
#define TA64_CONTEXT_MAX_SIZE	(2 * sizeof(uint64_t))

#ifdef ARM32
#ifdef CFG_UNWIND
/* Get register values pushed onto the stack by _utee_panic() */
static void save_panic_regs_a32_ta(struct thread_specific_data *tsd,
				   uint32_t *pushed)
{
	tsd->abort_regs = (struct thread_abort_regs){
		.elr = pushed[0],
		.r0 = pushed[1],
		.r1 = pushed[2],
		.r2 = pushed[3],
		.r3 = pushed[4],
		.r4 = pushed[5],
		.r5 = pushed[6],
		.r6 = pushed[7],
		.r7 = pushed[8],
		.r8 = pushed[9],
		.r9 = pushed[10],
		.r10 = pushed[11],
		.r11 = pushed[12],
		.usr_sp = (uint32_t)pushed,
		.usr_lr = pushed[13],
		.spsr = read_spsr(),
	};
}

static void save_panic_stack(struct thread_svc_regs *regs)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);

	tsd->abort_type = ABORT_TYPE_USER_MODE_PANIC;
	tsd->abort_descr = 0;
	tsd->abort_va = 0;

	if (vm_check_access_rights(&utc->uctx,
				   TEE_MEMORY_ACCESS_READ |
				   TEE_MEMORY_ACCESS_WRITE,
				   (uaddr_t)regs->r1, TA32_CONTEXT_MAX_SIZE)) {
		TAMSG_RAW("");
		TAMSG_RAW("Can't unwind invalid user stack 0x%" PRIxUA,
			  (uaddr_t)regs->r1);
		return;
	}

	save_panic_regs_a32_ta(tsd, (uint32_t *)regs->r1);
}
#else /* CFG_UNWIND */
static void save_panic_stack(struct thread_svc_regs *regs __unused)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ABORT_TYPE_USER_MODE_PANIC;
}
#endif
#endif /*ARM32*/

#ifdef ARM64
#ifdef CFG_UNWIND
/* Get register values pushed onto the stack by _utee_panic() (32-bit TA) */
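/*
 * Note: the panicking TA's AArch32 SP and LR are recorded in the x13/x14
 * slots, matching how the AArch32 user-mode registers map onto the AArch64
 * register file.
 */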
static void save_panic_regs_a32_ta(struct thread_specific_data *tsd,
				   uint32_t *pushed)
{
	tsd->abort_regs = (struct thread_abort_regs){
		.elr = pushed[0],
		.x0 = pushed[1],
		.x1 = pushed[2],
		.x2 = pushed[3],
		.x3 = pushed[4],
		.x4 = pushed[5],
		.x5 = pushed[6],
		.x6 = pushed[7],
		.x7 = pushed[8],
		.x8 = pushed[9],
		.x9 = pushed[10],
		.x10 = pushed[11],
		.x11 = pushed[12],
		.x13 = (uint64_t)pushed,
		.x14 = pushed[13],
		.spsr = (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT),
	};
}

/* Get register values pushed onto the stack by _utee_panic() (64-bit TA) */
static void save_panic_regs_a64_ta(struct thread_specific_data *tsd,
				   uint64_t *pushed)
{
	tsd->abort_regs = (struct thread_abort_regs){
		.x29 = pushed[0],
		.elr = pushed[1],
		.spsr = (SPSR_64_MODE_EL0 << SPSR_64_MODE_EL_SHIFT),
	};
}

static void save_panic_stack(struct thread_svc_regs *regs)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);

	if (vm_check_access_rights(&utc->uctx,
				   TEE_MEMORY_ACCESS_READ |
				   TEE_MEMORY_ACCESS_WRITE,
				   (uaddr_t)regs->x1,
				   utc->uctx.is_32bit ?
				   TA32_CONTEXT_MAX_SIZE :
				   TA64_CONTEXT_MAX_SIZE)) {
		TAMSG_RAW("");
		TAMSG_RAW("Can't unwind invalid user stack 0x%" PRIxUA,
			  (uaddr_t)regs->x1);
		return;
	}

	tsd->abort_type = ABORT_TYPE_USER_MODE_PANIC;
	tsd->abort_descr = 0;
	tsd->abort_va = 0;

	if (utc->uctx.is_32bit)
		save_panic_regs_a32_ta(tsd, (uint32_t *)regs->x1);
	else
		save_panic_regs_a64_ta(tsd, (uint64_t *)regs->x1);
}
#else /* CFG_UNWIND */
static void save_panic_stack(struct thread_svc_regs *regs __unused)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	tsd->abort_type = ABORT_TYPE_USER_MODE_PANIC;
}
#endif /* CFG_UNWIND */
#endif /*ARM64*/

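/*
 * Common tail of the return and panic syscalls: the returned value becomes
 * the syscall return code (placed in r0/x0 via set_svc_retval() by the SVC
 * handler), while r1/x1 and r2/x2 are loaded with the panic flag and panic
 * code so they can be reported back to the code that entered user mode.
 */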
uint32_t tee_svc_sys_return_helper(uint32_t ret, bool panic,
				   uint32_t panic_code,
				   struct thread_svc_regs *regs)
{
	if (panic) {
		TAMSG_RAW("");
		TAMSG_RAW("TA panicked with code 0x%" PRIx32, panic_code);
		save_panic_stack(regs);
	}

#ifdef ARM32
	regs->r1 = panic;
	regs->r2 = panic_code;
#endif
#ifdef ARM64
	regs->x1 = panic;
	regs->x2 = panic_code;
#endif

	return ret;
}