// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2021 Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <assert.h>
#include <compiler.h>
#include <crypto/crypto.h>
#include <ctype.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/ldelf_loader.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/ts_store.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_rpc_cmd.h>
#include <printk.h>
#include <signed_hdr.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <ta_pub_key.h>
#include <tee/arch_svc.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <tee/uuid.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

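/*
 * Copy TA invocation parameters from the kernel side struct tee_ta_param
 * into the user space struct utee_params layout expected by the TA, using
 * the user space addresses in va[] for memref parameters.
 */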
static void init_utee_param(struct utee_params *up,
			const struct tee_ta_param *p, void *va[TEE_NUM_PARAMS])
{
	size_t n;

	up->types = p->types;
	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uintptr_t a;
		uintptr_t b;

		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			a = (uintptr_t)va[n];
			b = p->u[n].mem.size;
			break;
		case TEE_PARAM_TYPE_VALUE_INPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			a = p->u[n].val.a;
			b = p->u[n].val.b;
			break;
		default:
			a = 0;
			b = 0;
			break;
		}
		/* See comment for struct utee_params in utee_types.h */
		up->vals[n * 2] = a;
		up->vals[n * 2 + 1] = b;
	}
}

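/*
 * Copy updated value outputs and memref sizes back from the user space
 * struct utee_params into the kernel side parameters once the TA has
 * returned.
 */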
static void update_from_utee_param(struct tee_ta_param *p,
			const struct utee_params *up)
{
	size_t n;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].mem.size = up->vals[n * 2 + 1];
			break;
		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].val.a = up->vals[n * 2];
			p->u[n].val.b = up->vals[n * 2 + 1];
			break;
		default:
			break;
		}
	}
}

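/*
 * Guard against unbounded nesting of TA invocations (for instance a TA
 * invoking another TA) by tracking the per-thread syscall recursion depth.
 */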
static bool inc_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	if (tsd->syscall_recursion >= CFG_CORE_MAX_SYSCALL_RECURSION) {
		DMSG("Maximum allowed recursion depth reached (%u)",
		     CFG_CORE_MAX_SYSCALL_RECURSION);
		return false;
	}

	tsd->syscall_recursion++;
	return true;
}

static void dec_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	assert(tsd->syscall_recursion);
	tsd->syscall_recursion--;
}

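/*
 * Enter a user TA at the given entry function: map the invocation
 * parameters into the TA address space, marshal them onto the user stack
 * and switch to user mode. On return, copy out the results and report a
 * panic, if any, as TEE_ERROR_TARGET_DEAD.
 */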
static TEE_Result user_ta_enter(struct ts_session *session,
				enum utee_entry_func func, uint32_t cmd)
{
	TEE_Result res = TEE_SUCCESS;
	struct utee_params *usr_params = NULL;
	uaddr_t usr_stack = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(session);
	struct ts_session *ts_sess __maybe_unused = NULL;
	void *param_va[TEE_NUM_PARAMS] = { NULL };

	if (!inc_recursion()) {
		/* Using this error code since we've run out of resources. */
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_clr_cancel;
	}
	if (ta_sess->param) {
		/* Map user space memory */
		res = vm_map_param(&utc->uctx, ta_sess->param, param_va);
		if (res != TEE_SUCCESS)
			goto out;
	}

	/* Switch to user ctx */
	ts_push_current_session(session);

	/* Make room for usr_params at top of stack */
	usr_stack = utc->uctx.stack_ptr;
	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
	usr_params = (struct utee_params *)usr_stack;
	if (ta_sess->param)
		init_utee_param(usr_params, ta_sess->param, param_va);
	else
		memset(usr_params, 0, sizeof(*usr_params));

	res = thread_enter_user_mode(func, kaddr_to_uref(session),
				     (vaddr_t)usr_params, cmd, usr_stack,
				     utc->uctx.entry_func, utc->uctx.is_32bit,
				     &utc->ta_ctx.panicked,
				     &utc->ta_ctx.panic_code);

	thread_user_clear_vfp(&utc->uctx);

	if (utc->ta_ctx.panicked) {
		abort_print_current_ts();
		DMSG("user_ta_enter: TA panicked with code 0x%x",
		     utc->ta_ctx.panic_code);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		/*
		 * According to the GP spec the origin should always be set
		 * to the TA after TA execution.
		 */
		ta_sess->err_origin = TEE_ORIGIN_TRUSTED_APP;
	}

	if (ta_sess->param) {
		/* Copy out value results */
		update_from_utee_param(ta_sess->param, usr_params);

		/*
		 * Clear out the parameter mappings added with
		 * vm_map_param() above.
		 */
		vm_clean_param(&utc->uctx);
	}

	ts_sess = ts_pop_current_session();
	assert(ts_sess == session);

out:
	dec_recursion();
out_clr_cancel:
	/*
	 * Clear the cancel state now that the user TA has returned. The next
	 * time the TA is invoked it will be with a new operation and should
	 * not have an old cancellation pending.
	 */
	ta_sess->cancel = false;

	return res;
}

static TEE_Result user_ta_enter_open_session(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0);
}

static TEE_Result user_ta_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd);
}

static void user_ta_enter_close_session(struct ts_session *s)
{
	/* Only if the TA was fully initialized by ldelf */
	if (!to_user_ta_ctx(s->ctx)->uctx.is_initializing)
		user_ta_enter(s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0);
}

#if defined(CFG_TA_STATS)
static TEE_Result user_ta_enter_dump_memstats(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_DUMP_MEMSTATS, 0);
}
#endif

static void dump_state_no_ldelf_dbg(struct user_ta_ctx *utc)
{
	user_mode_ctx_print_mappings(&utc->uctx);
}

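/*
 * Dump the state of a user TA, preferably via ldelf which has access to
 * debug information, falling back to a plain mapping dump when that is
 * not possible.
 */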
static void user_ta_dump_state(struct ts_ctx *ctx)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
		/*
		 * Fall back to dump_state_no_ldelf_dbg() if
		 * ldelf_dump_state() fails for some reason.
		 *
		 * If ldelf_dump_state() failed with panic
		 * we are done since abort_print_current_ts() will be
		 * called which will dump the memory map.
		 */
	}

	dump_state_no_ldelf_dbg(utc);
}

#ifdef CFG_FTRACE_SUPPORT
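/*
 * Retrieve the ftrace buffer from the TA via ldelf and hand it over to
 * normal world through an RPC payload, prefixed with the TA UUID and the
 * TEE core load address.
 */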
static void user_ta_dump_ftrace(struct ts_ctx *ctx)
{
	uint32_t prot = TEE_MATTR_URW;
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
	struct thread_param params[3] = { };
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	uint8_t *ubuf = NULL;
	void *buf = NULL;
	size_t pl_sz = 0;
	size_t blen = 0, ld_addr_len = 0;
	vaddr_t va = 0;

	res = ldelf_dump_ftrace(&utc->uctx, NULL, &blen);
	if (res != TEE_ERROR_SHORT_BUFFER)
		return;

#define LOAD_ADDR_DUMP_SIZE	64
	pl_sz = ROUNDUP(blen + sizeof(TEE_UUID) + LOAD_ADDR_DUMP_SIZE,
			SMALL_PAGE_SIZE);

	mobj = thread_rpc_alloc_payload(pl_sz);
	if (!mobj) {
		EMSG("Ftrace thread_rpc_alloc_payload failed");
		return;
	}

	buf = mobj_get_va(mobj, 0, pl_sz);
	if (!buf)
		goto out_free_pl;

	res = vm_map(&utc->uctx, &va, mobj->size, prot, VM_FLAG_EPHEMERAL,
		     mobj, 0);
	if (res)
		goto out_free_pl;

	ubuf = (uint8_t *)va + mobj_get_phys_offs(mobj, mobj->phys_granule);
	memcpy(ubuf, &ctx->uuid, sizeof(TEE_UUID));
	ubuf += sizeof(TEE_UUID);

	ld_addr_len = snprintk((char *)ubuf, LOAD_ADDR_DUMP_SIZE,
			       "TEE load address @ %#"PRIxVA"\n",
			       VCORE_START_VA);
	ubuf += ld_addr_len;

	res = ldelf_dump_ftrace(&utc->uctx, ubuf, &blen);
	if (res) {
		EMSG("Ftrace dump failed: %#"PRIx32, res);
		goto out_unmap_pl;
	}

	params[0] = THREAD_PARAM_VALUE(INOUT, 0, 0, 0);
	params[1] = THREAD_PARAM_MEMREF(IN, mobj, 0, sizeof(TEE_UUID));
	params[2] = THREAD_PARAM_MEMREF(IN, mobj, sizeof(TEE_UUID),
					blen + ld_addr_len);

	res = thread_rpc_cmd(OPTEE_RPC_CMD_FTRACE, 3, params);
	if (res)
		EMSG("Ftrace thread_rpc_cmd res: %#"PRIx32, res);

out_unmap_pl:
	res = vm_unmap(&utc->uctx, va, mobj->size);
	assert(!res);
out_free_pl:
	thread_rpc_free_payload(mobj);
}
#endif /*CFG_FTRACE_SUPPORT*/

#ifdef CFG_TA_GPROF_SUPPORT
static void user_ta_gprof_set_status(enum ts_gprof_status status)
{
	if (status == TS_GPROF_SUSPEND)
		tee_ta_update_session_utime_suspend();
	else
		tee_ta_update_session_utime_resume();
}
#endif /*CFG_TA_GPROF_SUPPORT*/

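/*
 * Release all resources owned by a user TA context: sessions it has
 * opened towards other TAs, its address space, crypto states, objects
 * and storage enumerators.
 */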
static void free_utc(struct user_ta_ctx *utc)
{
	/*
	 * Close sessions opened by this TA.
	 * Note that tee_ta_close_session() removes the item
	 * from the utc->open_sessions list.
	 */
	while (!TAILQ_EMPTY(&utc->open_sessions)) {
		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
				     &utc->open_sessions, KERN_IDENTITY);
	}

	vm_info_final(&utc->uctx);

	/* Free cryp states created by this TA */
	tee_svc_cryp_free_states(utc);
	/* Close cryp objects opened by this TA */
	tee_obj_close_all(utc);
	/* Free storage enumerators created by this TA */
	tee_svc_storage_close_all_enum(utc);
	free(utc);
}

static void user_ta_ctx_destroy(struct ts_ctx *ctx)
{
	free_utc(to_user_ta_ctx(ctx));
}

static uint32_t user_ta_get_instance_id(struct ts_ctx *ctx)
{
	return to_user_ta_ctx(ctx)->uctx.vm_info.asid;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops user_ta_ops __weak __relrodata_unpaged("user_ta_ops") = {
	.enter_open_session = user_ta_enter_open_session,
	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
	.enter_close_session = user_ta_enter_close_session,
#if defined(CFG_TA_STATS)
	.dump_mem_stats = user_ta_enter_dump_memstats,
#endif
	.dump_state = user_ta_dump_state,
#ifdef CFG_FTRACE_SUPPORT
	.dump_ftrace = user_ta_dump_ftrace,
#endif
	.destroy = user_ta_ctx_destroy,
	.get_instance_id = user_ta_get_instance_id,
	.handle_svc = user_ta_handle_svc,
#ifdef CFG_TA_GPROF_SUPPORT
	.gprof_set_status = user_ta_gprof_set_status,
#endif
};

static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
{
	ctx->ts_ctx.ops = &user_ta_ops;
}

bool is_user_ta_ctx(struct ts_ctx *ctx)
{
	return ctx && ctx->ops == &user_ta_ops;
}

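/* Log the registered TA stores at init so it is clear where TAs can be loaded from. */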
static TEE_Result check_ta_store(void)
{
	const struct ts_store_ops *op = NULL;

	SCATTERED_ARRAY_FOREACH(op, ta_stores, struct ts_store_ops)
		DMSG("TA store: \"%s\"", op->description);

	return TEE_SUCCESS;
}
service_init(check_ta_store);

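/*
 * Create and register a user TA context for the session and load the TA
 * with the help of ldelf. On failure the partially initialized context is
 * freed and the error is returned to the caller.
 */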
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
				       struct tee_ta_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct user_ta_ctx *utc = NULL;

	utc = calloc(1, sizeof(struct user_ta_ctx));
	if (!utc)
		return TEE_ERROR_OUT_OF_MEMORY;

	TAILQ_INIT(&utc->open_sessions);
	TAILQ_INIT(&utc->cryp_states);
	TAILQ_INIT(&utc->objects);
	TAILQ_INIT(&utc->storage_enums);
	condvar_init(&utc->ta_ctx.busy_cv);
	utc->ta_ctx.ref_count = 1;

	/*
	 * Set the context TA operation structure. The generic
	 * implementation requires it to distinguish user mode TA contexts
	 * from pseudo TA contexts.
	 */
	set_ta_ctx_ops(&utc->ta_ctx);

	utc->ta_ctx.ts_ctx.uuid = *uuid;
	res = vm_info_init(&utc->uctx, &utc->ta_ctx.ts_ctx);
	if (res)
		goto out;
	utc->uctx.is_initializing = true;

#ifdef CFG_TA_PAUTH
	crypto_rng_read(&utc->uctx.keys, sizeof(utc->uctx.keys));
#endif

	mutex_lock(&tee_ta_mutex);
	s->ts_sess.ctx = &utc->ta_ctx.ts_ctx;
	s->ts_sess.handle_svc = s->ts_sess.ctx->ops->handle_svc;
	/*
	 * Another thread trying to load this same TA may need to wait
	 * until this context is fully initialized. This is needed to
	 * handle single instance TAs.
	 */
	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ta_ctx, link);
	mutex_unlock(&tee_ta_mutex);

	/*
	 * We must not hold tee_ta_mutex while allocating page tables as
	 * that may otherwise lead to a deadlock.
	 */
	ts_push_current_session(&s->ts_sess);

	res = ldelf_load_ldelf(&utc->uctx);
	if (!res)
		res = ldelf_init_with_ldelf(&s->ts_sess, &utc->uctx);

	ts_pop_current_session();

	mutex_lock(&tee_ta_mutex);

	if (!res) {
		utc->uctx.is_initializing = false;
	} else {
		s->ts_sess.ctx = NULL;
		TAILQ_REMOVE(&tee_ctxes, &utc->ta_ctx, link);
	}

	/* The state has changed for the context, notify any waiters. */
	condvar_broadcast(&tee_ta_init_cv);

	mutex_unlock(&tee_ta_mutex);

out:
	if (res) {
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
	}

	return res;
}