// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019-2021, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <io.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/notif.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <string.h>
#include <tee/entry_fast.h>
#include <tee/entry_std.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_fs_rpc.h>

static bool thread_prealloc_rpc_cache;
static unsigned int thread_rpc_pnum;

static_assert(NOTIF_VALUE_DO_BOTTOM_HALF ==
	      OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF);

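/*
 * Entry for fast SMCs. Fast calls are serviced to completion on the
 * calling CPU with all exceptions masked, without allocating a thread.
 */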
void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (IS_ENABLED(CFG_VIRTUALIZATION) &&
	    virt_set_guest(args->a7)) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		goto out;
	}

	tee_entry_fast(args);

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_unset_guest();

out:
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

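/*
 * Entry for yielding (standard) SMCs: resumes a thread suspended in an
 * RPC or allocates a fresh thread for a new call. A return value reaches
 * normal world only when resuming or allocating the thread failed.
 */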
uint32_t thread_handle_std_smc(uint32_t a0, uint32_t a1, uint32_t a2,
			       uint32_t a3, uint32_t a4, uint32_t a5,
			       uint32_t a6 __unused, uint32_t a7 __maybe_unused)
{
	uint32_t rv = OPTEE_SMC_RETURN_OK;

	thread_check_canaries();

	if (IS_ENABLED(CFG_VIRTUALIZATION) && virt_set_guest(a7))
		return OPTEE_SMC_RETURN_ENOTAVAIL;

	/*
	 * thread_resume_from_rpc() and thread_alloc_and_run() only return
	 * on error. Successful return is done via thread_exit() or
	 * thread_rpc().
	 */
	if (a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC) {
		thread_resume_from_rpc(a3, a1, a2, a4, a5);
		rv = OPTEE_SMC_RETURN_ERESUME;
	} else {
		thread_alloc_and_run(a0, a1, a2, a3, 0, 0);
		rv = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
	}

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_unset_guest();

	return rv;
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc_arg()
 *
 * @cookie:	cookie received when allocating the buffer
 */
static void thread_rpc_free_arg(uint64_t cookie)
{
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

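/*
 * Maps and validates the struct optee_msg_arg at @offset within @mobj:
 * checks alignment, that num_params is sane and that the whole struct
 * fits. When @rpc_arg is non-NULL, also maps the RPC argument struct
 * that normal world places directly after the regular one.
 */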
static uint32_t get_msg_arg(struct mobj *mobj, size_t offset,
			    size_t *num_params, struct optee_msg_arg **arg,
			    struct optee_msg_arg **rpc_arg)
{
	void *p = NULL;
	size_t sz = 0;

	if (!mobj)
		return OPTEE_SMC_RETURN_EBADADDR;

	p = mobj_get_va(mobj, offset, sizeof(struct optee_msg_arg));
	if (!p || !IS_ALIGNED_WITH_TYPE(p, struct optee_msg_arg))
		return OPTEE_SMC_RETURN_EBADADDR;

	*arg = p;
	*num_params = READ_ONCE((*arg)->num_params);
	if (*num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		return OPTEE_SMC_RETURN_EBADADDR;

	sz = OPTEE_MSG_GET_ARG_SIZE(*num_params);
	if (!mobj_get_va(mobj, offset, sz))
		return OPTEE_SMC_RETURN_EBADADDR;

	if (rpc_arg) {
		size_t rpc_sz = 0;

		rpc_sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
		p = mobj_get_va(mobj, offset + sz, rpc_sz);
		if (!p)
			return OPTEE_SMC_RETURN_EBADADDR;
		*rpc_arg = p;
	}

	return OPTEE_SMC_RETURN_OK;
}

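/* Releases the thread's preallocated RPC argument buffer */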
static void clear_prealloc_rpc_cache(struct thread_ctx *thr)
{
	thread_rpc_free_arg(mobj_get_cookie(thr->rpc_mobj));
	mobj_put(thr->rpc_mobj);
	thr->rpc_arg = NULL;
	thr->rpc_mobj = NULL;
}

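/*
 * Dispatches a validated message to tee_entry_std(). A caller-supplied
 * RPC argument struct, if any, is installed as the thread's RPC buffer
 * for the duration of the call.
 */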
static uint32_t call_entry_std(struct optee_msg_arg *arg, size_t num_params,
			       struct optee_msg_arg *rpc_arg)
{
	struct thread_ctx *thr = threads + thread_get_id();
	uint32_t rv = 0;

	if (rpc_arg) {
		/*
		 * In case the prealloc RPC arg cache is enabled, clear the
		 * cached object for this thread.
		 *
		 * Normally it doesn't make sense to have the prealloc RPC
		 * arg cache enabled together with a supplied RPC arg
		 * struct. But if it is we must use the supplied struct and
		 * at the same time make sure to not break anything.
		 */
		if (IS_ENABLED(CFG_PREALLOC_RPC_CACHE) &&
		    thread_prealloc_rpc_cache)
			clear_prealloc_rpc_cache(thr);
		thr->rpc_arg = rpc_arg;
	}

	if (tee_entry_std(arg, num_params))
		rv = OPTEE_SMC_RETURN_EBADCMD;
	else
		rv = OPTEE_SMC_RETURN_OK;

	thread_rpc_shm_cache_clear(&thr->shm_cache);
	if (rpc_arg)
		thr->rpc_arg = NULL;

	if (rv == OPTEE_SMC_RETURN_OK &&
	    !(IS_ENABLED(CFG_PREALLOC_RPC_CACHE) && thread_prealloc_rpc_cache))
		clear_prealloc_rpc_cache(thr);

	return rv;
}

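/*
 * Handles a call where the message argument is passed by physical
 * address: either directly in the static shared memory pool or, for
 * dynamic shared memory, in a normal world page that is mapped here on
 * demand.
 */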
static uint32_t std_entry_with_parg(paddr_t parg, bool with_rpc_arg)
{
	size_t sz = sizeof(struct optee_msg_arg);
	struct optee_msg_arg *rpc_arg = NULL;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	size_t num_params = 0;
	uint32_t rv = 0;

	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, parg, sz)) {
		if (!IS_ALIGNED_WITH_TYPE(parg, struct optee_msg_arg))
			goto bad_addr;

		arg = phys_to_virt(parg, MEM_AREA_NSEC_SHM,
				   sizeof(struct optee_msg_arg));
		if (!arg)
			goto bad_addr;

		num_params = READ_ONCE(arg->num_params);
		if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
			return OPTEE_SMC_RETURN_EBADADDR;

		sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
		if (with_rpc_arg) {
			rpc_arg = (void *)((uint8_t *)arg + sz);
			sz += OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
		}
		if (!core_pbuf_is(CORE_MEM_NSEC_SHM, parg, sz))
			goto bad_addr;

		return call_entry_std(arg, num_params, rpc_arg);
	} else {
		if (parg & SMALL_PAGE_MASK)
			goto bad_addr;
		/*
		 * mobj_mapped_shm_alloc checks if parg resides in nonsec
		 * ddr.
		 */
		mobj = mobj_mapped_shm_alloc(&parg, 1, 0, 0);
		if (!mobj)
			goto bad_addr;
		if (with_rpc_arg)
			rv = get_msg_arg(mobj, 0, &num_params, &arg, &rpc_arg);
		else
			rv = get_msg_arg(mobj, 0, &num_params, &arg, NULL);
		if (!rv)
			rv = call_entry_std(arg, num_params, rpc_arg);
		mobj_put(mobj);
		return rv;
	}

bad_addr:
	EMSG("Bad arg address 0x%"PRIxPA, parg);
	return OPTEE_SMC_RETURN_EBADADDR;
}

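/*
 * Handles a call where the message argument resides in previously
 * registered shared memory, identified by its cookie and an offset into
 * the registered range. The mobj stays mapped only for the duration of
 * the call.
 */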
static uint32_t std_entry_with_regd_arg(uint64_t cookie, size_t offset)
{
	struct optee_msg_arg *rpc_arg = NULL;
	struct optee_msg_arg *arg = NULL;
	size_t num_params = 0;
	struct mobj *mobj = NULL;
	uint32_t rv = 0;

	mobj = mobj_reg_shm_get_by_cookie(cookie);
	if (!mobj) {
		EMSG("Bad arg cookie 0x%"PRIx64, cookie);
		return OPTEE_SMC_RETURN_EBADADDR;
	}

	if (mobj_inc_map(mobj)) {
		rv = OPTEE_SMC_RETURN_ENOMEM;
		goto out;
	}

	rv = get_msg_arg(mobj, offset, &num_params, &arg, &rpc_arg);
	if (!rv)
		rv = call_entry_std(arg, num_params, rpc_arg);

	mobj_dec_map(mobj);
out:
	mobj_put(mobj);

	return rv;
}

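/*
 * Routes a yielding call according to how the argument struct is passed:
 * by physical address, with or without a trailing RPC argument struct,
 * or by registered shared memory cookie.
 */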
static uint32_t std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2,
			      uint32_t a3 __unused)
{
	const bool with_rpc_arg = true;

	switch (a0) {
	case OPTEE_SMC_CALL_WITH_ARG:
		return std_entry_with_parg(reg_pair_to_64(a1, a2),
					   !with_rpc_arg);
	case OPTEE_SMC_CALL_WITH_RPC_ARG:
		return std_entry_with_parg(reg_pair_to_64(a1, a2),
					   with_rpc_arg);
	case OPTEE_SMC_CALL_WITH_REGD_ARG:
		return std_entry_with_regd_arg(reg_pair_to_64(a1, a2), a3);
	default:
		EMSG("Unknown SMC 0x%"PRIx32, a0);
		return OPTEE_SMC_RETURN_EBADCMD;
	}
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2,
				       uint32_t a3, uint32_t a4 __unused,
				       uint32_t a5 __unused)
{
	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_on_stdcall();

	return std_smc_entry(a0, a1, a2, a3);
}

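/*
 * Disables the preallocated RPC argument cache. Only succeeds while all
 * threads are free. At most one cached buffer's cookie is handed back
 * per call; *cookie is 0 once no buffers remain and the cache is
 * disabled.
 */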
bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv = false;
	size_t n = 0;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;

	if (IS_ENABLED(CFG_PREALLOC_RPC_CACHE)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			if (threads[n].rpc_arg) {
				*cookie = mobj_get_cookie(threads[n].rpc_mobj);
				mobj_put(threads[n].rpc_mobj);
				threads[n].rpc_arg = NULL;
				threads[n].rpc_mobj = NULL;
				goto out;
			}
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	thread_unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

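/*
 * Enables the preallocated RPC argument cache. As with disabling, this
 * only succeeds while all threads are free.
 */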
bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv = false;
	size_t n = 0;
	uint32_t exceptions = 0;

	if (!IS_ENABLED(CFG_PREALLOC_RPC_CACHE))
		return true;

	exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	thread_unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

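/*
 * Wraps a normal world buffer in a mobj: directly when it lies in the
 * static shared memory pool, through a dynamic mapping for a single
 * nonsecure page when CFG_CORE_DYN_SHM is enabled.
 */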
static struct mobj *rpc_shm_mobj_alloc(paddr_t pa, size_t sz, uint64_t cookie)
{
	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, pa, sz))
		return mobj_shm_alloc(pa, sz, cookie);

	if (IS_ENABLED(CFG_CORE_DYN_SHM) &&
	    !(pa & SMALL_PAGE_MASK) && sz <= SMALL_PAGE_SIZE)
		return mobj_mapped_shm_alloc(&pa, 1, 0, cookie);

	return NULL;
}

/**
 * Allocates data for struct optee_msg_arg.
 *
 * @size:	size in bytes of struct optee_msg_arg
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
static struct mobj *thread_rpc_alloc_arg(size_t size)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};
	struct mobj *mobj = NULL;

	thread_rpc(rpc_args);

	/* Registers 1 and 2 passed from normal world */
	pa = reg_pair_to_64(rpc_args[0], rpc_args[1]);
	/* Registers 4 and 5 passed from normal world */
	co = reg_pair_to_64(rpc_args[2], rpc_args[3]);

	if (!IS_ALIGNED_WITH_TYPE(pa, struct optee_msg_arg))
		goto err;

	mobj = rpc_shm_mobj_alloc(pa, size, co);
	if (!mobj)
		goto err;

	return mobj;
err:
	thread_rpc_free_arg(co);
	mobj_put(mobj);
	return NULL;
}

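/*
 * Encodes a registered memory (rmem) parameter for normal world, where
 * the buffer is identified by shared memory cookie, offset and size.
 */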
static bool set_rmem(struct optee_msg_param *param,
		     struct thread_param *tpm)
{
	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	param->u.rmem.offs = tpm->u.memref.offs;
	param->u.rmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		param->u.rmem.shm_ref = mobj_get_cookie(tpm->u.memref.mobj);
		if (!param->u.rmem.shm_ref)
			return false;
	} else {
		param->u.rmem.shm_ref = 0;
	}

	return true;
}

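/*
 * Encodes a temporary memory (tmem) parameter for normal world, where
 * the buffer is identified by physical address, size and cookie.
 */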
static bool set_tmem(struct optee_msg_param *param,
		     struct thread_param *tpm)
{
	paddr_t pa = 0;
	uint64_t shm_ref = 0;
	struct mobj *mobj = tpm->u.memref.mobj;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	if (mobj) {
		shm_ref = mobj_get_cookie(mobj);
		if (!shm_ref)
			return false;
		if (mobj_get_pa(mobj, tpm->u.memref.offs, 0, &pa))
			return false;
	}

	param->u.tmem.size = tpm->u.memref.size;
	param->u.tmem.buf_ptr = pa;
	param->u.tmem.shm_ref = shm_ref;

	return true;
}

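/*
 * Prepares this thread's RPC argument struct for a new RPC: the buffer
 * is allocated on first use unless one is already cached or was supplied
 * together with the call, then @params are translated into struct
 * optee_msg_param entries. The virtual address is returned in @arg_ret
 * and the matching shared memory cookie in @carg_ret.
 */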
static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params, void **arg_ret,
			    uint64_t *carg_ret)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		struct mobj *mobj = thread_rpc_alloc_arg(sz);

		if (!mobj)
			return TEE_ERROR_OUT_OF_MEMORY;

		arg = mobj_get_va(mobj, 0, sz);
		if (!arg) {
			thread_rpc_free_arg(mobj_get_cookie(mobj));
			return TEE_ERROR_OUT_OF_MEMORY;
		}

		thr->rpc_arg = arg;
		thr->rpc_mobj = mobj;
	}

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!params[n].u.memref.mobj ||
			    mobj_matches(params[n].u.memref.mobj,
					 CORE_MEM_NSEC_SHM)) {
				if (!set_tmem(arg->params + n, params + n))
					return TEE_ERROR_BAD_PARAMETERS;
			} else if (mobj_matches(params[n].u.memref.mobj,
						CORE_MEM_REG_SHM)) {
				if (!set_rmem(arg->params + n, params + n))
					return TEE_ERROR_BAD_PARAMETERS;
			} else {
				return TEE_ERROR_BAD_PARAMETERS;
			}
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	*arg_ret = arg;
	*carg_ret = mobj_get_cookie(thr->rpc_mobj);

	return TEE_SUCCESS;
}

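/*
 * Copies results back from the RPC argument struct once normal world has
 * serviced the RPC: value outputs and updated memref sizes.
 */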
static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			/*
			 * rmem.size and tmem.size have the same type and
			 * are stored at the same location.
			 */
			params[n].u.memref.size = arg->params[n].u.rmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

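/*
 * Issues an RPC towards normal world and suspends the thread until the
 * result is posted back. Usage sketch with hypothetical values, relying
 * on OPTEE_RPC_CMD_SUSPEND taking the number of milliseconds to suspend
 * in value.a:
 *
 *	struct thread_param p = THREAD_PARAM_VALUE(IN, 100, 0, 0);
 *
 *	if (thread_rpc_cmd(OPTEE_RPC_CMD_SUSPEND, 1, &p))
 *		EMSG("Suspend RPC failed");
 */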
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	void *arg = NULL;
	uint64_t carg = 0;
	uint32_t ret = 0;

	/* The source CRYPTO_RNG_SRC_JITTER_RPC is safe to use here */
	plat_prng_add_jitter_entropy(CRYPTO_RNG_SRC_JITTER_RPC,
				     &thread_rpc_pnum);

	ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
	if (ret)
		return ret;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	return get_rpc_arg_res(arg, num_params, params);
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @cookie:	cookie received when allocating the buffer
 * @bt:		must be the same as supplied when allocating
 * @mobj:	mobj that describes allocated buffer
 *
 * This function also frees corresponding mobj.
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	void *arg = NULL;
	uint64_t carg = 0;
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t ret = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param,
				   &arg, &carg);

	mobj_put(mobj);

	if (!ret) {
		reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

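/*
 * Validates the normal world reply to an OPTEE_RPC_CMD_SHM_ALLOC request
 * and wraps the returned buffer in a mobj, contiguous or noncontiguous
 * depending on the reply's attributes.
 */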
static struct mobj *get_rpc_alloc_res(struct optee_msg_arg *arg,
				      unsigned int bt, size_t size)
{
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;
	size_t sz = 0;
	paddr_t p = 0;

	if (arg->ret || arg->num_params != 1)
		goto err;

	if (arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT &&
	    arg->params[0].attr != (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				    OPTEE_MSG_ATTR_NONCONTIG))
		goto err;

	p = arg->params[0].u.tmem.buf_ptr;
	sz = READ_ONCE(arg->params[0].u.tmem.size);
	cookie = arg->params[0].u.tmem.shm_ref;
	if (sz < size)
		goto err;

	if (arg->params[0].attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT)
		mobj = rpc_shm_mobj_alloc(p, sz, cookie);
	else
		mobj = msg_param_mobj_from_noncontig(p, sz, cookie, true);

	if (!mobj) {
		thread_rpc_free(bt, cookie, mobj);
		goto err;
	}

	assert(mobj_is_nonsec(mobj));
	return mobj;
err:
	EMSG("RPC allocation failed. Non-secure world result: ret=%#"
	     PRIx32" ret_origin=%#"PRIx32, arg->ret, arg->ret_origin);
	return NULL;
}

/**
 * Allocates shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_RPC_SHM_TYPE_*
 *
 * Returns a pointer to MOBJ for the memory on success, or NULL on failure.
 */
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	void *arg = NULL;
	uint64_t carg = 0;
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	uint32_t ret = get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param,
				   &arg, &carg);

	if (ret)
		return NULL;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	return get_rpc_alloc_res(arg, bt, size);
}

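/*
 * The wrappers below allocate and free the OPTEE_RPC_SHM_TYPE_* classes
 * of shared memory: APPL for memory that may be shared with a normal
 * world user space application, KERNEL for memory private to the normal
 * world kernel, and GLOBAL for kernel memory that may also be exported
 * to user space.
 */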
struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	/*
	 * Error out early since kernel private dynamic shared memory
	 * allocations don't currently use the `OPTEE_MSG_ATTR_NONCONTIG` bit
	 * and therefore cannot be larger than a page.
	 */
	if (IS_ENABLED(CFG_CORE_DYN_SHM) && size > SMALL_PAGE_SIZE)
		return NULL;

	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
			mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
			mobj);
}