1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2021-2024, Arm Limited
4  */
5 #include <assert.h>
6 #include <io.h>
7 #include <kernel/panic.h>
8 #include <kernel/secure_partition.h>
9 #include <kernel/spinlock.h>
10 #include <kernel/spmc_sp_handler.h>
11 #include <kernel/tee_misc.h>
12 #include <kernel/thread_private.h>
13 #include <mm/mobj.h>
14 #include <mm/sp_mem.h>
15 #include <mm/vm.h>
16 #include <optee_ffa.h>
17 #include <string.h>
18 
19 static unsigned int mem_ref_lock = SPINLOCK_UNLOCK;
20 
21 void spmc_sp_start_thread(struct thread_smc_args *args)
22 {
23 	thread_sp_alloc_and_run(args);
24 }
25 
26 static void ffa_set_error(struct thread_smc_args *args, uint32_t error)
27 {
28 	args->a0 = FFA_ERROR;
29 	args->a1 = FFA_PARAM_MBZ;
30 	args->a2 = error;
31 	args->a3 = FFA_PARAM_MBZ;
32 	args->a4 = FFA_PARAM_MBZ;
33 	args->a5 = FFA_PARAM_MBZ;
34 	args->a6 = FFA_PARAM_MBZ;
35 	args->a7 = FFA_PARAM_MBZ;
36 }
37 
38 static void ffa_success(struct thread_smc_args *args)
39 {
40 	args->a0 = FFA_SUCCESS_32;
41 }
42 
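/*
 * Resolve the destination of a direct message. When the message comes from
 * the normal world (caller == NULL) the destination must be a known SP.
 * When it comes from an SP, the source field in w1 must match the calling
 * SP's endpoint ID. *dst is set to NULL when the destination is a normal
 * world endpoint.
 */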
43 static TEE_Result ffa_get_dst(struct thread_smc_args *args,
44 			      struct sp_session *caller,
45 			      struct sp_session **dst)
46 {
47 	struct sp_session *s = NULL;
48 
49 	s = sp_get_session(FFA_DST(args->a1));
50 
51 	/* Message came from the NW */
52 	if (!caller) {
53 		if (!s) {
54 			EMSG("Neither destination nor source is a SP");
55 			return FFA_INVALID_PARAMETERS;
56 		}
57 	} else {
58 		/* Check if the source matches the endpoint we came from */
59 		if (FFA_SRC(args->a1) != caller->endpoint_id) {
60 			EMSG("Source address doesn't match the endpoint id");
61 			return FFA_INVALID_PARAMETERS;
62 		}
63 	}
64 
65 	*dst = s;
66 
67 	return FFA_OK;
68 }
69 
70 static struct sp_mem_receiver *find_sp_mem_receiver(struct sp_session *s,
71 						    struct sp_mem *smem)
72 {
73 	struct sp_mem_receiver *receiver = NULL;
74 
75 	/*
76 	 * FF-A Spec 8.10.2:
77 	 * Each Handle identifies a single unique composite memory region
78  * description, that is, there is a 1:1 mapping between the two.
79 	 *
80  * Each memory share has a unique handle. We can only have each SP
81 	 * once as a receiver in the memory share. For each receiver of a
82 	 * memory share, we have one sp_mem_access_descr object.
83 	 * This means that there can only be one SP linked to a specific
84 	 * struct sp_mem_access_descr.
85 	 */
86 	SLIST_FOREACH(receiver, &smem->receivers, link) {
87 		if (receiver->perm.endpoint_id == s->endpoint_id)
88 			break;
89 	}
90 	return receiver;
91 }
92 
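/*
 * Register one receiver (endpoint and permissions) of a memory share. The
 * endpoint must be a loaded SP, may only appear once per share and may not
 * request permissions outside FFA_MEM_ACC_MASK.
 */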
93 static int add_mem_region_to_sp(struct ffa_mem_access *mem_acc,
94 				struct sp_mem *smem)
95 {
96 	struct ffa_mem_access_perm *access_perm = &mem_acc->access_perm;
97 	struct sp_session *s = NULL;
98 	struct sp_mem_receiver *receiver = NULL;
99 	uint8_t perm = READ_ONCE(access_perm->perm);
100 	uint16_t endpoint_id = READ_ONCE(access_perm->endpoint_id);
101 
102 	s = sp_get_session(endpoint_id);
103 
104 	/* Only add memory shares of loaded SPs */
105 	if (!s)
106 		return FFA_DENIED;
107 
108 	/* Only allow each endpoint once */
109 	if (find_sp_mem_receiver(s, smem))
110 		return FFA_DENIED;
111 
112 	if (perm & ~FFA_MEM_ACC_MASK)
113 		return FFA_DENIED;
114 
115 	receiver = calloc(1, sizeof(struct sp_mem_receiver));
116 	if (!receiver)
117 		return FFA_NO_MEMORY;
118 
119 	receiver->smem = smem;
120 
121 	receiver->perm.endpoint_id = endpoint_id;
122 	receiver->perm.perm = perm;
123 	receiver->perm.flags = READ_ONCE(access_perm->flags);
124 
125 	SLIST_INSERT_HEAD(&smem->receivers, receiver, link);
126 
127 	return FFA_OK;
128 }
129 
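/*
 * Handle an FFA_MEM_SHARE request sent by an SP. The transaction descriptor
 * is read from the rxtx buffer; fragmented descriptors and custom buffers
 * are rejected. On success the 64-bit global handle is returned split over
 * w2 (low) and w3 (high).
 */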
130 static void spmc_sp_handle_mem_share(struct thread_smc_args *args,
131 				     struct ffa_rxtx *rxtx,
132 				     struct sp_session *owner_sp)
133 {
134 	struct ffa_mem_transaction_x mem_trans = { };
135 	uint32_t tot_len = args->a1;
136 	uint32_t frag_len = args->a2;
137 	uint64_t global_handle = 0;
138 	int res = FFA_OK;
139 
140 	cpu_spin_lock(&rxtx->spinlock);
141 
142 	/* Descriptor fragments or custom buffers aren't supported yet. */
143 	if (frag_len != tot_len || args->a3 || args->a4)
144 		res = FFA_NOT_SUPPORTED;
145 	else if (frag_len > rxtx->size)
146 		res = FFA_INVALID_PARAMETERS;
147 	else
148 		res = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx,
149 						frag_len, &mem_trans);
150 	if (!res)
151 		res = spmc_sp_add_share(&mem_trans, rxtx, tot_len, frag_len,
152 					&global_handle, owner_sp);
153 	if (!res) {
154 		args->a3 = high32_from_64(global_handle);
155 		args->a2 = low32_from_64(global_handle);
156 		args->a1 = FFA_PARAM_MBZ;
157 		args->a0 = FFA_SUCCESS_32;
158 	} else {
159 		ffa_set_error(args, res);
160 	}
161 
162 	cpu_spin_unlock(&rxtx->spinlock);
163 }
164 
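/*
 * Add an address range owned by an SP to a memory share. The range is split
 * into one sp_mem_map_region per underlying mobj, the requested permissions
 * are checked against the owner's current mapping and exclusive access to
 * the pages is verified.
 */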
165 static int spmc_sp_add_sp_region(struct sp_mem *smem,
166 				 struct ffa_address_range *mem_reg,
167 				 struct sp_session *owner_sp,
168 				 uint8_t highest_permission)
169 {
170 	struct sp_ctx *sp_ctx = NULL;
171 	uint64_t va = READ_ONCE(mem_reg->address);
172 	int res = FFA_OK;
173 	uint64_t region_len = READ_ONCE(mem_reg->page_count) * SMALL_PAGE_SIZE;
174 	struct mobj *mobj = NULL;
175 
176 	sp_ctx = to_sp_ctx(owner_sp->ts_sess.ctx);
177 
178 	/*
179 	 * The memory region we try to share might not be linked to just one
180 	 * mobj. Create a new region for each mobj.
181 	 */
182 	while (region_len) {
183 		size_t len = region_len;
184 		struct sp_mem_map_region *region = NULL;
185 		uint16_t prot = 0;
186 		size_t offs = 0;
187 
188 		/*
189 		 * There is already a mobj for each address that is in the SP's
190 		 * address range.
191 		 */
192 		mobj = vm_get_mobj(&sp_ctx->uctx, va, &len, &prot, &offs);
193 		if (!mobj)
194 			return FFA_DENIED;
195 
196 		/*
197 		 * If we share memory from an SP, check that we are not sharing
198 		 * it with a higher permission than the memory was originally
199 		 * mapped with.
200 		 */
201 		if ((highest_permission & FFA_MEM_ACC_RW) &&
202 		    !(prot & TEE_MATTR_UW)) {
203 			res = FFA_DENIED;
204 			goto err;
205 		}
206 
207 		if ((highest_permission & FFA_MEM_ACC_EXE) &&
208 		    !(prot & TEE_MATTR_UX)) {
209 			res = FFA_DENIED;
210 			goto err;
211 		}
212 
213 		region = calloc(1, sizeof(*region));
		if (!region) {
			res = FFA_NO_MEMORY;
			goto err;
		}
214 		region->mobj = mobj;
215 		region->page_offset = offs;
216 		region->page_count = len / SMALL_PAGE_SIZE;
217 
218 		if (!sp_has_exclusive_access(region, &sp_ctx->uctx)) {
219 			free(region);
220 			res = FFA_DENIED;
221 			goto err;
222 		}
223 
224 		va += len;
225 		region_len -= len;
226 		SLIST_INSERT_HEAD(&smem->regions, region, link);
227 	}
228 
229 	return FFA_OK;
230 err:
231 	mobj_put(mobj);
232 
233 	return res;
234 }
235 
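/*
 * Add a memory region described by normal world address ranges to a memory
 * share. A new mobj is created for the pages and tracked as a single
 * sp_mem_map_region after checking for exclusive access.
 */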
236 static int spmc_sp_add_nw_region(struct sp_mem *smem,
237 				 struct ffa_mem_region *mem_reg)
238 {
239 	uint64_t page_count = READ_ONCE(mem_reg->total_page_count);
240 	struct sp_mem_map_region *region = NULL;
241 	struct mobj *m = sp_mem_new_mobj(page_count, TEE_MATTR_MEM_TYPE_CACHED,
242 					 false);
243 	unsigned int i = 0;
244 	unsigned int idx = 0;
245 	int res = FFA_OK;
246 	uint64_t address_count = READ_ONCE(mem_reg->address_range_count);
247 
248 	if (!m)
249 		return FFA_NO_MEMORY;
250 
251 	for (i = 0; i < address_count; i++) {
252 		struct ffa_address_range *addr_range = NULL;
253 
254 		addr_range = &mem_reg->address_range_array[i];
255 		if (sp_mem_add_pages(m, &idx,
256 				     READ_ONCE(addr_range->address),
257 				     READ_ONCE(addr_range->page_count))) {
258 			res = FFA_DENIED;
259 			goto clean_up;
260 		}
261 	}
262 
263 	region = calloc(1, sizeof(*region));
264 	if (!region) {
265 		res = FFA_NO_MEMORY;
266 		goto clean_up;
267 	}
268 
269 	region->mobj = m;
270 	region->page_count = page_count;
271 
272 	if (!sp_has_exclusive_access(region, NULL)) {
273 		free(region);
274 		res = FFA_DENIED;
275 		goto clean_up;
276 	}
277 
278 	SLIST_INSERT_HEAD(&smem->regions, region, link);
279 	return FFA_OK;
280 clean_up:
281 	mobj_put(m);
282 	return res;
283 }
284 
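/*
 * Validate a memory share transaction descriptor and turn it into a struct
 * sp_mem: the descriptor and page counts are checked against the buffer
 * size, the address ranges are added (SP or normal world owned) and every
 * receiver is registered. On success the new handle is returned through
 * *global_handle.
 */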
285 int spmc_sp_add_share(struct ffa_mem_transaction_x *mem_trans,
286 		      struct ffa_rxtx *rxtx, size_t blen, size_t flen,
287 		      uint64_t *global_handle, struct sp_session *owner_sp)
288 {
289 	int res = FFA_INVALID_PARAMETERS;
290 	unsigned int num_mem_accs = 0;
291 	unsigned int i = 0;
292 	struct ffa_mem_access *mem_acc = NULL;
293 	size_t needed_size = 0;
294 	size_t addr_range_offs = 0;
295 	struct ffa_mem_region *mem_reg = NULL;
296 	uint8_t highest_permission = 0;
297 	struct sp_mem *smem = NULL;
298 	uint16_t sender_id = mem_trans->sender_id;
299 	size_t addr_range_cnt = 0;
300 	struct ffa_address_range *addr_range = NULL;
301 	size_t total_page_count = 0;
302 	size_t page_count_sum = 0;
303 
304 	if (blen != flen) {
305 		DMSG("Fragmented memory share is not supported for SPs");
306 		return FFA_NOT_SUPPORTED;
307 	}
308 
309 	smem = sp_mem_new();
310 	if (!smem)
311 		return FFA_NO_MEMORY;
312 
313 	if ((owner_sp && owner_sp->endpoint_id != sender_id) ||
314 	    (!owner_sp && sp_get_session(sender_id))) {
315 		res = FFA_DENIED;
316 		goto cleanup;
317 	}
318 
319 	num_mem_accs = mem_trans->mem_access_count;
320 	mem_acc = (void *)((vaddr_t)rxtx->rx + mem_trans->mem_access_offs);
321 
322 	if (!num_mem_accs) {
323 		res = FFA_INVALID_PARAMETERS;
324 		goto cleanup;
325 	}
326 
327 	/* Store the ffa_mem_transaction */
328 	smem->sender_id = sender_id;
329 	smem->mem_reg_attr = mem_trans->mem_reg_attr;
330 	smem->flags = mem_trans->flags;
331 	smem->tag = mem_trans->tag;
332 
333 	if (MUL_OVERFLOW(num_mem_accs, sizeof(*mem_acc), &needed_size) ||
334 	    ADD_OVERFLOW(needed_size, mem_trans->mem_access_offs,
335 			 &needed_size) || needed_size > blen) {
336 		res = FFA_INVALID_PARAMETERS;
337 		goto cleanup;
338 	}
339 
340 	for (i = 0; i < num_mem_accs; i++)
341 		highest_permission |= READ_ONCE(mem_acc[i].access_perm.perm);
342 
343 	/* Check if the memory region array fits into the buffer */
344 	addr_range_offs = READ_ONCE(mem_acc[0].region_offs);
345 
346 	if (ADD_OVERFLOW(addr_range_offs, sizeof(*mem_reg), &needed_size) ||
347 	    needed_size > blen) {
348 		res = FFA_INVALID_PARAMETERS;
349 		goto cleanup;
350 	}
351 
352 	mem_reg = (void *)((char *)rxtx->rx + addr_range_offs);
353 	addr_range_cnt = READ_ONCE(mem_reg->address_range_count);
354 	total_page_count = READ_ONCE(mem_reg->total_page_count);
355 
356 	/* Memory transaction without address ranges or pages is invalid */
357 	if (!addr_range_cnt || !total_page_count) {
358 		res = FFA_INVALID_PARAMETERS;
359 		goto cleanup;
360 	}
361 
362 	/* Check if the region descriptors fit into the buffer */
363 	if (MUL_OVERFLOW(addr_range_cnt, sizeof(*addr_range), &needed_size) ||
364 	    ADD_OVERFLOW(needed_size, addr_range_offs, &needed_size) ||
365 	    needed_size > blen) {
366 		res = FFA_INVALID_PARAMETERS;
367 		goto cleanup;
368 	}
369 
370 	page_count_sum = 0;
371 	for (i = 0; i < addr_range_cnt; i++) {
372 		addr_range = &mem_reg->address_range_array[i];
373 
374 		/* Memory region without pages is invalid */
375 		if (!addr_range->page_count) {
376 			res = FFA_INVALID_PARAMETERS;
377 			goto cleanup;
378 		}
379 
380 		/* Sum the page count of each region */
381 		if (ADD_OVERFLOW(page_count_sum, addr_range->page_count,
382 				 &page_count_sum)) {
383 			res = FFA_INVALID_PARAMETERS;
384 			goto cleanup;
385 		}
386 	}
387 
388 	/* Validate total page count */
389 	if (total_page_count != page_count_sum) {
390 		res = FFA_INVALID_PARAMETERS;
391 		goto cleanup;
392 	}
393 
394 	/* Iterate over all the addresses */
395 	if (owner_sp) {
396 		for (i = 0; i < addr_range_cnt; i++) {
397 			addr_range = &mem_reg->address_range_array[i];
398 			res = spmc_sp_add_sp_region(smem, addr_range,
399 						    owner_sp,
400 						    highest_permission);
401 			if (res)
402 				goto cleanup;
403 		}
404 	} else {
405 		res = spmc_sp_add_nw_region(smem, mem_reg);
406 		if (res)
407 			goto cleanup;
408 	}
409 
410 	/* Register each receiver of the memory share */
411 	for (i = 0; i < num_mem_accs; i++) {
412 		res = add_mem_region_to_sp(&mem_acc[i], smem);
413 		if (res)
414 			goto cleanup;
415 	}
416 	*global_handle = smem->global_handle;
417 	sp_mem_add(smem);
418 
419 	return FFA_OK;
420 
421 cleanup:
422 	sp_mem_remove(smem);
423 	return res;
424 }
425 
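/* Mark a busy SP session as preempted */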
426 void spmc_sp_set_to_preempted(struct ts_session *ts_sess)
427 {
428 	if (ts_sess && is_sp_ctx(ts_sess->ctx)) {
429 		struct sp_session *sp_sess = to_sp_session(ts_sess);
430 
431 		assert(sp_sess->state == sp_busy);
432 		sp_sess->state = sp_preempted;
433 	}
434 }
435 
436 int spmc_sp_resume_from_preempted(uint16_t endpoint_id)
437 {
438 	struct sp_session *sp_sess = sp_get_session(endpoint_id);
439 
440 	if (!sp_sess)
441 		return FFA_INVALID_PARAMETERS;
442 
443 	if (sp_sess->state != sp_preempted)
444 		return FFA_DENIED;
445 
446 	sp_sess->state = sp_busy;
447 
448 	return FFA_OK;
449 }
450 
451 static bool check_rxtx(struct ffa_rxtx *rxtx)
452 {
453 	return rxtx && rxtx->rx && rxtx->tx && rxtx->size > 0;
454 }
455 
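/*
 * Check that a retrieve request matches the stored share: a single
 * ffa_mem_access element, a matching tag, no permissions beyond what was
 * shared, no CLEAR_RELINQUISH flag and enough room in the TX buffer for the
 * response.
 */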
456 static TEE_Result
457 check_retrieve_request(struct sp_mem_receiver *receiver, uint32_t ffa_vers,
458 		       struct ffa_mem_transaction_x *mem_trans,
459 		       void *rx, struct sp_mem *smem, int64_t tx_len)
460 {
461 	struct ffa_mem_access *retr_access = NULL;
462 	uint8_t share_perm = receiver->perm.perm;
463 	uint32_t retr_perm = 0;
464 	uint32_t retr_flags = mem_trans->flags;
465 	uint64_t retr_tag = mem_trans->tag;
466 	struct sp_mem_map_region *reg = NULL;
467 
468 	/*
469 	 * The request came from the endpoint. It should only have one
470 	 * ffa_mem_access element
471 	 */
472 	if (mem_trans->mem_access_count != 1)
473 		return TEE_ERROR_BAD_PARAMETERS;
474 
475 	retr_access = (void *)((vaddr_t)rx + mem_trans->mem_access_offs);
476 	retr_perm = READ_ONCE(retr_access->access_perm.perm);
477 
478 	/* Check if tag is correct */
479 	if (receiver->smem->tag != retr_tag) {
480 		EMSG("Incorrect tag %#"PRIx64" %#"PRIx64, receiver->smem->tag,
481 		     retr_tag);
482 		return TEE_ERROR_BAD_PARAMETERS;
483 	}
484 
485 	/* Check permissions and flags */
486 	if ((retr_perm & FFA_MEM_ACC_RW) &&
487 	    !(share_perm & FFA_MEM_ACC_RW)) {
488 		DMSG("Incorrect memshare permission set");
489 		return TEE_ERROR_BAD_PARAMETERS;
490 	}
491 
492 	if ((retr_perm & FFA_MEM_ACC_EXE) &&
493 	    !(share_perm & FFA_MEM_ACC_EXE)) {
494 		DMSG("Incorrect memshare permission set");
495 		return TEE_ERROR_BAD_PARAMETERS;
496 	}
497 
498 	if (retr_flags & FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) {
499 		DMSG("CLEAR_RELINQUISH is not allowed for FFA_SHARE");
500 		return TEE_ERROR_BAD_PARAMETERS;
501 	}
502 
503 	/*
504 	 * Check if there is enough space in the tx buffer to send the response.
505 	 */
506 	if (ffa_vers <= FFA_VERSION_1_0)
507 		tx_len -= sizeof(struct ffa_mem_transaction_1_0);
508 	else
509 		tx_len -= sizeof(struct ffa_mem_transaction_1_1);
510 	tx_len -= sizeof(struct ffa_mem_access) +
511 		  sizeof(struct ffa_mem_region);
512 
513 	if (tx_len < 0)
514 		return FFA_NO_MEMORY;
515 
516 	SLIST_FOREACH(reg, &smem->regions, link) {
517 		tx_len -= sizeof(struct ffa_address_range);
518 		if (tx_len < 0)
519 			return FFA_NO_MEMORY;
520 	}
521 
522 	return TEE_SUCCESS;
523 }
524 
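/*
 * Write the FFA_MEM_RETRIEVE_RESP descriptor into the destination buffer:
 * one memory transaction descriptor (v1.0 or v1.1 layout depending on the
 * negotiated FF-A version), one mem_access element and one memory region
 * whose address ranges hold the SP virtual addresses of the shared regions.
 */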
525 static void create_retrieve_response(uint32_t ffa_vers, void *dst_buffer,
526 				     struct sp_mem_receiver *receiver,
527 				     struct sp_mem *smem, struct sp_session *s)
528 {
529 	size_t off = 0;
530 	struct ffa_mem_region *dst_region =  NULL;
531 	struct ffa_address_range *addr_dst = NULL;
532 	struct sp_mem_map_region *reg = NULL;
533 	struct ffa_mem_access *mem_acc = NULL;
534 
535 	/*
536 	 * We respond with an ffa_mem_retrieve_resp which defines the
537 	 * following data in the RX buffer of the SP:
538 	 * struct mem_transaction_descr
539 	 * struct mem_access_descr (always 1 element)
540 	 * struct mem_region_descr
541 	 */
542 	if (ffa_vers <= FFA_VERSION_1_0) {
543 		struct ffa_mem_transaction_1_0 *d_ds = dst_buffer;
544 
545 		memset(d_ds, 0, sizeof(*d_ds));
546 
547 		off = sizeof(*d_ds);
548 		mem_acc = d_ds->mem_access_array;
549 
550 		/* copy the mem_transaction_descr */
551 		d_ds->sender_id = receiver->smem->sender_id;
552 		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
553 		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
554 		d_ds->tag = receiver->smem->tag;
555 		d_ds->mem_access_count = 1;
556 	} else {
557 		struct ffa_mem_transaction_1_1 *d_ds = dst_buffer;
558 
559 		memset(d_ds, 0, sizeof(*d_ds));
560 
561 		off = sizeof(*d_ds);
562 		mem_acc = (void *)(d_ds + 1);
563 
564 		d_ds->sender_id = receiver->smem->sender_id;
565 		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
566 		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
567 		d_ds->tag = receiver->smem->tag;
568 		d_ds->mem_access_size = sizeof(*mem_acc);
569 		d_ds->mem_access_count = 1;
570 		d_ds->mem_access_offs = off;
571 	}
572 
573 	off += sizeof(struct ffa_mem_access);
574 	dst_region = (struct ffa_mem_region *)(mem_acc + 1);
575 
576 	/* Copy the mem_access_descr */
577 	mem_acc[0].region_offs = off;
578 	memcpy(&mem_acc[0].access_perm, &receiver->perm,
579 	       sizeof(struct ffa_mem_access_perm));
580 
581 	/* Copy the mem_region_descr */
582 	memset(dst_region, 0, sizeof(*dst_region));
583 	dst_region->address_range_count = 0;
584 	dst_region->total_page_count = 0;
585 
586 	addr_dst = dst_region->address_range_array;
587 
588 	SLIST_FOREACH(reg, &smem->regions, link) {
589 		uint32_t offset = reg->page_offset;
590 		struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
591 
592 		addr_dst->address = (uint64_t)sp_mem_get_va(&ctx->uctx,
593 							    offset,
594 							    reg->mobj);
595 		addr_dst->page_count = reg->page_count;
596 		dst_region->address_range_count++;
597 
598 		dst_region->total_page_count += addr_dst->page_count;
		addr_dst++;
599 	}
600 }
601 
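/*
 * Handle FFA_MEM_RETRIEVE_REQ from an SP: look up the share by handle,
 * validate the request, map the regions into the SP on the first retrieval
 * (tracked with a per-receiver reference count) and write the retrieve
 * response into the TX buffer.
 */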
602 static void ffa_mem_retrieve(struct thread_smc_args *args,
603 			     struct sp_session *caller_sp,
604 			     struct ffa_rxtx *rxtx)
605 {
606 	struct ffa_mem_transaction_x mem_trans = { };
607 	uint32_t tot_len = args->a1;
608 	uint32_t frag_len = args->a2;
609 	int ret = FFA_OK;
610 	size_t tx_len = 0;
611 	struct ffa_mem_access *mem_acc = NULL;
612 	struct ffa_mem_region *mem_region = NULL;
613 	uint64_t va = 0;
614 	struct sp_mem *smem = NULL;
615 	struct sp_mem_receiver *receiver = NULL;
616 	uint32_t exceptions = 0;
617 	uint32_t address_offset = 0;
618 	size_t needed_size = 0;
619 
620 	if (!check_rxtx(rxtx) || !rxtx->tx_is_mine) {
621 		ret = FFA_DENIED;
622 		goto err;
623 	}
624 	/* Descriptor fragments aren't supported yet. */
625 	if (frag_len != tot_len) {
626 		ret = FFA_NOT_SUPPORTED;
627 		goto err;
628 	}
629 	if (frag_len > rxtx->size) {
630 		ret = FFA_INVALID_PARAMETERS;
631 		goto err;
632 	}
633 
634 	tx_len = rxtx->size;
635 
636 	ret = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, frag_len,
637 					&mem_trans);
638 	if (ret)
639 		goto err;
640 
641 	smem = sp_mem_get(mem_trans.global_handle);
642 	if (!smem) {
643 		DMSG("Incorrect handle");
644 		ret = FFA_DENIED;
645 		goto err;
646 	}
647 
648 	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
	if (!receiver) {
		DMSG("Caller is not a receiver of this share");
		ret = FFA_DENIED;
		goto err;
	}
649 
650 	mem_acc = (void *)((vaddr_t)rxtx->rx + mem_trans.mem_access_offs);
651 
652 	address_offset = READ_ONCE(mem_acc[0].region_offs);
653 
654 	if (ADD_OVERFLOW(address_offset, sizeof(struct ffa_mem_region),
655 			 &needed_size) || needed_size > tx_len) {
656 		ret = FFA_INVALID_PARAMETERS;
657 		goto err;
658 	}
659 
660 	if (check_retrieve_request(receiver, rxtx->ffa_vers, &mem_trans,
661 				   rxtx->rx, smem, tx_len) != TEE_SUCCESS) {
662 		ret = FFA_INVALID_PARAMETERS;
663 		goto err;
664 	}
665 
666 	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
667 
668 	if (receiver->ref_count == UINT8_MAX) {
669 		ret = FFA_DENIED;
670 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
671 		goto err;
672 	}
673 
674 	receiver->ref_count++;
675 
676 	/* We only need to map the region the first time we request it. */
677 	if (receiver->ref_count == 1) {
678 		TEE_Result ret_map = TEE_SUCCESS;
679 
680 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
681 
682 		/*
683 		 * Try to map the memory linked to the handle in
684 		 * sp_mem_access_descr.
685 		 */
686 		mem_region = (struct ffa_mem_region *)((vaddr_t)rxtx->rx +
687 						       address_offset);
688 
689 		va = READ_ONCE(mem_region->address_range_array[0].address);
690 		ret_map = sp_map_shared(caller_sp, receiver, smem,  &va);
691 
692 		if (ret_map) {
693 			EMSG("Could not map memory region: %#"PRIx32, ret_map);
694 			exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
695 			receiver->ref_count--;
696 			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
697 			ret = FFA_DENIED;
698 			goto err;
699 		}
700 	} else {
701 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
702 	}
703 
704 	create_retrieve_response(rxtx->ffa_vers, rxtx->tx, receiver, smem,
705 				 caller_sp);
706 
707 	args->a0 = FFA_MEM_RETRIEVE_RESP;
708 	args->a1 = tx_len;
709 	args->a2 = tx_len;
710 
711 	rxtx->tx_is_mine = false;
712 
713 	return;
714 err:
715 	ffa_set_error(args, ret);
716 }
717 
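/*
 * Handle FFA_MEM_RELINQUISH from an SP: validate the relinquish descriptor
 * in the rxtx buffer, decrement the receiver's reference count and unmap
 * the regions from the SP when the last reference is dropped.
 */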
718 static void ffa_mem_relinquish(struct thread_smc_args *args,
719 			       struct sp_session *caller_sp,
720 			       struct ffa_rxtx  *rxtx)
721 {
722 	struct sp_mem *smem = NULL;
723 	struct ffa_mem_relinquish *mem = rxtx->rx;
724 	struct sp_mem_receiver *receiver = NULL;
725 	int err = FFA_NOT_SUPPORTED;
726 	uint32_t exceptions = 0;
727 
728 	if (!check_rxtx(rxtx)) {
729 		ffa_set_error(args, FFA_DENIED);
730 		return;
731 	}
732 
733 	exceptions = cpu_spin_lock_xsave(&rxtx->spinlock);
734 	smem = sp_mem_get(READ_ONCE(mem->handle));
735 
736 	if (!smem) {
737 		DMSG("Incorrect handle");
738 		err = FFA_DENIED;
739 		goto err_unlock_rxtx;
740 	}
741 
742 	if (READ_ONCE(mem->endpoint_count) != 1) {
743 		DMSG("Incorrect endpoint count");
744 		err = FFA_INVALID_PARAMETERS;
745 		goto err_unlock_rxtx;
746 	}
747 
748 	if (READ_ONCE(mem->endpoint_id_array[0]) != caller_sp->endpoint_id) {
749 		DMSG("Incorrect endpoint id");
750 		err = FFA_DENIED;
751 		goto err_unlock_rxtx;
752 	}
753 
754 	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
755 
756 	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
757 
758 	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
759 	if (!receiver->ref_count) {
760 		DMSG("Too many relinquish requests");
761 		err = FFA_DENIED;
762 		goto err_unlock_memref;
763 	}
764 
765 	receiver->ref_count--;
766 	if (!receiver->ref_count) {
767 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
768 		if (sp_unmap_ffa_regions(caller_sp, smem) != TEE_SUCCESS) {
769 			DMSG("Failed to unmap region");
770 			ffa_set_error(args, FFA_DENIED);
771 			return;
772 		}
773 	} else {
774 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
775 	}
776 
777 	ffa_success(args);
778 	return;
779 
780 err_unlock_rxtx:
781 	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
782 	ffa_set_error(args, err);
783 	return;
784 err_unlock_memref:
785 	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
786 	ffa_set_error(args, err);
787 }
788 
789 static void zero_mem_region(struct sp_mem *smem, struct sp_session *s)
790 {
791 	void *addr = NULL;
792 	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
793 	struct sp_mem_map_region *reg = NULL;
794 
795 	ts_push_current_session(&s->ts_sess);
796 	SLIST_FOREACH(reg, &smem->regions, link) {
797 		size_t sz = reg->page_count * SMALL_PAGE_SIZE;
798 
799 		addr = sp_mem_get_va(&ctx->uctx, reg->page_offset, reg->mobj);
800 
801 		assert(addr);
802 		memset(addr, 0, sz);
803 	}
804 	ts_pop_current_session();
805 }
806 
807 /*
808  * ffa_mem_reclaim returns false if it couldn't process the reclaim message.
809  * This happens when the memory region was shared with the OP-TEE endpoint.
810  * After this, thread_spmc calls handle_mem_reclaim() to make sure that the
811  * region is reclaimed from the OP-TEE endpoint.
812  */
813 bool ffa_mem_reclaim(struct thread_smc_args *args,
814 		     struct sp_session *caller_sp)
815 {
816 	uint64_t handle = reg_pair_to_64(args->a2, args->a1);
817 	uint32_t flags = args->a3;
818 	struct sp_mem *smem = NULL;
819 	struct sp_mem_receiver *receiver  = NULL;
820 	uint32_t exceptions = 0;
821 
822 	smem = sp_mem_get(handle);
823 	if (!smem)
824 		return false;
825 
826 	/*
827 	 * If the caller is an SP, make sure that it is the owner of the share.
828 	 * If the call comes from NWd this is ensured by the hypervisor.
829 	 */
830 	if (caller_sp && caller_sp->endpoint_id != smem->sender_id) {
831 		ffa_set_error(args, FFA_INVALID_PARAMETERS);
832 		return true;
833 	}
834 
835 	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
836 
837 	/* Make sure that all shares were relinquished */
838 	SLIST_FOREACH(receiver, &smem->receivers, link) {
839 		if (receiver->ref_count != 0) {
840 			ffa_set_error(args, FFA_DENIED);
841 			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
842 			return true;
843 		}
844 	}
845 
846 	if (flags & FFA_MEMORY_REGION_FLAG_CLEAR) {
847 		if (caller_sp) {
848 			zero_mem_region(smem, caller_sp);
849 		} else {
850 			/*
851 			 * Currently we don't support zeroing Normal World
852 			 * memory. To do this we would have to map the memory
853 			 * again, zero it and unmap it.
854 			 */
855 			ffa_set_error(args, FFA_DENIED);
856 			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
857 			return true;
858 		}
859 	}
860 
861 	sp_mem_remove(smem);
862 	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
863 
864 	ffa_success(args);
865 	return true;
866 }
867 
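/*
 * Handle FFA_MSG_SEND_DIRECT_REQ aimed at an SP: validate source and
 * destination, check the direct messaging properties of both endpoints,
 * handle framework messages (VM created/destroyed) and enter the
 * destination SP. Returns the session to continue with: the destination on
 * success, the caller on error.
 */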
868 static struct sp_session *
869 ffa_handle_sp_direct_req(struct thread_smc_args *args,
870 			 struct sp_session *caller_sp)
871 {
872 	struct sp_session *dst = NULL;
873 	TEE_Result res = FFA_OK;
874 
875 	res = ffa_get_dst(args, caller_sp, &dst);
876 	if (res) {
877 		/* Tried to send message to an incorrect endpoint */
878 		ffa_set_error(args, res);
879 		return caller_sp;
880 	}
881 	if (!dst) {
882 		EMSG("Request to normal world not supported");
883 		ffa_set_error(args, FFA_NOT_SUPPORTED);
884 		return caller_sp;
885 	}
886 
887 	if (dst == caller_sp) {
888 		EMSG("Cannot send message to own ID");
889 		ffa_set_error(args, FFA_INVALID_PARAMETERS);
890 		return caller_sp;
891 	}
892 
893 	if (caller_sp &&
894 	    !(caller_sp->props & FFA_PART_PROP_DIRECT_REQ_SEND)) {
895 		EMSG("SP 0x%"PRIx16" doesn't support sending direct requests",
896 		     caller_sp->endpoint_id);
897 		ffa_set_error(args, FFA_NOT_SUPPORTED);
898 		return caller_sp;
899 	}
900 
901 	if (!(dst->props & FFA_PART_PROP_DIRECT_REQ_RECV)) {
902 		EMSG("SP 0x%"PRIx16" doesn't support receipt of direct requests",
903 		     dst->endpoint_id);
904 		ffa_set_error(args, FFA_NOT_SUPPORTED);
905 		return caller_sp;
906 	}
907 
908 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
909 		switch (args->a2 & FFA_MSG_TYPE_MASK) {
910 		case FFA_MSG_SEND_VM_CREATED:
911 			/* The sender must be the NWd hypervisor (ID 0) */
912 			if (FFA_SRC(args->a1) != 0 || caller_sp) {
913 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
914 				return caller_sp;
915 			}
916 
917 			/* The SP must be subscribed for this message */
918 			if (!(dst->props & FFA_PART_PROP_NOTIF_CREATED)) {
919 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
920 				return caller_sp;
921 			}
922 			break;
923 		case FFA_MSG_SEND_VM_DESTROYED:
924 			/* The sender must be the NWd hypervisor (ID 0) */
925 			if (FFA_SRC(args->a1) != 0 || caller_sp) {
926 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
927 				return caller_sp;
928 			}
929 
930 			/* The SP must be subscribed for this message */
931 			if (!(dst->props & FFA_PART_PROP_NOTIF_DESTROYED)) {
932 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
933 				return caller_sp;
934 			}
935 			break;
936 		default:
937 			ffa_set_error(args, FFA_NOT_SUPPORTED);
938 			return caller_sp;
939 		}
940 	} else if (args->a2 != FFA_PARAM_MBZ) {
941 		ffa_set_error(args, FFA_INVALID_PARAMETERS);
942 		return caller_sp;
943 	}
944 
945 	cpu_spin_lock(&dst->spinlock);
946 	if (dst->state != sp_idle) {
947 		DMSG("SP is busy");
948 		ffa_set_error(args, FFA_BUSY);
949 		cpu_spin_unlock(&dst->spinlock);
950 		return caller_sp;
951 	}
952 
953 	dst->state = sp_busy;
954 	cpu_spin_unlock(&dst->spinlock);
955 
956 	/*
957 	 * Store the calling endpoint id. This will make it possible to check
958 	 * if the response is sent back to the correct endpoint.
959 	 */
960 	dst->caller_id = FFA_SRC(args->a1);
961 
962 	/* Forward the message to the destination SP */
963 	res = sp_enter(args, dst);
964 	if (res) {
965 		/* The SP Panicked */
966 		ffa_set_error(args, FFA_ABORTED);
967 		/* Return error to calling SP */
968 		return caller_sp;
969 	}
970 
971 	return dst;
972 }
973 
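/*
 * Handle FFA_MSG_SEND_DIRECT_RESP from an SP: verify that the response goes
 * back to the endpoint that sent the request, mark the responding SP idle
 * and either return control to the normal world or enter the destination SP.
 */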
974 static struct sp_session *
975 ffa_handle_sp_direct_resp(struct thread_smc_args *args,
976 			  struct sp_session *caller_sp)
977 {
978 	struct sp_session *dst = NULL;
979 	TEE_Result res = FFA_OK;
980 
981 	if (!caller_sp) {
982 		EMSG("Response from normal world not supported");
983 		ffa_set_error(args, FFA_NOT_SUPPORTED);
984 		return NULL;
985 	}
986 
987 	res = ffa_get_dst(args, caller_sp, &dst);
988 	if (res) {
989 		/* Tried to send response to an incorrect endpoint */
990 		ffa_set_error(args, res);
991 		return caller_sp;
992 	}
993 
994 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
995 		switch (args->a2 & FFA_MSG_TYPE_MASK) {
996 		case FFA_MSG_RESP_VM_CREATED:
997 			/* The destination must be the NWd hypervisor (ID 0) */
998 			if (FFA_DST(args->a1) != 0 || dst) {
999 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
1000 				return caller_sp;
1001 			}
1002 
1003 			/* The SP must be subscribed for this message */
1004 			if (!(caller_sp->props & FFA_PART_PROP_NOTIF_CREATED)) {
1005 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
1006 				return caller_sp;
1007 			}
1008 			break;
1009 		case FFA_MSG_RESP_VM_DESTROYED:
1010 			/* The destination must be the NWd hypervisor (ID 0) */
1011 			if (FFA_DST(args->a1) != 0 || dst) {
1012 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
1013 				return caller_sp;
1014 			}
1015 
1016 			/* The SP must be subscribed for this message */
1017 			if (!(caller_sp->props & FFA_PART_PROP_NOTIF_DESTROYED)) {
1018 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
1019 				return caller_sp;
1020 			}
1021 			break;
1022 		default:
1023 			ffa_set_error(args, FFA_NOT_SUPPORTED);
1024 			return caller_sp;
1025 		}
1026 	} else if (args->a2 != FFA_PARAM_MBZ) {
1027 		ffa_set_error(args, FFA_INVALID_PARAMETERS);
1028 		return caller_sp;
1029 	}
1030 
1031 	if (dst && dst->state != sp_busy) {
1032 		EMSG("SP is not waiting for a request");
1033 		ffa_set_error(args, FFA_INVALID_PARAMETERS);
1034 		return caller_sp;
1035 	}
1036 
1037 	if (caller_sp->caller_id != FFA_DST(args->a1)) {
1038 		EMSG("FFA_MSG_SEND_DIRECT_RESP to incorrect SP");
1039 		ffa_set_error(args, FFA_INVALID_PARAMETERS);
1040 		return caller_sp;
1041 	}
1042 
1043 	caller_sp->caller_id = 0;
1044 
1045 	cpu_spin_lock(&caller_sp->spinlock);
1046 	caller_sp->state = sp_idle;
1047 	cpu_spin_unlock(&caller_sp->spinlock);
1048 
1049 	if (!dst) {
1050 		/* Send message back to the NW */
1051 		return NULL;
1052 	}
1053 
1054 	/* Forward the message to the destination SP */
1055 	res = sp_enter(args, dst);
1056 	if (res) {
1057 		/* The SP Panicked */
1058 		ffa_set_error(args, FFA_ABORTED);
1059 		/* Return error to calling SP */
1060 		return caller_sp;
1061 	}
1062 	return dst;
1063 }
1064 
1065 static struct sp_session *
1066 ffa_handle_sp_error(struct thread_smc_args *args,
1067 		    struct sp_session *caller_sp)
1068 {
1069 	/* If caller_sp == NULL send message to Normal World */
1070 	if (caller_sp && sp_enter(args, caller_sp)) {
1071 		/*
1072 		 * We cannot return the error. Unwind the call chain by one
1073 		 * link. Set the state of the SP to dead.
1074 		 */
1075 		caller_sp->state = sp_dead;
1076 		/* Create error. */
1077 		ffa_set_error(args, FFA_ABORTED);
1078 		return  sp_get_session(caller_sp->caller_id);
1079 	}
1080 
1081 	return caller_sp;
1082 }
1083 
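/*
 * Report which FF-A interfaces are implemented for SPs via FFA_FEATURES.
 * Only the RXTX map interfaces are reported as supported here.
 */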
1084 static void handle_features(struct thread_smc_args *args)
1085 {
1086 	uint32_t ret_fid = 0;
1087 	uint32_t ret_w2 = FFA_PARAM_MBZ;
1088 
1089 	switch (args->a1) {
1090 #ifdef ARM64
1091 	case FFA_RXTX_MAP_64:
1092 #endif
1093 	case FFA_RXTX_MAP_32:
1094 		ret_fid = FFA_SUCCESS_32;
1095 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
1096 		break;
1097 	case FFA_ERROR:
1098 	case FFA_VERSION:
1099 	case FFA_SUCCESS_32:
1100 #ifdef ARM64
1101 	case FFA_SUCCESS_64:
1102 #endif
1103 	default:
1104 		ret_fid = FFA_ERROR;
1105 		ret_w2 = FFA_NOT_SUPPORTED;
1106 		break;
1107 	}
1108 
1109 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
1110 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1111 }
1112 
1113 static void handle_mem_perm_get(struct thread_smc_args *args,
1114 				struct sp_session *sp_s)
1115 {
1116 	struct sp_ctx *sp_ctx = NULL;
1117 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1118 	uint16_t attrs = 0;
1119 	uint32_t ret_fid = FFA_ERROR;
1120 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1121 
1122 	/*
1123 	 * The FFA_MEM_PERM_GET interface is only allowed during initialization
1124 	 */
1125 	if (sp_s->is_initialized) {
1126 		ret_val = FFA_DENIED;
1127 		goto out;
1128 	}
1129 
1130 	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
1131 	if (!sp_ctx)
1132 		goto out;
1133 
1134 	/* Query memory attributes */
1135 	ts_push_current_session(&sp_s->ts_sess);
1136 	res = vm_get_prot(&sp_ctx->uctx, args->a1, SMALL_PAGE_SIZE, &attrs);
1137 	ts_pop_current_session();
1138 	if (res)
1139 		goto out;
1140 
1141 	/* Build response value */
1142 	ret_fid = FFA_SUCCESS_32;
1143 	ret_val = 0;
1144 	if ((attrs & TEE_MATTR_URW) == TEE_MATTR_URW)
1145 		ret_val |= FFA_MEM_PERM_RW;
1146 	else if (attrs & TEE_MATTR_UR)
1147 		ret_val |= FFA_MEM_PERM_RO;
1148 
1149 	if ((attrs & TEE_MATTR_UX) == 0)
1150 		ret_val |= FFA_MEM_PERM_NX;
1151 
1152 out:
1153 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
1154 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1155 }
1156 
1157 static void handle_mem_perm_set(struct thread_smc_args *args,
1158 				struct sp_session *sp_s)
1159 {
1160 	struct sp_ctx *sp_ctx = NULL;
1161 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1162 	size_t region_size = 0;
1163 	uint32_t data_perm = 0;
1164 	uint32_t instruction_perm = 0;
1165 	uint16_t attrs = 0;
1166 	uint32_t ret_fid = FFA_ERROR;
1167 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1168 
1169 	/*
1170 	 * The FFA_MEM_PERM_SET interface is only allowed during initialization
1171 	 */
1172 	if (sp_s->is_initialized) {
1173 		ret_val = FFA_DENIED;
1174 		goto out;
1175 	}
1176 
1177 	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
1178 	if (!sp_ctx)
1179 		goto out;
1180 
1181 	if (MUL_OVERFLOW(args->a2, SMALL_PAGE_SIZE, &region_size))
1182 		goto out;
1183 
1184 	if (args->a3 & FFA_MEM_PERM_RESERVED) {
1185 		/* Non-zero reserved bits */
1186 		goto out;
1187 	}
1188 
1189 	data_perm = args->a3 & FFA_MEM_PERM_DATA_PERM;
1190 	instruction_perm = args->a3 & FFA_MEM_PERM_INSTRUCTION_PERM;
1191 
1192 	/* RWX access right configuration is not permitted */
1193 	if (data_perm == FFA_MEM_PERM_RW && instruction_perm == FFA_MEM_PERM_X)
1194 		goto out;
1195 
1196 	switch (data_perm) {
1197 	case FFA_MEM_PERM_RO:
1198 		attrs = TEE_MATTR_UR;
1199 		break;
1200 	case FFA_MEM_PERM_RW:
1201 		attrs = TEE_MATTR_URW;
1202 		break;
1203 	default:
1204 		/* Invalid permission value */
1205 		goto out;
1206 	}
1207 
1208 	if (instruction_perm == FFA_MEM_PERM_X)
1209 		attrs |= TEE_MATTR_UX;
1210 
1211 	/* Set access rights */
1212 	ts_push_current_session(&sp_s->ts_sess);
1213 	res = vm_set_prot(&sp_ctx->uctx, args->a1, region_size, attrs);
1214 	ts_pop_current_session();
1215 	if (res != TEE_SUCCESS)
1216 		goto out;
1217 
1218 	ret_fid = FFA_SUCCESS_32;
1219 	ret_val = FFA_PARAM_MBZ;
1220 
1221 out:
1222 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
1223 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1224 }
1225 
1226 static void spmc_handle_version(struct thread_smc_args *args,
1227 				struct ffa_rxtx *rxtx)
1228 {
1229 	spmc_set_args(args, spmc_exchange_version(args->a1, rxtx),
1230 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1231 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1232 }
1233 
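/*
 * Handle FFA_CONSOLE_LOG_32/64: copy the characters packed into registers
 * a2..a7 into a NUL-terminated buffer and print it with trace_ext_puts().
 */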
1234 static void handle_console_log(struct thread_smc_args *args)
1235 {
1236 	uint32_t ret_fid = FFA_ERROR;
1237 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1238 	size_t char_count = args->a1 & FFA_CONSOLE_LOG_CHAR_COUNT_MASK;
1239 	const void *reg_list[] = {
1240 		&args->a2, &args->a3, &args->a4,
1241 		&args->a5, &args->a6, &args->a7
1242 	};
1243 	char buffer[FFA_CONSOLE_LOG_64_MAX_MSG_LEN + 1] = { 0 };
1244 	size_t max_length = 0;
1245 	size_t reg_size = 0;
1246 	size_t n = 0;
1247 
1248 	if (args->a0 == FFA_CONSOLE_LOG_64) {
1249 		max_length = FFA_CONSOLE_LOG_64_MAX_MSG_LEN;
1250 		reg_size = sizeof(uint64_t);
1251 	} else {
1252 		max_length = FFA_CONSOLE_LOG_32_MAX_MSG_LEN;
1253 		reg_size = sizeof(uint32_t);
1254 	}
1255 
1256 	if (char_count < 1 || char_count > max_length)
1257 		goto out;
1258 
1259 	for (n = 0; n < char_count; n += reg_size)
1260 		memcpy(buffer + n, reg_list[n / reg_size],
1261 		       MIN(char_count - n, reg_size));
1262 
1263 	buffer[char_count] = '\0';
1264 
1265 	trace_ext_puts(buffer);
1266 
1267 	ret_fid = FFA_SUCCESS_32;
1268 	ret_val = FFA_PARAM_MBZ;
1269 
1270 out:
1271 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
1272 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1273 }
1274 
1275 /*
1276  * FF-A message handler for SPs. Every message for or from an SP is handled
1277  * here. This is the entry of the sp_spmc kernel thread. The caller_sp is set
1278  * to NULL when it is the Normal World.
1279  */
1280 void spmc_sp_msg_handler(struct thread_smc_args *args,
1281 			 struct sp_session *caller_sp)
1282 {
1283 	thread_check_canaries();
1284 	do {
1285 		switch (args->a0) {
1286 #ifdef ARM64
1287 		case FFA_MSG_SEND_DIRECT_REQ_64:
1288 #endif
1289 		case FFA_MSG_SEND_DIRECT_REQ_32:
1290 			caller_sp = ffa_handle_sp_direct_req(args, caller_sp);
1291 			break;
1292 #ifdef ARM64
1293 		case FFA_MSG_SEND_DIRECT_RESP_64:
1294 #endif
1295 		case FFA_MSG_SEND_DIRECT_RESP_32:
1296 			caller_sp = ffa_handle_sp_direct_resp(args, caller_sp);
1297 			break;
1298 		case FFA_ERROR:
1299 			caller_sp = ffa_handle_sp_error(args, caller_sp);
1300 			break;
1301 		case FFA_MSG_WAIT:
1302 			/* FFA_MSG_WAIT gives control back to NW */
1303 			cpu_spin_lock(&caller_sp->spinlock);
1304 			caller_sp->state = sp_idle;
1305 			cpu_spin_unlock(&caller_sp->spinlock);
1306 			caller_sp = NULL;
1307 			break;
1308 #ifdef ARM64
1309 		case FFA_RXTX_MAP_64:
1310 #endif
1311 		case FFA_RXTX_MAP_32:
1312 			ts_push_current_session(&caller_sp->ts_sess);
1313 			spmc_handle_rxtx_map(args, &caller_sp->rxtx);
1314 			ts_pop_current_session();
1315 			sp_enter(args, caller_sp);
1316 			break;
1317 		case FFA_RXTX_UNMAP:
1318 			ts_push_current_session(&caller_sp->ts_sess);
1319 			spmc_handle_rxtx_unmap(args, &caller_sp->rxtx);
1320 			ts_pop_current_session();
1321 			sp_enter(args, caller_sp);
1322 			break;
1323 		case FFA_RX_RELEASE:
1324 			ts_push_current_session(&caller_sp->ts_sess);
1325 			spmc_handle_rx_release(args, &caller_sp->rxtx);
1326 			ts_pop_current_session();
1327 			sp_enter(args, caller_sp);
1328 			break;
1329 		case FFA_ID_GET:
1330 			args->a0 = FFA_SUCCESS_32;
1331 			args->a2 = caller_sp->endpoint_id;
1332 			sp_enter(args, caller_sp);
1333 			break;
1334 		case FFA_VERSION:
1335 			spmc_handle_version(args, &caller_sp->rxtx);
1336 			sp_enter(args, caller_sp);
1337 			break;
1338 		case FFA_FEATURES:
1339 			handle_features(args);
1340 			sp_enter(args, caller_sp);
1341 			break;
1342 		case FFA_SPM_ID_GET:
1343 			spmc_handle_spm_id_get(args);
1344 			sp_enter(args, caller_sp);
1345 			break;
1346 		case FFA_PARTITION_INFO_GET:
1347 			ts_push_current_session(&caller_sp->ts_sess);
1348 			spmc_handle_partition_info_get(args, &caller_sp->rxtx);
1349 			ts_pop_current_session();
1350 			sp_enter(args, caller_sp);
1351 			break;
1352 #ifdef ARM64
1353 		case FFA_MEM_SHARE_64:
1354 #endif
1355 		case FFA_MEM_SHARE_32:
1356 			ts_push_current_session(&caller_sp->ts_sess);
1357 			spmc_sp_handle_mem_share(args, &caller_sp->rxtx,
1358 						 caller_sp);
1359 			ts_pop_current_session();
1360 			sp_enter(args, caller_sp);
1361 			break;
1362 #ifdef ARM64
1363 		case FFA_MEM_RETRIEVE_REQ_64:
1364 #endif
1365 		case FFA_MEM_RETRIEVE_REQ_32:
1366 			ts_push_current_session(&caller_sp->ts_sess);
1367 			ffa_mem_retrieve(args, caller_sp, &caller_sp->rxtx);
1368 			ts_pop_current_session();
1369 			sp_enter(args, caller_sp);
1370 			break;
1371 		case FFA_MEM_RELINQUISH:
1372 			ts_push_current_session(&caller_sp->ts_sess);
1373 			ffa_mem_relinquish(args, caller_sp, &caller_sp->rxtx);
1374 			ts_pop_current_session();
1375 			sp_enter(args, caller_sp);
1376 			break;
1377 		case FFA_MEM_RECLAIM:
1378 			ffa_mem_reclaim(args, caller_sp);
1379 			sp_enter(args, caller_sp);
1380 			break;
1381 #ifdef ARM64
1382 		case FFA_MEM_PERM_GET_64:
1383 #endif
1384 		case FFA_MEM_PERM_GET_32:
1385 			handle_mem_perm_get(args, caller_sp);
1386 			sp_enter(args, caller_sp);
1387 			break;
1388 
1389 #ifdef ARM64
1390 		case FFA_MEM_PERM_SET_64:
1391 #endif
1392 		case FFA_MEM_PERM_SET_32:
1393 			handle_mem_perm_set(args, caller_sp);
1394 			sp_enter(args, caller_sp);
1395 			break;
1396 
1397 #ifdef ARM64
1398 		case FFA_CONSOLE_LOG_64:
1399 #endif
1400 		case FFA_CONSOLE_LOG_32:
1401 			handle_console_log(args);
1402 			sp_enter(args, caller_sp);
1403 			break;
1404 
1405 		default:
1406 			EMSG("Unhandled FFA function ID %#"PRIx32,
1407 			     (uint32_t)args->a0);
1408 			ffa_set_error(args, FFA_INVALID_PARAMETERS);
1409 			sp_enter(args, caller_sp);
1410 		}
1411 	} while (caller_sp);
1412 }
1413