// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021-2022, Arm Limited
 */
#include <assert.h>
#include <bench.h>
#include <io.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>
#include <mm/vm.h>
#include <optee_ffa.h>
#include <string.h>

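/*
 * Protects the receiver ref_count bookkeeping used by the retrieve,
 * relinquish and reclaim handlers below.
 */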
static unsigned int mem_ref_lock = SPINLOCK_UNLOCK;

void spmc_sp_start_thread(struct thread_smc_args *args)
{
	thread_sp_alloc_and_run(args);
}

static void ffa_set_error(struct thread_smc_args *args, uint32_t error)
{
	args->a0 = FFA_ERROR;
	args->a2 = error;
}

static void ffa_success(struct thread_smc_args *args)
{
	args->a0 = FFA_SUCCESS_32;
}

static TEE_Result ffa_get_dst(struct thread_smc_args *args,
			      struct sp_session *caller,
			      struct sp_session **dst)
{
	struct sp_session *s = NULL;

	if (args->a2 != FFA_PARAM_MBZ)
		return FFA_INVALID_PARAMETERS;

	s = sp_get_session(FFA_DST(args->a1));

	/* Message came from the NW */
	if (!caller) {
		if (!s) {
			EMSG("Neither destination nor source is an SP");
			return FFA_INVALID_PARAMETERS;
		}
	} else {
		/* Check if the source matches the endpoint we came from */
		if (FFA_SRC(args->a1) != caller->endpoint_id) {
			EMSG("Source address doesn't match the endpoint id");
			return FFA_INVALID_PARAMETERS;
		}
	}

	*dst = s;

	return FFA_OK;
}

static struct sp_mem_receiver *find_sp_mem_receiver(struct sp_session *s,
						    struct sp_mem *smem)
{
	struct sp_mem_receiver *receiver = NULL;

	/*
	 * FF-A Spec 8.10.2:
	 * Each Handle identifies a single unique composite memory region
	 * description, that is, there is a 1:1 mapping between the two.
	 *
	 * Each memory share has a unique handle. We can only have each SP
	 * once as a receiver in the memory share. For each receiver of a
	 * memory share, we have one sp_mem_access_descr object.
	 * This means that there can only be one SP linked to a specific
	 * struct sp_mem_access_descr.
	 */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->perm.endpoint_id == s->endpoint_id)
			break;
	}
	return receiver;
}

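/*
 * Register the SP addressed by @mem_acc as a receiver of the share @smem.
 * Fails when the SP isn't loaded, is already a receiver, or requests
 * permissions outside FFA_MEM_ACC_MASK.
 */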
static int add_mem_region_to_sp(struct ffa_mem_access *mem_acc,
				struct sp_mem *smem)
{
	struct ffa_mem_access_perm *access_perm = &mem_acc->access_perm;
	struct sp_session *s = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint8_t perm = READ_ONCE(access_perm->perm);
	uint16_t endpoint_id = READ_ONCE(access_perm->endpoint_id);

	s = sp_get_session(endpoint_id);

	/* Only add memory shares of loaded SPs */
	if (!s)
		return FFA_DENIED;

	/* Only allow each endpoint once */
	if (find_sp_mem_receiver(s, smem))
		return FFA_DENIED;

	if (perm & ~FFA_MEM_ACC_MASK)
		return FFA_DENIED;

	receiver = calloc(1, sizeof(struct sp_mem_receiver));
	if (!receiver)
		return FFA_NO_MEMORY;

	receiver->smem = smem;

	receiver->perm.endpoint_id = endpoint_id;
	receiver->perm.perm = perm;
	receiver->perm.flags = READ_ONCE(access_perm->flags);

	SLIST_INSERT_HEAD(&smem->receivers, receiver, link);

	return FFA_OK;
}

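/*
 * Handle FFA_MEM_SHARE. The RX/TX spinlock is held while the transaction
 * descriptor in the RX buffer is parsed. On success the 64-bit global
 * handle of the new share is returned in w2/w3.
 */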
static void spmc_sp_handle_mem_share(struct thread_smc_args *args,
				     struct ffa_rxtx *rxtx,
				     struct sp_session *owner_sp)
{
	uint64_t global_handle = 0;
	int res = FFA_OK;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;

	cpu_spin_lock(&rxtx->spinlock);

	res = spmc_sp_add_share(rxtx, args->a1, &global_handle, owner_sp);
	if (!res) {
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
		args->a3 = ret_w3;
		args->a2 = ret_w2;
		args->a1 = FFA_PARAM_MBZ;
		args->a0 = FFA_SUCCESS_32;
	} else {
		ffa_set_error(args, res);
	}

	cpu_spin_unlock(&rxtx->spinlock);
}

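/*
 * Add the address ranges of a share owned by an SP. The SP's virtual
 * address range is walked one mobj at a time and each chunk is recorded
 * as a separate sp_mem_map_region.
 */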
static int spmc_sp_add_sp_region(struct sp_mem *smem,
				 struct ffa_address_range *mem_reg,
				 struct sp_session *owner_sp,
				 uint8_t highest_permission)
{
	struct sp_ctx *sp_ctx = NULL;
	uint64_t va = READ_ONCE(mem_reg->address);
	int res = FFA_OK;
	uint64_t region_len = READ_ONCE(mem_reg->page_count) * SMALL_PAGE_SIZE;
	struct mobj *mobj = NULL;

	sp_ctx = to_sp_ctx(owner_sp->ts_sess.ctx);

	/*
	 * The memory region we try to share might not be linked to just one
	 * mobj. Create a new region for each mobj.
	 */
	while (region_len) {
		size_t len = region_len;
		struct sp_mem_map_region *region = NULL;
		uint16_t prot = 0;
		size_t offs = 0;

		/*
		 * There is already a mobj for each address that is in the
		 * SP's address range.
		 */
		mobj = vm_get_mobj(&sp_ctx->uctx, va, &len, &prot, &offs);
		if (!mobj)
			return FFA_DENIED;

		/*
		 * If we share memory from an SP, make sure we are not sharing
		 * it with higher permissions than it was originally mapped
		 * with.
		 */
		if ((highest_permission & FFA_MEM_ACC_RW) &&
		    !(prot & TEE_MATTR_UW)) {
			res = FFA_DENIED;
			goto err;
		}

		if ((highest_permission & FFA_MEM_ACC_EXE) &&
		    !(prot & TEE_MATTR_UX)) {
			res = FFA_DENIED;
			goto err;
		}

		region = calloc(1, sizeof(*region));
		if (!region) {
			res = FFA_NO_MEMORY;
			goto err;
		}
		region->mobj = mobj;
		region->page_offset = offs;
		region->page_count = len / SMALL_PAGE_SIZE;

		if (!sp_has_exclusive_access(region, &sp_ctx->uctx)) {
			free(region);
			res = FFA_DENIED;
			goto err;
		}

		va += len;
		region_len -= len;
		SLIST_INSERT_HEAD(&smem->regions, region, link);
	}

	return FFA_OK;
err:
	mobj_put(mobj);

	return res;
}

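/*
 * Add a region shared from the normal world. All address ranges in the
 * descriptor are collected into a single new mobj backing the share.
 */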
static int spmc_sp_add_nw_region(struct sp_mem *smem,
				 struct ffa_mem_region *mem_reg)
{
	uint64_t page_count = READ_ONCE(mem_reg->total_page_count);
	struct sp_mem_map_region *region = NULL;
	struct mobj *m = sp_mem_new_mobj(page_count, TEE_MATTR_MEM_TYPE_CACHED,
					 false);
	unsigned int i = 0;
	unsigned int idx = 0;
	int res = FFA_OK;
	uint64_t address_count = READ_ONCE(mem_reg->address_range_count);

	if (!m)
		return FFA_NO_MEMORY;

	for (i = 0; i < address_count; i++) {
		struct ffa_address_range *addr_range = NULL;

		addr_range = &mem_reg->address_range_array[i];
		if (sp_mem_add_pages(m, &idx,
				     READ_ONCE(addr_range->address),
				     READ_ONCE(addr_range->page_count))) {
			res = FFA_DENIED;
			goto clean_up;
		}
	}

	region = calloc(1, sizeof(*region));
	if (!region) {
		res = FFA_NO_MEMORY;
		goto clean_up;
	}

	region->mobj = m;
	region->page_count = page_count;

	if (!sp_has_exclusive_access(region, NULL)) {
		free(region);
		res = FFA_DENIED;
		goto clean_up;
	}

	SLIST_INSERT_HEAD(&smem->regions, region, link);
	return FFA_OK;
clean_up:
	mobj_put(m);
	return res;
}

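/*
 * Parse the FFA_MEM_SHARE transaction descriptor in the RX buffer and
 * build the matching struct sp_mem: validate the sender, copy the
 * transaction fields, add the shared regions and register each receiver.
 */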
int spmc_sp_add_share(struct ffa_rxtx *rxtx,
		      size_t blen, uint64_t *global_handle,
		      struct sp_session *owner_sp)
{
	int res = FFA_INVALID_PARAMETERS;
	unsigned int num_mem_accs = 0;
	unsigned int i = 0;
	struct ffa_mem_access *mem_acc = NULL;
	size_t needed_size = 0;
	size_t addr_range_offs = 0;
	struct ffa_mem_region *mem_reg = NULL;
	uint8_t highest_permission = 0;
	struct sp_mem *smem = sp_mem_new();
	struct ffa_mem_transaction *input_descr = rxtx->rx;
	uint16_t sender_id = READ_ONCE(input_descr->sender_id);

	if (!smem)
		return FFA_NO_MEMORY;

	if ((owner_sp && owner_sp->endpoint_id != sender_id) ||
	    (!owner_sp && sp_get_session(sender_id))) {
		res = FFA_DENIED;
		goto cleanup;
	}

	num_mem_accs = READ_ONCE(input_descr->mem_access_count);
	mem_acc = input_descr->mem_access_array;

	if (!num_mem_accs) {
		res = FFA_DENIED;
		goto cleanup;
	}

	/* Store the ffa_mem_transaction */
	smem->sender_id = sender_id;
	smem->mem_reg_attr = READ_ONCE(input_descr->mem_reg_attr);
	smem->flags = READ_ONCE(input_descr->flags);
	smem->tag = READ_ONCE(input_descr->tag);

	if (MUL_OVERFLOW(num_mem_accs, sizeof(*mem_acc), &needed_size) ||
	    ADD_OVERFLOW(needed_size, sizeof(*input_descr), &needed_size) ||
	    needed_size > blen) {
		res = FFA_NO_MEMORY;
		goto cleanup;
	}

	for (i = 0; i < num_mem_accs; i++)
		highest_permission |= READ_ONCE(mem_acc[i].access_perm.perm);

	addr_range_offs = READ_ONCE(mem_acc[0].region_offs);
	mem_reg = (void *)((char *)input_descr + addr_range_offs);

	/* Iterate over all the addresses */
	if (owner_sp) {
		size_t address_range = READ_ONCE(mem_reg->address_range_count);

		for (i = 0; i < address_range; i++) {
			struct ffa_address_range *addr_range = NULL;

			addr_range = &mem_reg->address_range_array[i];

			if (!core_is_buffer_inside((vaddr_t)addr_range,
						   sizeof(*addr_range),
						   (vaddr_t)rxtx->rx,
						   rxtx->size)) {
				res = FFA_NO_MEMORY;
				goto cleanup;
			}
			res = spmc_sp_add_sp_region(smem, addr_range,
						    owner_sp,
						    highest_permission);
			if (res)
				goto cleanup;
		}
	} else {
		res = spmc_sp_add_nw_region(smem, mem_reg);
		if (res)
			goto cleanup;
	}

	/* Add the memory address to the SP */
	for (i = 0; i < num_mem_accs; i++) {
		res = add_mem_region_to_sp(&mem_acc[i], smem);
		if (res)
			goto cleanup;
	}
	*global_handle = smem->global_handle;
	sp_mem_add(smem);

	return FFA_OK;

cleanup:
	sp_mem_remove(smem);
	return res;
}

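/* Sanity check: both mailbox buffers must be mapped and non-empty */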
static bool check_rxtx(struct ffa_rxtx *rxtx)
{
	return rxtx && rxtx->rx && rxtx->tx && rxtx->size > 0;
}

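/*
 * Validate an FFA_MEM_RETRIEVE_REQ descriptor against the stored share:
 * the tag, permissions and flags must match what was shared and the TX
 * buffer must be large enough for the response.
 */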
static TEE_Result check_retrieve_request(struct sp_mem_receiver *receiver,
					 struct ffa_mem_transaction *retr_dsc,
					 struct sp_mem *smem, int64_t tx_len)
{
	struct ffa_mem_access *retr_access = NULL;
	uint8_t share_perm = receiver->perm.perm;
	uint32_t retr_perm = 0;
	uint32_t retr_flags = READ_ONCE(retr_dsc->flags);
	uint64_t retr_tag = READ_ONCE(retr_dsc->tag);
	struct sp_mem_map_region *reg = NULL;

	/*
	 * The request came from the endpoint. It should only have one
	 * ffa_mem_access element.
	 */
	if (READ_ONCE(retr_dsc->mem_access_count) != 1)
		return TEE_ERROR_BAD_PARAMETERS;

	retr_access = retr_dsc->mem_access_array;
	retr_perm = READ_ONCE(retr_access->access_perm.perm);

	/* Check if the tag is correct */
	if (receiver->smem->tag != retr_tag) {
		EMSG("Incorrect tag %#"PRIx64" %#"PRIx64, receiver->smem->tag,
		     retr_tag);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/* Check permissions and flags */
	if ((retr_perm & FFA_MEM_ACC_RW) &&
	    !(share_perm & FFA_MEM_ACC_RW)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if ((retr_perm & FFA_MEM_ACC_EXE) &&
	    !(share_perm & FFA_MEM_ACC_EXE)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (retr_flags & FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) {
		DMSG("CLEAR_RELINQUISH is not allowed for FFA_SHARE");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/*
	 * Check if there is enough space in the TX buffer to send the
	 * response.
	 */
	tx_len -= sizeof(struct ffa_mem_transaction) +
		  sizeof(struct ffa_mem_access) +
		  sizeof(struct ffa_mem_region);

	if (tx_len < 0)
		return TEE_ERROR_OUT_OF_MEMORY;

	SLIST_FOREACH(reg, &smem->regions, link) {
		tx_len -= sizeof(struct ffa_address_range);
		if (tx_len < 0)
			return TEE_ERROR_OUT_OF_MEMORY;
	}

	return TEE_SUCCESS;
}

static void create_retrieve_response(void *dst_buffer,
				     struct sp_mem_receiver *receiver,
				     struct sp_mem *smem, struct sp_session *s)
{
	size_t off = 0;
	struct ffa_mem_region *dst_region = NULL;
	struct ffa_mem_transaction *d_ds = dst_buffer;
	struct ffa_address_range *addr_dst = NULL;
	struct sp_mem_map_region *reg = NULL;

	/*
	 * We respond with an FFA_MEM_RETRIEVE_RESP which defines the
	 * following data in the RX buffer of the SP:
	 * struct mem_transaction_descr
	 * struct mem_access_descr (always 1 element)
	 * struct mem_region_descr
	 */
	/* Copy the mem_transaction_descr */
	d_ds->sender_id = receiver->smem->sender_id;
	d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
	d_ds->flags = receiver->smem->flags;
	d_ds->tag = receiver->smem->tag;

	off = sizeof(struct ffa_mem_transaction) +
	      sizeof(struct ffa_mem_access);

	d_ds->mem_access_count = 1;

	/* Copy the mem_access_descr */
	d_ds->mem_access_array[0].region_offs = off;
	memcpy(&d_ds->mem_access_array[0].access_perm,
	       &receiver->perm, sizeof(struct ffa_mem_access_perm));

	/* Copy the mem_region_descr */
	dst_region = (struct ffa_mem_region *)((vaddr_t)d_ds + off);

	dst_region->address_range_count = 0;
	dst_region->total_page_count = 0;

	addr_dst = dst_region->address_range_array;

	SLIST_FOREACH(reg, &smem->regions, link) {
		uint32_t offset = reg->page_offset;
		struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);

		addr_dst->address = (uint64_t)sp_mem_get_va(&ctx->uctx,
							    offset,
							    reg->mobj);
		addr_dst->page_count = reg->page_count;
		dst_region->address_range_count++;

		dst_region->total_page_count += addr_dst->page_count;
	}
}

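/*
 * Handle FFA_MEM_RETRIEVE_REQ from an SP. The share is mapped into the
 * SP on the first retrieve only; later retrieves just bump the receiver
 * ref_count. The FFA_MEM_RETRIEVE_RESP is written into the TX buffer.
 */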
static void ffa_mem_retrieve(struct thread_smc_args *args,
			     struct sp_session *caller_sp,
			     struct ffa_rxtx *rxtx)
{
	int ret = FFA_OK;
	size_t tx_len = 0;
	struct ffa_mem_transaction *retr_dsc = NULL;
	struct ffa_mem_region *mem_region = NULL;
	uint64_t va = 0;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint32_t exceptions = 0;
	uint32_t address_offset = 0;
	size_t needed_size = 0;

	if (!check_rxtx(rxtx) || !rxtx->tx_is_mine) {
		ret = FFA_DENIED;
		goto err;
	}

	tx_len = rxtx->size;
	retr_dsc = rxtx->rx;

	smem = sp_mem_get(retr_dsc->global_handle);
	if (!smem) {
		DMSG("Incorrect handle");
		ret = FFA_DENIED;
		goto err;
	}

	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);

	address_offset = READ_ONCE(retr_dsc->mem_access_array[0].region_offs);

	if (ADD_OVERFLOW(address_offset, sizeof(struct ffa_mem_region),
			 &needed_size) || needed_size > tx_len) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	if (check_retrieve_request(receiver, retr_dsc, smem, tx_len) !=
	    TEE_SUCCESS) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	if (receiver->ref_count == UINT8_MAX) {
		ret = FFA_DENIED;
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
		goto err;
	}

	receiver->ref_count++;

	/* We only need to map the region the first time we request it. */
	if (receiver->ref_count == 1) {
		TEE_Result ret_map = TEE_SUCCESS;

		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

		/*
		 * Try to map the memory linked to the handle in
		 * sp_mem_access_descr.
		 */
		mem_region = (struct ffa_mem_region *)((vaddr_t)retr_dsc +
						       address_offset);

		va = READ_ONCE(mem_region->address_range_array[0].address);
		ret_map = sp_map_shared(caller_sp, receiver, smem, &va);

		if (ret_map) {
			EMSG("Could not map memory region: %#"PRIx32, ret_map);
			exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
			receiver->ref_count--;
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			ret = FFA_DENIED;
			goto err;
		}
	} else {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	}

	create_retrieve_response(rxtx->tx, receiver, smem, caller_sp);

	args->a0 = FFA_MEM_RETRIEVE_RESP;
	args->a1 = tx_len;
	args->a2 = tx_len;

	rxtx->tx_is_mine = false;

	return;
err:
	ffa_set_error(args, ret);
}

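/*
 * Handle FFA_MEM_RELINQUISH from an SP. The receiver ref_count is
 * decremented and the share is unmapped from the SP once the count
 * drops to zero.
 */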
static void ffa_mem_relinquish(struct thread_smc_args *args,
			       struct sp_session *caller_sp,
			       struct ffa_rxtx *rxtx)
{
	struct sp_mem *smem = NULL;
	struct ffa_mem_relinquish *mem = rxtx->rx;
	struct sp_mem_receiver *receiver = NULL;
	int err = FFA_NOT_SUPPORTED;
	uint32_t exceptions = 0;

	if (!check_rxtx(rxtx)) {
		ffa_set_error(args, FFA_DENIED);
		return;
	}

	exceptions = cpu_spin_lock_xsave(&rxtx->spinlock);
	smem = sp_mem_get(READ_ONCE(mem->handle));

	if (!smem) {
		DMSG("Incorrect handle");
		err = FFA_DENIED;
		goto err_unlock_rxtx;
	}

	if (READ_ONCE(mem->endpoint_count) != 1) {
		DMSG("Incorrect endpoint count");
		err = FFA_INVALID_PARAMETERS;
		goto err_unlock_rxtx;
	}

	if (READ_ONCE(mem->endpoint_id_array[0]) != caller_sp->endpoint_id) {
		DMSG("Incorrect endpoint id");
		err = FFA_DENIED;
		goto err_unlock_rxtx;
	}

	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);

	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
	if (!receiver->ref_count) {
		DMSG("Too many relinquish requests");
		err = FFA_DENIED;
		goto err_unlock_memref;
	}

	receiver->ref_count--;
	if (!receiver->ref_count) {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
		if (sp_unmap_ffa_regions(caller_sp, smem) != TEE_SUCCESS) {
			DMSG("Failed to unmap region");
			ffa_set_error(args, FFA_DENIED);
			return;
		}
	} else {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	}

	ffa_success(args);
	return;

err_unlock_rxtx:
	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
	ffa_set_error(args, err);
	return;
err_unlock_memref:
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	ffa_set_error(args, err);
}

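/*
 * Zero all regions of a share in the context of the owning SP. Used when
 * FFA_MEMORY_REGION_FLAG_CLEAR is set on reclaim.
 */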
static void zero_mem_region(struct sp_mem *smem, struct sp_session *s)
{
	void *addr = NULL;
	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
	struct sp_mem_map_region *reg = NULL;

	ts_push_current_session(&s->ts_sess);
	SLIST_FOREACH(reg, &smem->regions, link) {
		size_t sz = reg->page_count * SMALL_PAGE_SIZE;

		addr = sp_mem_get_va(&ctx->uctx, reg->page_offset, reg->mobj);

		assert(addr);
		memset(addr, 0, sz);
	}
	ts_pop_current_session();
}

/*
 * ffa_mem_reclaim returns false if it couldn't process the reclaim message.
 * This happens when the memory region was shared with the OP-TEE endpoint.
 * After this, thread_spmc calls handle_mem_reclaim() to make sure that the
 * region is reclaimed from the OP-TEE endpoint.
 */
bool ffa_mem_reclaim(struct thread_smc_args *args,
		     struct sp_session *caller_sp)
{
	uint64_t handle = reg_pair_to_64(args->a2, args->a1);
	uint32_t flags = args->a3;
	uint32_t endpoint = 0;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint32_t exceptions = 0;

	smem = sp_mem_get(handle);
	if (!smem)
		return false;

	if (caller_sp)
		endpoint = caller_sp->endpoint_id;

	/* Make sure that the caller is the owner of the share */
	if (smem->sender_id != endpoint) {
		ffa_set_error(args, FFA_DENIED);
		return true;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	/* Make sure that all shares were relinquished */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->ref_count != 0) {
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	if (flags & FFA_MEMORY_REGION_FLAG_CLEAR) {
		if (caller_sp) {
			zero_mem_region(smem, caller_sp);
		} else {
			/*
			 * Currently we don't support zeroing Normal World
			 * memory. To do this we would have to map the memory
			 * again, zero it and unmap it.
			 */
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	sp_mem_remove(smem);
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

	ffa_success(args);
	return true;
}

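/*
 * Forward FFA_MSG_SEND_DIRECT_REQ to the destination SP. Returns the
 * session to run next, or NULL when control goes back to the normal
 * world.
 */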
static struct sp_session *
ffa_handle_sp_direct_req(struct thread_smc_args *args,
			 struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	TEE_Result res = FFA_OK;

	if (args->a2 != FFA_PARAM_MBZ) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return NULL;
	}

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send a message to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}
	if (!dst) {
		EMSG("Request to normal world not supported");
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return NULL;
	}

	cpu_spin_lock(&dst->spinlock);
	if (dst->state != sp_idle) {
		DMSG("SP is busy");
		ffa_set_error(args, FFA_BUSY);
		cpu_spin_unlock(&dst->spinlock);
		return caller_sp;
	}

	dst->state = sp_busy;
	cpu_spin_unlock(&dst->spinlock);

	/*
	 * Store the calling endpoint id. This will make it possible to check
	 * if the response is sent back to the correct endpoint.
	 */
	dst->caller_id = FFA_SRC(args->a1);

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return the error to the calling SP */
		return caller_sp;
	}

	return dst;
}

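/*
 * Forward FFA_MSG_SEND_DIRECT_RESP to the endpoint that issued the
 * matching request: either another SP or the normal world.
 */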
static struct sp_session *
ffa_handle_sp_direct_resp(struct thread_smc_args *args,
			  struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	TEE_Result res = FFA_OK;

	if (!caller_sp) {
		EMSG("Response from normal world not supported");
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return NULL;
	}

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send a response to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}

	if (caller_sp->state != sp_busy) {
		EMSG("SP is not waiting for a request");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	if (caller_sp->caller_id != FFA_DST(args->a1)) {
		EMSG("FFA_MSG_SEND_DIRECT_RESP to incorrect SP");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	caller_sp->caller_id = 0;

	cpu_spin_lock(&caller_sp->spinlock);
	caller_sp->state = sp_idle;
	cpu_spin_unlock(&caller_sp->spinlock);

	if (!dst) {
		/* Send the message back to the NW */
		return NULL;
	}

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return the error to the calling SP */
		return caller_sp;
	}
	return dst;
}

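/*
 * Propagate an FFA_ERROR along the call chain. If the destination SP
 * cannot be entered it is marked dead and the error is unwound one
 * link further.
 */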
static struct sp_session *
ffa_handle_sp_error(struct thread_smc_args *args,
		    struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;

	dst = sp_get_session(FFA_DST(args->a1));

	/* FFA_ERROR came from the Normal World */
	if (caller_sp)
		caller_sp->state = sp_idle;

	/* If dst == NULL send the message to the Normal World */
	if (dst && sp_enter(args, dst)) {
		/*
		 * We cannot return the error. Unwind the call chain by one
		 * link. Set the state of the SP to dead.
		 */
		dst->state = sp_dead;
		/* Create the error. */
		ffa_set_error(args, FFA_DENIED);
		return sp_get_session(dst->caller_id);
	}

	return dst;
}

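/*
 * Handle FFA_FEATURES queries from an SP; only the RXTX_MAP interface
 * is reported as supported.
 */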
static void handle_features(struct thread_smc_args *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
		break;
	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

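/*
 * FFA_MEM_PERM_GET: report the data and instruction access permissions
 * of the page at args->a1 in the SP's address space.
 */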
static void handle_mem_perm_get(struct thread_smc_args *args,
				struct sp_session *sp_s)
{
	struct sp_ctx *sp_ctx = NULL;
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	uint16_t attrs = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;

	/*
	 * The FFA_MEM_PERM_GET interface is only allowed during initialization
	 */
	if (sp_s->is_initialized) {
		ret_val = FFA_DENIED;
		goto out;
	}

	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
	if (!sp_ctx)
		goto out;

	/* Query memory attributes */
	ts_push_current_session(&sp_s->ts_sess);
	res = vm_get_prot(&sp_ctx->uctx, args->a1, SMALL_PAGE_SIZE, &attrs);
	ts_pop_current_session();
	if (res)
		goto out;

	/* Build response value */
	ret_fid = FFA_SUCCESS_32;
	ret_val = 0;
	if ((attrs & TEE_MATTR_URW) == TEE_MATTR_URW)
		ret_val |= FFA_MEM_PERM_RW;
	else if (attrs & TEE_MATTR_UR)
		ret_val |= FFA_MEM_PERM_RO;

	if ((attrs & TEE_MATTR_UX) == 0)
		ret_val |= FFA_MEM_PERM_NX;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

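/*
 * FFA_MEM_PERM_SET: update the access rights of the region starting at
 * args->a1 (args->a2 pages) according to the permission bits in args->a3.
 * RWX mappings are rejected.
 */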
static void handle_mem_perm_set(struct thread_smc_args *args,
				struct sp_session *sp_s)
{
	struct sp_ctx *sp_ctx = NULL;
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	size_t region_size = 0;
	uint32_t data_perm = 0;
	uint32_t instruction_perm = 0;
	uint16_t attrs = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;

	/*
	 * The FFA_MEM_PERM_SET interface is only allowed during initialization
	 */
	if (sp_s->is_initialized) {
		ret_val = FFA_DENIED;
		goto out;
	}

	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
	if (!sp_ctx)
		goto out;

	if (MUL_OVERFLOW(args->a2, SMALL_PAGE_SIZE, &region_size))
		goto out;

	if (args->a3 & FFA_MEM_PERM_RESERVED) {
		/* Non-zero reserved bits */
		goto out;
	}

	data_perm = args->a3 & FFA_MEM_PERM_DATA_PERM;
	instruction_perm = args->a3 & FFA_MEM_PERM_INSTRUCTION_PERM;

	/* RWX access right configuration is not permitted */
	if (data_perm == FFA_MEM_PERM_RW && instruction_perm == FFA_MEM_PERM_X)
		goto out;

	switch (data_perm) {
	case FFA_MEM_PERM_RO:
		attrs = TEE_MATTR_UR;
		break;
	case FFA_MEM_PERM_RW:
		attrs = TEE_MATTR_URW;
		break;
	default:
		/* Invalid permission value */
		goto out;
	}

	if (instruction_perm == FFA_MEM_PERM_X)
		attrs |= TEE_MATTR_UX;

	/* Set access rights */
	ts_push_current_session(&sp_s->ts_sess);
	res = vm_set_prot(&sp_ctx->uctx, args->a1, region_size, attrs);
	ts_pop_current_session();
	if (res != TEE_SUCCESS)
		goto out;

	ret_fid = FFA_SUCCESS_32;
	ret_val = FFA_PARAM_MBZ;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}


/*
 * FF-A message handler for SPs. Every message to or from an SP is handled
 * here. This is the entry point of the sp_spmc kernel thread. caller_sp is
 * NULL when the message comes from the Normal World.
 */
void spmc_sp_msg_handler(struct thread_smc_args *args,
			 struct sp_session *caller_sp)
{
	thread_check_canaries();
	do {
		switch (args->a0) {
#ifdef ARM64
		case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
		case FFA_MSG_SEND_DIRECT_REQ_32:
			caller_sp = ffa_handle_sp_direct_req(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MSG_SEND_DIRECT_RESP_64:
#endif
		case FFA_MSG_SEND_DIRECT_RESP_32:
			caller_sp = ffa_handle_sp_direct_resp(args, caller_sp);
			break;
		case FFA_ERROR:
			caller_sp = ffa_handle_sp_error(args, caller_sp);
			break;
		case FFA_MSG_WAIT:
			/* FFA_MSG_WAIT gives control back to the NW */
			cpu_spin_lock(&caller_sp->spinlock);
			caller_sp->state = sp_idle;
			cpu_spin_unlock(&caller_sp->spinlock);
			caller_sp = NULL;
			break;
#ifdef ARM64
		case FFA_RXTX_MAP_64:
#endif
		case FFA_RXTX_MAP_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_map(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RXTX_UNMAP:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_unmap(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RX_RELEASE:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rx_release(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_ID_GET:
			args->a0 = FFA_SUCCESS_32;
			args->a2 = caller_sp->endpoint_id;
			sp_enter(args, caller_sp);
			break;
		case FFA_VERSION:
			spmc_handle_version(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_FEATURES:
			handle_features(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_PARTITION_INFO_GET:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_partition_info_get(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_SHARE_64:
#endif
		case FFA_MEM_SHARE_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_sp_handle_mem_share(args, &caller_sp->rxtx,
						 caller_sp);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_RETRIEVE_REQ_64:
#endif
		case FFA_MEM_RETRIEVE_REQ_32:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_retrieve(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RELINQUISH:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_relinquish(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RECLAIM:
			ffa_mem_reclaim(args, caller_sp);
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_PERM_GET_64:
#endif
		case FFA_MEM_PERM_GET_32:
			handle_mem_perm_get(args, caller_sp);
			sp_enter(args, caller_sp);
			break;

#ifdef ARM64
		case FFA_MEM_PERM_SET_64:
#endif
		case FFA_MEM_PERM_SET_32:
			handle_mem_perm_set(args, caller_sp);
			sp_enter(args, caller_sp);
			break;
		default:
			EMSG("Unhandled FFA function ID %#"PRIx32,
			     (uint32_t)args->a0);
			ffa_set_error(args, FFA_INVALID_PARAMETERS);
			sp_enter(args, caller_sp);
		}
	} while (caller_sp);
}