/*
 * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <string.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per-platform basis to ensure enough
 * storage can be made available.
 * The address of the data store will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};
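
/*
 * Illustrative note: with this start value the handle crosses the 32-bit
 * boundary after only 64 allocations (0xffffffc0 + 0x40 == 0x100000000),
 * so callers that mishandle the upper half of the handle fail early
 * rather than only in rare long-running configurations.
 */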

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}
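
/*
 * Example: a 0x100-byte descriptor occupies
 * offsetof(struct spmc_shmem_obj, desc) + 0x100 bytes in the datastore,
 * i.e. the bookkeeping fields immediately followed by the descriptor
 * stored in place.
 */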

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that
 *              allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += obj_size;
	return obj;
}

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */
static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				  struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}
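
/*
 * Example: with objects A, B and C packed contiguously, freeing B
 * memmove()s C down over B's storage and shrinks state->allocated,
 * which is why a cached pointer to C (not just to B) becomes invalid.
 */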

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:      Global state.
 * @offset:     Offset used to track which objects have previously been
 *              returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided offset.
 *	   %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}
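
/*
 * Typical iteration pattern (as used by spmc_shmem_check_state_obj()
 * below):
 *
 *	size_t offset = 0;
 *	struct spmc_shmem_obj *obj;
 *
 *	while ((obj = spmc_shmem_obj_get_next(state, &offset)) != NULL) {
 *		... inspect obj ...
 *	}
 */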

/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc:         The memory transaction descriptor.
 * @index:        The index of the emad element to be accessed.
 * @ffa_version:  FF-A version of the provided structure.
 * @emad_size:    Will be populated with the size of the returned emad
 *                descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
			uint32_t ffa_version, size_t *emad_size)
{
	uint8_t *emad;
	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		/* Cast our descriptor to the v1.0 format. */
		struct ffa_mtd_v1_0 *mtd_v1_0 =
					(struct ffa_mtd_v1_0 *) desc;
		emad = (uint8_t *) &(mtd_v1_0->emad);
		*emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("Emad offset is not aligned.\n");
			return NULL;
		}
		emad = ((uint8_t *) desc + desc->emad_offset);
		*emad_size = desc->emad_size;
	}
	return (emad + (*emad_size * index));
}
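
/*
 * Layout sketch of the two formats handled above (not to scale):
 *
 *	v1.0: | ffa_mtd_v1_0 | emad[0] | emad[1] | ...
 *	      emad array at a fixed offset within the header, with a fixed
 *	      sizeof(struct ffa_emad_v1_0) stride.
 *
 *	v1.1: | ffa_mtd | ... | emad[0] | emad[1] | ...
 *	      emad array located via desc->emad_offset (16-byte aligned),
 *	      advanced by the sender-provided desc->emad_size stride.
 */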

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *				 FF-A version of the descriptor.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *	   region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);
	/* Ensure the emad array was found. */
	if (emad == NULL) {
		return NULL;
	}

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
 */
static size_t
spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
				    uint32_t ffa_version)
{
	struct ffa_comp_mrd *comp_mrd;

	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
	if (comp_mrd == NULL) {
		return 0;
	}
	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
}

/**
 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
 *				a given memory transaction.
 * @desc:       Descriptor of the memory transaction.
 * @sp_id:      Partition ID to validate.
 *
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(const struct ffa_mtd *desc, uint16_t sp_id)
{
	bool found = false;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/* Defensive check: stop if the emad cannot be located. */
		if (emad == NULL) {
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory region in the request against existing
	 * transactions.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/* Check if regions are not overlapping. */
			if (!((region2_end <= region1_start) ||
			      (region1_end <= region2_start))) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}
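
/*
 * Example: a two-page range at 0x1000 (0x1000-0x3000) and a one-page
 * range at 0x2000 (0x2000-0x3000) overlap, since neither end lies at or
 * below the other's start; 0x1000-0x3000 and 0x3000-0x4000 do not.
 */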

/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:       The original v1.0 memory transaction descriptor.
 * @desc_size:  The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static size_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}
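
/*
 * Both this helper and spmc_shm_get_v1_0_descriptor_size() below compute
 * the same layout sum, differing only in the header type:
 *
 *	size = sizeof(header) +
 *	       emad_count * sizeof(struct ffa_emad_v1_0) +
 *	       sizeof(struct ffa_comp_mrd) +
 *	       address_range_count * sizeof(struct ffa_cons_mrd);
 */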

/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:       The original v1.1 memory transaction descriptor.
 * @desc_size:  The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrds. */
	size += sizeof(struct ffa_comp_mrd);

	/* Add the size of the constituent mrds. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:	The shared memory object to populate with the converted
 *		descriptor.
 * @orig:	The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
446 	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
447 	struct ffa_mtd *out = &out_obj->desc;
448 	struct ffa_emad_v1_0 *emad_array_in;
449 	struct ffa_emad_v1_0 *emad_array_out;
450 	struct ffa_comp_mrd *mrd_in;
451 	struct ffa_comp_mrd *mrd_out;
452 
453 	size_t mrd_in_offset;
454 	size_t mrd_out_offset;
455 	size_t mrd_size = 0;
456 
457 	/* Populate the new descriptor format from the v1.0 struct. */
458 	out->sender_id = mtd_orig->sender_id;
459 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
460 	out->flags = mtd_orig->flags;
461 	out->handle = mtd_orig->handle;
462 	out->tag = mtd_orig->tag;
463 	out->emad_count = mtd_orig->emad_count;
464 	out->emad_size = sizeof(struct ffa_emad_v1_0);
465 
466 	/*
467 	 * We will locate the emad descriptors directly after the ffa_mtd
468 	 * struct. This will be 8-byte aligned.
469 	 */
470 	out->emad_offset = sizeof(struct ffa_mtd);
471 
472 	emad_array_in = mtd_orig->emad;
473 	emad_array_out = (struct ffa_emad_v1_0 *)
474 			 ((uint8_t *) out + out->emad_offset);
475 
476 	/* Copy across the emad structs. */
477 	for (unsigned int i = 0U; i < out->emad_count; i++) {
478 		memcpy(&emad_array_out[i], &emad_array_in[i],
479 		       sizeof(struct ffa_emad_v1_0));
480 	}
481 
482 	/* Place the mrd descriptors after the end of the emad descriptors.*/
483 	mrd_in_offset = emad_array_in->comp_mrd_offset;
484 	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
485 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
486 
487 	/* Add the size of the composite memory region descriptor. */
488 	mrd_size += sizeof(struct ffa_comp_mrd);
489 
490 	/* Find the mrd descriptor. */
491 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
492 
493 	/* Add the size of the constituent memory region descriptors. */
494 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
495 
496 	/*
497 	 * Update the offset in the emads by the delta between the input and
498 	 * output addresses.
499 	 */
500 	for (unsigned int i = 0U; i < out->emad_count; i++) {
501 		emad_array_out[i].comp_mrd_offset =
502 			emad_array_in[i].comp_mrd_offset +
503 			(mrd_out_offset - mrd_in_offset);
504 	}
505 
506 	/* Verify that we stay within bound of the memory descriptors. */
507 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
508 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
509 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
510 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
511 		ERROR("%s: Invalid mrd structure.\n", __func__);
512 		return false;
513 	}
514 
515 	/* Copy the mrd descriptors directly. */
516 	memcpy(mrd_out, mrd_in, mrd_size);
517 
518 	return true;
519 }
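
/*
 * Resulting v1.1 object layout produced by the conversion above:
 *
 *	| ffa_mtd | emad[0..emad_count-1] | ffa_comp_mrd | ffa_cons_mrd[] |
 *
 * with out->emad_offset == sizeof(struct ffa_mtd) and every
 * comp_mrd_offset rebased by (mrd_out_offset - mrd_in_offset).
 */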

/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                v1.0 memory object.
 * @out_obj:    The shared memory object to populate with the v1.0 descriptor.
 * @orig:       The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/*
	 * Copy across the emad structs, advancing the input pointer by the
	 * v1.1 emad stride in bytes (not in struct units).
	 */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *) emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:	     Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:	     Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:	     Size of the buffer to populate.
 * @offset:	     The offset of the converted descriptor to copy.
 * @copy_size:	     Will be populated with the number of bytes copied.
 * @v1_0_desc_size:  Will be populated with the total size of the v1.0
 *                   descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				 size_t buf_size, size_t offset,
				 size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
				&orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}
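
/*
 * Callers pass @offset == 0 for the initial FFA_MEM_RETRIEVE_RESP and a
 * non-zero fragment offset for FFA_MEM_FRAG_RX, so the same conversion
 * path serves both the first response and its continuation fragments.
 */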

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:	  Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
 * offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	uint32_t comp_mrd_offset = 0;

	if (obj->desc.emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, obj->desc.emad_count);
		return -EINVAL;
	}

	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
		size_t size;
		size_t count;
		size_t expected_size;
		size_t total_page_count;
		size_t emad_size;
		size_t desc_size;
		size_t header_emad_size;
		uint32_t offset;
		struct ffa_comp_mrd *comp;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
					       ffa_version, &emad_size);
		if (emad == NULL) {
			WARN("%s: invalid emad structure.\n", __func__);
			return -EINVAL;
		}

		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
			WARN("Invalid emad access.\n");
			return -EINVAL;
		}

		offset = emad->comp_mrd_offset;

		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
			desc_size = sizeof(struct ffa_mtd_v1_0);
		} else {
			desc_size = sizeof(struct ffa_mtd);
		}

		header_emad_size = desc_size +
			(obj->desc.emad_count * emad_size);

		if (offset < header_emad_size) {
			WARN("%s: invalid object, offset %u < header + emad %zu\n",
			     __func__, offset, header_emad_size);
			return -EINVAL;
		}

		size = obj->desc_size;

		if (offset > size) {
			WARN("%s: invalid object, offset %u > total size %zu\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= offset;

		if (size < sizeof(struct ffa_comp_mrd)) {
			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= sizeof(struct ffa_comp_mrd);

		count = size / sizeof(struct ffa_cons_mrd);

		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);

		if (comp == NULL) {
			WARN("%s: invalid comp_mrd offset\n", __func__);
			return -EINVAL;
		}

		if (comp->address_range_count != count) {
			WARN("%s: invalid object, desc count %u != %zu\n",
			     __func__, comp->address_range_count, count);
			return -EINVAL;
		}

		expected_size = offset + sizeof(*comp) +
				spmc_shmem_obj_ffa_constituent_size(obj,
								    ffa_version);

		if (expected_size != obj->desc_size) {
			WARN("%s: invalid object, computed size %zu != size %zu\n",
			       __func__, expected_size, obj->desc_size);
			return -EINVAL;
		}

		if (obj->desc_filled < obj->desc_size) {
			/*
			 * The whole descriptor has not yet been received.
			 * Skip final checks.
			 */
			return 0;
		}

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors. Store the
		 * first entry and compare against subsequent entries.
		 */
		if (comp_mrd_offset == 0) {
			comp_mrd_offset = offset;
		} else {
			if (comp_mrd_offset != offset) {
				ERROR("%s: mismatching offsets provided, %u != %u\n",
				       __func__, offset, comp_mrd_offset);
				return -EINVAL;
			}
		}

		total_page_count = 0;

		for (size_t i = 0; i < count; i++) {
			total_page_count +=
				comp->address_range_array[i].page_count;
		}
		if (comp->total_page_count != total_page_count) {
			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
			     __func__, comp->total_page_count,
			     total_page_count);
			return -EINVAL;
		}
	}
	return 0;
}
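
/*
 * The per-emad size check above reduces to requiring:
 *
 *	obj->desc_size == comp_mrd_offset + sizeof(struct ffa_comp_mrd) +
 *			  address_range_count * sizeof(struct ffa_cons_mrd)
 *
 * i.e. the constituent array must end exactly at the end of the received
 * descriptor.
 */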

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *				regions that are currently involved with an
 *				existing memory transaction. This implies that
 *				the memory is not in a valid state for lending.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if invalid memory state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
								  ffa_version);

	if (requested_mrd == NULL) {
		return -EINVAL;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
							  FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return -EINVAL;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return -EINVAL;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       uint32_t ffa_version,
			       void *smc_handle)
{
	int ret;
	size_t emad_size;
	uint32_t handle_low;
	uint32_t handle_high;
	struct ffa_emad_v1_0 *emad;
	struct ffa_emad_v1_0 *other_emad;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer, fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/* Ensure the NS bit is set to 0. */
	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags so ensure none are
	 * requested.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment, descriptor header has been copied */
		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
		obj->desc.flags |= mtd_flag;
	}

	obj->desc_filled += fragment_length;
	ret = spmc_shmem_check_obj(obj, ffa_version);
	if (ret != 0) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	handle_low = (uint32_t)obj->desc.handle;
	handle_high = obj->desc.handle >> 32;

	if (obj->desc_filled != obj->desc_size) {
		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
			 handle_high, obj->desc_filled,
			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
	}

	/* The full descriptor has been received, perform any final checks. */

	/*
	 * If a partition ID resides in the secure world validate that the
	 * partition ID is for a known partition. Ignore any partition ID
	 * belonging to the normal world as it is assumed the Hypervisor will
	 * have validated these.
	 */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_bad_desc;
		}

		ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;

		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	/* Ensure partition IDs are not duplicated. */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_bad_desc;
		}
		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
							     ffa_version,
							     &emad_size);
			if (other_emad == NULL) {
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}

			if (emad->mapd.endpoint_id ==
				other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, emad->mapd.endpoint_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	ret = spmc_shmem_check_state_obj(obj, ffa_version);
	if (ret) {
		ERROR("%s: invalid memory region descriptor.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_bad_desc;
	}

	/*
	 * Everything checks out, if the sender was using FF-A v1.0, convert
	 * the descriptor format to use the v1.1 structures.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		struct spmc_shmem_obj *v1_1_obj;
		uint64_t mem_handle;

		/* Calculate the size that the v1.1 descriptor will require. */
		size_t v1_1_desc_size =
		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
						      obj->desc_size);

		if (v1_1_desc_size == 0U) {
			ERROR("%s: cannot determine size of descriptor.\n",
			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/* Get a new obj to store the v1.1 descriptor. */
		v1_1_obj =
		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);

		if (!v1_1_obj) {
			ret = FFA_ERROR_NO_MEMORY;
			goto err_arg;
		}

		/* Perform the conversion from v1.0 to v1.1. */
		v1_1_obj->desc_size = v1_1_desc_size;
		v1_1_obj->desc_filled = v1_1_desc_size;
		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
			ERROR("%s: Could not convert mtd!\n", __func__);
			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_arg;
		}

		/*
		 * We're finished with the v1.0 descriptor so free it
		 * and continue our checks with the new v1.1 descriptor.
		 */
		mem_handle = obj->desc.handle;
		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
		if (obj == NULL) {
			ERROR("%s: Failed to find converted descriptor.\n",
			     __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			return spmc_ffa_error_return(smc_handle, ret);
		}
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_begin(&obj->desc);
	if (ret != 0) {
		goto err_arg;
	}

	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
		 0, 0, 0);

err_bad_desc:
err_arg:
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	return spmc_ffa_error_return(smc_handle, ret);
}

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @client:             Client state.
 * @total_length:       Total length of shared memory descriptor.
 * @fragment_length:    Length of fragment of shared memory descriptor passed in
 *                      this call.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
			bool secure_origin,
			uint64_t total_length,
			uint32_t fragment_length,
			uint64_t address,
			uint32_t page_count,
			void *cookie,
			void *handle,
			uint64_t flags)
{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Check if the descriptor is smaller than the v1.0 descriptor. The
	 * descriptor cannot be smaller than this structure.
	 */
	if (fragment_length < sizeof(struct ffa_mtd_v1_0)) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, sizeof(struct ffa_mtd_v1_0));
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
	} else {
		WARN("%s: invalid memory management operation.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);
	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
	if (obj == NULL) {
		ret = FFA_ERROR_NO_MEMORY;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
				 ffa_version, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}
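
/*
 * Illustrative message flow for a descriptor larger than the RX/TX
 * buffer pair:
 *
 *	NWd:  FFA_MEM_SHARE(total_length, fragment_length)
 *	SPMC:     -> FFA_MEM_FRAG_RX(handle, desc_filled)
 *	NWd:  FFA_MEM_FRAG_TX(handle, next fragment)  (repeat until full)
 *	SPMC:     -> FFA_SUCCESS(handle)
 *
 * Each FFA_MEM_FRAG_TX re-enters spmc_ffa_fill_desc() via
 * spmc_ffa_mem_frag_tx() below.
 */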

/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client:             Client state.
 * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:    Length of fragments transmitted.
 * @sender_id:          Vmid of sender in bits [31:16]
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle 0x%lx.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
				 handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
 *				      if the caller implements a version greater
 *				      than FF-A 1.0 or if they have requested
 *				      the functionality.
 *				      TODO: We are assuming that the caller is
 *				      an SP. To support retrieval from the
 *				      normal world this function will need to be
 *				      expanded accordingly.
 * @resp:       Descriptor populated in caller's RX buffer.
 * @sp_ctx:     Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
			 struct secure_partition_desc *sp_ctx)
{
	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
	    sp_ctx->ns_bit_requested) {
		/*
		 * Currently memory senders must reside in the normal
		 * world, and we do not have the functionality to change
		 * the state of memory dynamically. Therefore we can always set
		 * the NS bit to 1.
		 */
		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
	}
}

/**
 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
 * @smc_fid:            FID of SMC
 * @total_length:       Total length of retrieve request descriptor if this is
 *                      the first call. Otherwise (unsupported) must be 0.
 * @fragment_length:    Length of fragment of retrieve request descriptor passed
 *                      in this call. Only @fragment_length == @total_length is
 *                      supported by this implementation.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_RETRIEVE_RESP.
 *
 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
 * Used by secure os to retrieve memory already shared by non-secure os.
 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
 * the client must call FFA_MEM_FRAG_RX until the full response has been
 * received.
 *
 * Return: @handle on success, error code on failure.
 */
long
spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t total_length,
			  uint32_t fragment_length,
			  uint64_t address,
			  uint32_t page_count,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	size_t buf_size;
	size_t copy_size = 0;
	size_t min_desc_size;
	size_t out_desc_size = 0;

	/*
	 * Currently we are only accessing fields that are the same in both the
	 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
	 * here. We only need to validate against the appropriate struct size.
	 */
	struct ffa_mtd *resp;
	const struct ffa_mtd *req;
	struct spmc_shmem_obj *obj = NULL;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

	if (!secure_origin) {
		WARN("%s: unsupported retrieve req direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region not supported.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	req = mbox->tx_buffer;
	resp = mbox->rx_buffer;
	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_mailbox;
	}

	if (fragment_length != total_length) {
		WARN("%s: fragmented retrieve request not supported.\n",
		     __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (req->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, req->emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else {
		min_desc_size = sizeof(struct ffa_mtd);
	}
	if (total_length < min_desc_size) {
		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
		     min_desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
		WARN("%s: wrong sender id 0x%x != 0x%x\n",
		     __func__, req->sender_id, obj->desc.sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
		     __func__, req->tag, obj->desc.tag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
		WARN("%s: mismatch of endpoint counts %u != %u\n",
		     __func__, req->emad_count, obj->desc.emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Ensure the NS bit is set to 0 in the request. */
	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->flags != 0U) {
		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
			/*
			 * If the retrieve request specifies the memory
			 * transaction ensure it matches what we expect.
			 */
			WARN("%s: wrong mem transaction flags %x != %x\n",
			__func__, req->flags, obj->desc.flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}

		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
			/*
			 * Current implementation does not support donate and
			 * it supports no other flags.
			 */
			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/* Validate the caller is a valid participant. */
	if (!spmc_shmem_obj_validate_id(&obj->desc, sp_ctx->sp_id)) {
		WARN("%s: Invalid endpoint ID (0x%x).\n",
			__func__, sp_ctx->sp_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate that the provided emad offset and structure is valid. */
	for (size_t i = 0; i < req->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			WARN("%s: invalid emad structure.\n", __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}

		if ((uintptr_t) emad >= (uintptr_t)
					((uint8_t *) req + total_length)) {
			WARN("Invalid emad access.\n");
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/*
	 * Validate all the endpoints match in the case of multiple
	 * borrowers. We don't mandate that the order of the borrowers
	 * must match in the descriptors therefore check to see if the
	 * endpoints match in any order.
	 */
	for (size_t i = 0; i < req->emad_count; i++) {
		bool found = false;
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;
		struct ffa_emad_v1_0 *other_emad;

		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
					       &emad_size);
		if (emad == NULL) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}

		for (size_t j = 0; j < obj->desc.emad_count; j++) {
			other_emad = spmc_shmem_obj_get_emad(
					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
					&emad_size);

			if (other_emad == NULL) {
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_unlock_all;
			}

			if (req->emad_count &&
			    emad->mapd.endpoint_id ==
			    other_emad->mapd.endpoint_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			WARN("%s: invalid receiver id (0x%x).\n",
			     __func__, emad->mapd.endpoint_id);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	mbox->state = MAILBOX_STATE_FULL;

	if (req->emad_count != 0U) {
		obj->in_use++;
	}

	/*
	 * If the caller is v1.0 convert the descriptor, otherwise copy
	 * directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
							&copy_size,
							&out_desc_size);
		if (ret != 0U) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		copy_size = MIN(obj->desc_size, buf_size);
		out_desc_size = obj->desc_size;

		memcpy(resp, &obj->desc, copy_size);
	}

	/* Set the NS bit in the response if applicable. */
	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
		 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @client:             Client state.
 * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset:    Byte offset in descriptor to resume at.
 * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
 *                      hypervisor. 0 otherwise.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_TX.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t handle_low,
			  uint32_t handle_high,
			  uint32_t fragment_offset,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	void *src;
	size_t buf_size;
	size_t copy_size;
	size_t full_copy_size;
	uint32_t desc_sender_id;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
	struct spmc_shmem_obj *obj;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);

	if (!secure_origin) {
		WARN("%s: can only be called from the secure world.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: invalid handle 0x%lx.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != 0U && sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	if (fragment_offset >= obj->desc_size) {
		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
		     __func__, fragment_offset, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full!\n", __func__);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_all;
	}

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	mbox->state = MAILBOX_STATE_FULL;

	/*
	 * If the caller is v1.0, convert the descriptor; otherwise copy it
	 * directly.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		size_t out_desc_size;

		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
							buf_size,
							fragment_offset,
							&copy_size,
							&out_desc_size);
		if (ret != 0) {
			ERROR("%s: Failed to process descriptor.\n", __func__);
			goto err_unlock_all;
		}
	} else {
		full_copy_size = obj->desc_size - fragment_offset;
		copy_size = MIN(full_copy_size, buf_size);

		src = &obj->desc;

		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
	}

	spin_unlock(&mbox->lock);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
		 copy_size, sender_id, 0, 0, 0);

err_unlock_all:
	spin_unlock(&mbox->lock);
err_unlock_shmem:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

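/*
 * Worked example (illustrative, with made-up sizes): retrieving a
 * 0x2800-byte descriptor through a single-page (0x1000 byte) RX/TX buffer
 * pair, the v1.1 path above serves the request in three steps:
 *
 *	FFA_MEM_RETRIEVE_REQ           -> copies bytes [0x0000, 0x1000)
 *	FFA_MEM_FRAG_RX(offset=0x1000) -> copies bytes [0x1000, 0x2000)
 *	FFA_MEM_FRAG_RX(offset=0x2000) -> copies bytes [0x2000, 0x2800)
 *
 * Each call must present fragment_offset < desc_size, and each copy is
 * MIN(desc_size - fragment_offset, buf_size), matching the checks above.
 */
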
/**
 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @smc_fid:            FID of the SMC.
 * @secure_origin:      true if the call originated from the secure world.
 * @handle:             Handle passed to the SMC call. Used to return
 *                      FFA_SUCCESS_SMC32.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by the secure OS to release memory previously shared with it by the
 * non-secure OS.
 *
 * The relinquish descriptor, including the handle of the memory region to
 * release, must be in the client's (secure OS's) transmit buffer.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_relinquish(uint32_t smc_fid,
			    bool secure_origin,
			    uint32_t handle_low,
			    uint32_t handle_high,
			    uint32_t fragment_offset,
			    uint32_t sender_id,
			    void *cookie,
			    void *handle,
			    uint64_t flags)
{
	int ret;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	struct spmc_shmem_obj *obj;
	const struct ffa_mem_relinquish_descriptor *req;
	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

	if (!secure_origin) {
		WARN("%s: unsupported relinquish direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	req = mbox->tx_buffer;

	if (req->flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (req->endpoint_count == 0U) {
		WARN("%s: endpoint count cannot be 0.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/*
	 * Validate the endpoint ID was populated correctly. We don't currently
	 * support proxy endpoints, so the endpoint count must always be 1.
	 */
	if (req->endpoint_count != 1U) {
		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
		     req->endpoint_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate provided endpoint ID matches the partition ID. */
	if (req->endpoint_array[0] != sp_ctx->sp_id) {
		WARN("%s: invalid endpoint ID %u != %u\n", __func__,
		     req->endpoint_array[0], sp_ctx->sp_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate the caller is a valid participant. */
	if (!spmc_shmem_obj_validate_id(&obj->desc, sp_ctx->sp_id)) {
		WARN("%s: caller (0x%x) is not a valid participant.\n",
		     __func__, sp_ctx->sp_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->in_use == 0U) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
	obj->in_use--;

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

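/*
 * Illustrative sketch (not part of the SPMC): the relinquish descriptor a
 * secure partition is expected to place in its TX buffer before invoking
 * FFA_MEM_RELINQUISH, matching the validation in spmc_ffa_mem_relinquish()
 * above. tx_buf, mem_handle and own_sp_id are hypothetical caller-side
 * values.
 *
 *	struct ffa_mem_relinquish_descriptor *rel = tx_buf;
 *
 *	rel->handle = mem_handle;		// handle from the original share
 *	rel->flags = 0U;			// no flags are supported
 *	rel->endpoint_count = 1U;		// proxy endpoints not supported
 *	rel->endpoint_array[0] = own_sp_id;	// must match the calling SP
 */
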
/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @smc_fid:        FID of the SMC.
 * @secure_origin:  true if the call originated from the secure world.
 * @handle_low:     Unique handle of shared memory object to reclaim.
 *                  Bit[31:0].
 * @handle_high:    Unique handle of shared memory object to reclaim.
 *                  Bit[63:32].
 * @mem_flags:      FFA_MEM_RECLAIM flags. No flags are currently supported,
 *                  so this must be 0.
 * @handle:         Handle passed to the SMC call. Used to return
 *                  FFA_SUCCESS_SMC32.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by the non-secure OS to reclaim memory previously shared with the
 * secure OS.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
			 bool secure_origin,
			 uint32_t handle_low,
			 uint32_t handle_high,
			 uint32_t mem_flags,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	int ret;
	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	if (secure_origin) {
		WARN("%s: unsupported reclaim direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (mem_flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}
	if (obj->in_use != 0U) {
		ret = FFA_ERROR_DENIED;
		goto err_unlock;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	/* Allow platform-specific reclaim operations to be performed. */
	ret = plat_spmc_shmem_reclaim(&obj->desc);
	if (ret != 0) {
		goto err_unlock;
	}

	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}
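
/*
 * Illustrative sequence (not part of the SPMC): reclaim only succeeds once
 * the descriptor was fully transmitted and every borrower has relinquished
 * its access, i.e. obj->in_use has dropped back to zero. ffa_call() is
 * hypothetical caller-side SMC glue.
 *
 *	// Each receiver first: FFA_MEM_RELINQUISH (decrements in_use).
 *	// Then the owner, from the normal world:
 *	r = ffa_call(FFA_MEM_RECLAIM, handle_lo, handle_hi, 0);
 *	// FFA_ERROR_DENIED here means a borrower still holds the region;
 *	// retry after the remaining receivers relinquish.
 */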