// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023-2024, Linaro Limited
 */

#include <bitstring.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <string_ext.h>
#include <util.h>

LIST_HEAD(prtn_list_head, guest_partition);

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static struct prtn_list_head prtn_list __nex_data =
        LIST_HEAD_INITIALIZER(prtn_list);
static struct prtn_list_head prtn_destroy_list __nex_data =
        LIST_HEAD_INITIALIZER(prtn_destroy_list);

/* Memory used by OP-TEE core */
struct memory_map *kmem_map __nex_bss;

struct guest_spec_data {
        size_t size;
        void (*destroy)(void *data);
};

static bool add_disabled __nex_bss;
static unsigned gsd_count __nex_bss;
static struct guest_spec_data *gsd_array __nex_bss;

struct guest_partition {
        LIST_ENTRY(guest_partition) link;
        struct mmu_partition *mmu_prtn;
        struct memory_map mem_map;
        struct mutex mutex;
        void *tables_va;
        tee_mm_entry_t *tee_ram;
        tee_mm_entry_t *ta_ram;
        tee_mm_entry_t *tables;
        bool runtime_initialized;
        bool got_guest_destroyed;
        bool shutting_down;
        uint16_t id;
        struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
        uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
        uint8_t cookie_count;
        bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
        void **data_array;
};

struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

static struct guest_partition *get_current_prtn(void)
{
        struct guest_partition *ret;
        uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

        ret = current_partition[get_core_pos()];

        thread_unmask_exceptions(exceptions);

        return ret;
}

uint16_t virt_get_current_guest_id(void)
{
        struct guest_partition *prtn = get_current_prtn();

        if (!prtn)
                return 0;
        return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
        uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

        current_partition[get_core_pos()] = prtn;

        thread_unmask_exceptions(exceptions);
}

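/*
 * Per-guest TA RAM budget: each guest gets an equal share of the TA pool,
 * reduced by the size of the guest's private copy of core .data/.bss
 * (VCORE_UNPG_RW_SZ) and of its translation tables, rounded down to a
 * small page boundary.
 */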
static size_t get_ta_ram_size(void)
{
        size_t ta_size = nex_phys_mem_get_ta_size();

        return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
                         core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

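/*
 * Build the guest memory map as a copy of the core map (kmem_map), with the
 * entry that maps core .data/.bss (the one whose VA equals VCORE_UNPG_RW_PA)
 * redirected to the guest's private pages at @tee_data, so that every guest
 * has its own copy of mutable core state.
 */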
static TEE_Result prepare_memory_map(struct memory_map *mem_map,
                                     paddr_t tee_data)
{
        struct tee_mmap_region *map = NULL;
        vaddr_t max_va = 0;
        size_t n = 0;
        /*
         * This function assumes that at the time of operation,
         * kmem_map (aka static_memory_map from core_mmu.c)
         * will not be altered. This is true, because all
         * changes to static_memory_map are done during
         * OP-TEE initialization, while this function will be
         * called when the hypervisor creates a guest.
         */

        /* Allocate entries for virtual guest map */
        mem_map->map = nex_calloc(kmem_map->count + 1, sizeof(*mem_map->map));
        if (!mem_map->map)
                return TEE_ERROR_OUT_OF_MEMORY;
        mem_map->count = kmem_map->count;
        mem_map->alloc_count = kmem_map->count + 1;

        memcpy(mem_map->map, kmem_map->map,
               sizeof(*mem_map->map) * mem_map->count);

        /* Map TEE .data and .bss sections */
        for (n = 0; n < mem_map->count; n++) {
                map = mem_map->map + n;
                if (map->va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
                        map->type = MEM_AREA_TEE_RAM_RW;
                        map->attr = core_mmu_type_to_attr(map->type);
                        map->pa = tee_data;
                }
                if (map->va + map->size > max_va)
                        max_va = map->va + map->size;
        }

        DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

        for (n = 0; n < mem_map->count; n++)
                DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
                     teecore_memtype_name(mem_map->map[n].type),
                     mem_map->map[n].region_size, mem_map->map[n].pa,
                     mem_map->map[n].va, mem_map->map[n].size,
                     mem_map->map[n].attr);
        return TEE_SUCCESS;
}

void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base,
                      paddr_size_t secmem0_size, paddr_t secmem1_base,
                      paddr_size_t secmem1_size)
{
        size_t n = 0;

        /* Init page pool that covers all secure RAM */
        nex_phys_mem_init(secmem0_base, secmem0_size, secmem1_base,
                          secmem1_size);

        /* Carve out areas that are used by OP-TEE core */
        for (n = 0; n < mem_map->count; n++) {
                struct tee_mmap_region *map = mem_map->map + n;

                switch (map->type) {
                case MEM_AREA_TEE_RAM_RX:
                case MEM_AREA_TEE_RAM_RO:
                case MEM_AREA_NEX_RAM_RO:
                case MEM_AREA_NEX_RAM_RW:
                        DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
                             map->type, map->pa, map->pa + map->size);
                        if (!nex_phys_mem_alloc2(map->pa, map->size))
                                panic("Can't carve out used area");
                        break;
                default:
                        continue;
                }
        }

        kmem_map = mem_map;
}


static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
        TEE_Result res = TEE_SUCCESS;
        paddr_t original_data_pa = 0;

        prtn->tee_ram = nex_phys_mem_core_alloc(VCORE_UNPG_RW_SZ);
        if (!prtn->tee_ram) {
                EMSG("Can't allocate memory for TEE runtime context");
                res = TEE_ERROR_OUT_OF_MEMORY;
                goto err;
        }
        DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

        prtn->ta_ram = nex_phys_mem_ta_alloc(get_ta_ram_size());
        if (!prtn->ta_ram) {
                EMSG("Can't allocate memory for TA data");
                res = TEE_ERROR_OUT_OF_MEMORY;
                goto err;
        }
        DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

        prtn->tables = nex_phys_mem_core_alloc(core_mmu_get_total_pages_size());
        if (!prtn->tables) {
                EMSG("Can't allocate memory for page tables");
                res = TEE_ERROR_OUT_OF_MEMORY;
                goto err;
        }

        prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
                                       MEM_AREA_SEC_RAM_OVERALL,
                                       core_mmu_get_total_pages_size());
        assert(prtn->tables_va);

        prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
        if (!prtn->mmu_prtn) {
                res = TEE_ERROR_OUT_OF_MEMORY;
                goto err;
        }

        res = prepare_memory_map(&prtn->mem_map,
                                 tee_mm_get_smem(prtn->tee_ram));
        if (res)
                goto err;

        core_init_mmu_prtn(prtn->mmu_prtn, &prtn->mem_map);

        original_data_pa = virt_to_phys(__data_start);
        /* Switch to guest's mappings */
        core_mmu_set_prtn(prtn->mmu_prtn);

        /* clear .bss */
        memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

        /* copy .data section from R/O original */
        memcpy(__data_start,
               phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
                            __data_end - __data_start),
               __data_end - __data_start);

        return TEE_SUCCESS;

err:
        if (prtn->tee_ram)
                tee_mm_free(prtn->tee_ram);
        if (prtn->ta_ram)
                tee_mm_free(prtn->ta_ram);
        if (prtn->tables)
                tee_mm_free(prtn->tables);
        nex_free(prtn->mmu_prtn);
        nex_free(prtn->mem_map.map);

        return res;
}

static void destroy_gsd(struct guest_partition *prtn, bool free_only)
{
        size_t n = 0;

        for (n = 0; n < gsd_count; n++) {
                if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
                        gsd_array[n].destroy(prtn->data_array[n]);
                nex_free(prtn->data_array[n]);
        }
        nex_free(prtn->data_array);
        prtn->data_array = NULL;
}

static TEE_Result alloc_gsd(struct guest_partition *prtn)
{
        unsigned int n = 0;

        if (!gsd_count)
                return TEE_SUCCESS;

        prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
        if (!prtn->data_array)
                return TEE_ERROR_OUT_OF_MEMORY;

        for (n = 0; n < gsd_count; n++) {
                prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
                if (!prtn->data_array[n]) {
                        destroy_gsd(prtn, true /*free_only*/);
                        return TEE_ERROR_OUT_OF_MEMORY;
                }
        }

        return TEE_SUCCESS;
}
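
/*
 * Rough lifecycle, as seen from this file: the hypervisor glue is expected
 * to call virt_guest_created() when a guest appears, bracket work done on
 * behalf of a guest with virt_set_guest()/virt_unset_guest(), and call
 * virt_guest_destroyed() on teardown. The exact calling environment is
 * defined by the hypervisor integration, not here.
 */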
TEE_Result virt_guest_created(uint16_t guest_id)
{
        struct guest_partition *prtn = NULL;
        TEE_Result res = TEE_SUCCESS;
        uint32_t exceptions = 0;

        if (guest_id == HYP_CLNT_ID)
                return TEE_ERROR_BAD_PARAMETERS;

        prtn = nex_calloc(1, sizeof(*prtn));
        if (!prtn)
                return TEE_ERROR_OUT_OF_MEMORY;

        res = alloc_gsd(prtn);
        if (res)
                goto err_free_prtn;

        prtn->id = guest_id;
        mutex_init(&prtn->mutex);
        refcount_set(&prtn->refc, 1);
        res = configure_guest_prtn_mem(prtn);
        if (res)
                goto err_free_gsd;

        set_current_prtn(prtn);

        malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
        phys_mem_init(0, 0, tee_mm_get_smem(prtn->ta_ram),
                      tee_mm_get_bytes(prtn->ta_ram));
        /* Initialize threads */
        thread_init_threads();
        /* Do the preinitcalls */
        call_preinitcalls();

        exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
        LIST_INSERT_HEAD(&prtn_list, prtn, link);
        cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

        IMSG("Added guest %d", guest_id);

        set_current_prtn(NULL);
        core_mmu_set_default_prtn();

        return TEE_SUCCESS;

err_free_gsd:
        destroy_gsd(prtn, true /*free_only*/);
err_free_prtn:
        nex_free(prtn);
        return res;
}

static bool
prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
{
#ifdef CFG_CORE_SEL1_SPMC
        int i = 0;

        if (prtn->cookie_count)
                return true;
        bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
        return i >= 0;
#else
        return false;
#endif
}

static void get_prtn(struct guest_partition *prtn)
{
        if (!refcount_inc(&prtn->refc))
                panic();
}

uint16_t virt_get_guest_id(struct guest_partition *prtn)
{
        if (!prtn)
                return 0;
        return prtn->id;
}

static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
{
        struct guest_partition *prtn = NULL;

        LIST_FOREACH(prtn, &prtn_list, link)
                if (!prtn->shutting_down && prtn->id == guest_id)
                        return prtn;

        return NULL;
}

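/*
 * Iterator over live partitions: returns the partition following @prtn (or
 * the first one when @prtn is NULL) with a reference taken, skipping
 * partitions that are shutting down, and drops the caller's reference on
 * @prtn. Intended for get/next/put style loops where the reference is handed
 * over between iterations.
 */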
struct guest_partition *virt_next_guest(struct guest_partition *prtn)
{
        struct guest_partition *ret = NULL;
        uint32_t exceptions = 0;

        exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
        if (prtn)
                ret = LIST_NEXT(prtn, link);
        else
                ret = LIST_FIRST(&prtn_list);

        while (ret && ret->shutting_down)
                ret = LIST_NEXT(ret, link);
        if (ret)
                get_prtn(ret);
        cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

        virt_put_guest(prtn);

        return ret;
}

struct guest_partition *virt_get_current_guest(void)
{
        struct guest_partition *prtn = get_current_prtn();

        if (prtn)
                get_prtn(prtn);
        return prtn;
}

struct guest_partition *virt_get_guest(uint16_t guest_id)
{
        struct guest_partition *prtn = NULL;
        uint32_t exceptions = 0;

        exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
        prtn = find_guest_by_id_unlocked(guest_id);
        if (prtn)
                get_prtn(prtn);
        cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

        return prtn;
}

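/*
 * Drop a reference taken with virt_get_guest(), virt_get_current_guest() or
 * virt_next_guest(). The final put frees the partition's resources; if SPMC
 * cookies or shared-memory bits are still outstanding, the struct itself is
 * parked on prtn_destroy_list so that
 * virt_reclaim_cookie_from_destroyed_guest() can still find it.
 */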
void virt_put_guest(struct guest_partition *prtn)
{
        if (prtn && refcount_dec(&prtn->refc)) {
                uint32_t exceptions = 0;
                bool do_free = true;

                assert(prtn->shutting_down);

                exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
                LIST_REMOVE(prtn, link);
                if (prtn_have_remaining_resources(prtn)) {
                        LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
                        /*
                         * Delay the nex_free() until
                         * virt_reclaim_cookie_from_destroyed_guest()
                         * is done with this partition.
                         */
                        do_free = false;
                }
                cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

                destroy_gsd(prtn, false /*!free_only*/);
                tee_mm_free(prtn->tee_ram);
                prtn->tee_ram = NULL;
                tee_mm_free(prtn->ta_ram);
                prtn->ta_ram = NULL;
                tee_mm_free(prtn->tables);
                prtn->tables = NULL;
                core_free_mmu_prtn(prtn->mmu_prtn);
                prtn->mmu_prtn = NULL;
                nex_free(prtn->mem_map.map);
                prtn->mem_map.map = NULL;
                if (do_free)
                        nex_free(prtn);
        }
}

TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
        struct guest_partition *prtn = NULL;
        uint32_t exceptions = 0;

        IMSG("Removing guest %"PRId16, guest_id);

        exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

        prtn = find_guest_by_id_unlocked(guest_id);
        if (prtn && !prtn->got_guest_destroyed)
                prtn->got_guest_destroyed = true;
        else
                prtn = NULL;

        cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

        if (prtn) {
                notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);

                exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
                prtn->shutting_down = true;
                cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

                virt_put_guest(prtn);
        } else {
                EMSG("Client with id %d is not found", guest_id);
        }

        return TEE_SUCCESS;
}

TEE_Result virt_set_guest(uint16_t guest_id)
{
        struct guest_partition *prtn = get_current_prtn();

        /* This can be true only if we return from IRQ RPC */
        if (prtn && prtn->id == guest_id)
                return TEE_SUCCESS;

        if (prtn)
                panic("Virtual guest partition is already set");

        prtn = virt_get_guest(guest_id);
        if (!prtn)
                return TEE_ERROR_ITEM_NOT_FOUND;

        set_current_prtn(prtn);
        core_mmu_set_prtn(prtn->mmu_prtn);

        return TEE_SUCCESS;
}

void virt_unset_guest(void)
{
        struct guest_partition *prtn = get_current_prtn();

        if (!prtn)
                return;

        set_current_prtn(NULL);
        core_mmu_set_default_prtn();
        virt_put_guest(prtn);
}

void virt_on_stdcall(void)
{
        struct guest_partition *prtn = get_current_prtn();

        /* Initialize runtime on first std call */
        if (!prtn->runtime_initialized) {
                mutex_lock(&prtn->mutex);
                if (!prtn->runtime_initialized) {
                        init_tee_runtime();
                        call_driver_initcalls();
                        prtn->runtime_initialized = true;
                }
                mutex_unlock(&prtn->mutex);
        }
}

struct memory_map *virt_get_memory_map(void)
{
        struct guest_partition *prtn;

        prtn = get_current_prtn();

        if (!prtn)
                return NULL;

        return &prtn->mem_map;
}

#ifdef CFG_CORE_SEL1_SPMC
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
        int i = 0;

        for (i = 0; i < prtn->cookie_count; i++)
                if (prtn->cookies[i] == cookie)
                        return i;
        return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
        struct guest_partition *prtn = NULL;
        int i = 0;

        LIST_FOREACH(prtn, &prtn_list, link) {
                i = find_cookie(prtn, cookie);
                if (i >= 0) {
                        if (idx)
                                *idx = i;
                        return prtn;
                }
        }

        return NULL;
}

TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
        TEE_Result res = TEE_ERROR_ACCESS_DENIED;
        struct guest_partition *prtn = NULL;
        uint32_t exceptions = 0;

        exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
        if (find_prtn_cookie(cookie, NULL))
                goto out;

        prtn = current_partition[get_core_pos()];
        if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
                prtn->cookies[prtn->cookie_count] = cookie;
                prtn->cookie_count++;
                res = TEE_SUCCESS;
        }
out:
        cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

        return res;
}

void virt_remove_cookie(uint64_t cookie)
{
        struct guest_partition *prtn = NULL;
        uint32_t exceptions = 0;
        int i = 0;

        exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
        prtn = find_prtn_cookie(cookie, &i);
        if (prtn) {
                memmove(prtn->cookies + i, prtn->cookies + i + 1,
                        sizeof(uint64_t) * (prtn->cookie_count - i - 1));
                prtn->cookie_count--;
        }
        cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
        struct guest_partition *prtn = NULL;
        uint32_t exceptions = 0;
        uint16_t ret = 0;

        exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
        prtn = find_prtn_cookie(cookie, NULL);
        if (prtn)
                ret = prtn->id;

        cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

        return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
        return get_current_prtn()->shm_bits;
}

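/*
 * Two cookie namespaces are handled here: hypervisor-assisted handles carry
 * FFA_MEMORY_HANDLE_HYPERVISOR_BIT and live in the cookies[] array, while
 * SPMC-allocated handles encode an index into shm_bits once the non-secure
 * and partition fields are masked off.
 */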
static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
{
        if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
                size_t n = 0;

                for (n = 0; n < prtn->cookie_count; n++) {
                        if (prtn->cookies[n] == cookie) {
                                memmove(prtn->cookies + n,
                                        prtn->cookies + n + 1,
                                        sizeof(uint64_t) *
                                        (prtn->cookie_count - n - 1));
                                prtn->cookie_count--;
                                return TEE_SUCCESS;
                        }
                }
        } else {
                uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
                                SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
                                          FFA_MEMORY_HANDLE_PRTN_SHIFT);
                int64_t i = cookie & ~mask;

                if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
                    bit_test(prtn->shm_bits, i)) {
                        bit_clear(prtn->shm_bits, i);
                        return TEE_SUCCESS;
                }
        }

        return TEE_ERROR_ITEM_NOT_FOUND;
}

TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
                                                    uint64_t cookie)
{
        struct guest_partition *prtn = NULL;
        TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
        uint32_t exceptions = 0;

        exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
        LIST_FOREACH(prtn, &prtn_destroy_list, link) {
                if (prtn->id == guest_id) {
                        res = reclaim_cookie(prtn, cookie);
                        if (prtn_have_remaining_resources(prtn))
                                prtn = NULL;
                        else
                                LIST_REMOVE(prtn, link);
                        break;
                }
        }
        cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

        nex_free(prtn);

        return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

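/*
 * Guest-specific data lets a subsystem reserve one per-guest allocation of a
 * given size, created for each new guest and freed (with an optional
 * destructor) when the guest goes away. Illustrative sketch only; my_data_id,
 * struct my_ctx and my_ctx_destroy are hypothetical names, not part of this
 * API:
 *
 *      static unsigned int my_data_id __nex_bss;
 *
 *      // In an initcall, before the first exit to the normal world:
 *      res = virt_add_guest_spec_data(&my_data_id, sizeof(struct my_ctx),
 *                                     my_ctx_destroy);
 *
 *      // Later, while holding a reference to prtn:
 *      struct my_ctx *ctx = virt_get_guest_spec_data(prtn, my_data_id);
 */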
TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
                                    void (*data_destroy)(void *data))
{
        void *p = NULL;

        /*
         * This function only executes successfully in a single threaded
         * environment before exiting to the normal world the first time.
         * If add_disabled is true, it means we're not in this environment
         * any longer.
         */

        if (add_disabled)
                return TEE_ERROR_BAD_PARAMETERS;

        p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
        if (!p)
                return TEE_ERROR_OUT_OF_MEMORY;
        gsd_array = p;

        gsd_array[gsd_count] = (struct guest_spec_data){
                .size = data_size,
                .destroy = data_destroy,
        };
        *data_id = gsd_count + 1;
        gsd_count++;
        return TEE_SUCCESS;
}

void *virt_get_guest_spec_data(struct guest_partition *prtn,
                               unsigned int data_id)
{
        assert(data_id);
        if (!data_id || !prtn || data_id > gsd_count)
                return NULL;
        return prtn->data_array[data_id - 1];
}

static TEE_Result virt_disable_add(void)
{
        add_disabled = true;

        return TEE_SUCCESS;
}
nex_release_init_resource(virt_disable_add);