/src/arch/aarch64/hypervisor/

vm.c
  130 bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, in arch_vm_identity_prepare() argument
  135 if (vm_locked.vm->el0_partition) { in arch_vm_identity_prepare()
  148 void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, in arch_vm_identity_commit() argument
  154 if (vm_locked.vm->el0_partition) { in arch_vm_identity_commit()
  177 bool arch_vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end, in arch_vm_unmap() argument
  193 void arch_vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool) in arch_vm_ptable_defrag() argument
  195 if (vm_locked.vm->el0_partition) { in arch_vm_ptable_defrag()
  209 bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, in arch_vm_mem_get_mode() argument
  243 static bool arch_vm_iommu_mm_prepare(struct vm_locked vm_locked, paddr_t begin, in arch_vm_iommu_mm_prepare() argument
  258 static void arch_vm_iommu_mm_commit(struct vm_locked vm_locked, paddr_t begin, in arch_vm_iommu_mm_commit() argument
  [all …]
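Every hit in this listing is the same idiom: struct vm_locked is a lock token. vm_lock() takes the VM's spinlock and returns the token, and any function that requires the lock accepts the token by value instead of a bare struct vm *. Below is a minimal, self-contained sketch of that shape, assuming the usual Hafnium layout; the types and the spinlock are simplified stand-ins, not the real definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the real types. */
struct spinlock { volatile int v; };
struct vm {
        struct spinlock lock;
        uint16_t id;
        bool el0_partition;
};

/* A vm_locked value is proof that vm->lock is currently held. */
struct vm_locked { struct vm *vm; };

static void sl_lock(struct spinlock *l)   { (void)l; /* spin; elided */ }
static void sl_unlock(struct spinlock *l) { (void)l; }

/* Acquire the lock and hand back the token. */
static struct vm_locked vm_lock(struct vm *vm)
{
        sl_lock(&vm->lock);
        return (struct vm_locked){.vm = vm};
}

/* Release the lock and poison the token against reuse. */
static void vm_unlock(struct vm_locked *locked)
{
        sl_unlock(&locked->vm->lock);
        locked->vm = NULL;
}

Because the token is normally only produced by vm_lock(), a signature such as arch_vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool) records at the type level that its caller must already hold the VM lock.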
other_world.c
  29 struct vm_locked other_world_vm_locked; in arch_other_world_vm_init()
  110 static struct vm_locked lock_other_world(struct vm_locked owner_vm_locked) in lock_other_world()
  125 static void unlock_other_world(struct vm_locked owner_vm_locked, in unlock_other_world()
  126 struct vm_locked other_world_locked) in unlock_other_world()
  147 struct vm_locked vm_locked, struct mpool *local_page_pool, in arch_other_world_vm_configure_rxtx_map() argument
  154 struct vm_locked other_world_locked; in arch_other_world_vm_configure_rxtx_map()
  158 other_world_locked = lock_other_world(vm_locked); in arch_other_world_vm_configure_rxtx_map()
  214 unlock_other_world(vm_locked, other_world_locked); in arch_other_world_vm_configure_rxtx_map()
  226 struct vm_locked vm_locked, struct mpool *local_page_pool, in arch_other_world_vm_configure_rxtx_unmap() argument
  230 struct vm_locked other_world_locked = lock_other_world(vm_locked); in arch_other_world_vm_configure_rxtx_unmap()
  [all …]
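lock_other_world()/unlock_other_world() bracket work that needs both the caller's VM and the other-world VM locked, as at lines 158 and 214 above. A hedged sketch of what such a pair has to handle, reusing the stand-ins from the previous sketch; vm_find() and HF_OTHER_WORLD_ID are real Hafnium names, but this body is an assumption about the helpers, not the actual implementation.

#define HF_OTHER_WORLD_ID UINT16_C(0) /* illustrative value only */

struct vm *vm_find(uint16_t id); /* VM lookup, assumed available */

/* Lock the other-world VM on top of the caller's own lock. When the
 * caller is itself the other world, reuse its token rather than
 * deadlocking on a second acquisition of the same spinlock. */
static struct vm_locked lock_other_world(struct vm_locked owner_vm_locked)
{
        struct vm *other_world = vm_find(HF_OTHER_WORLD_ID);

        if (owner_vm_locked.vm == other_world) {
                return owner_vm_locked;
        }
        return vm_lock(other_world);
}

/* Release the second lock only when it is genuinely distinct. */
static void unlock_other_world(struct vm_locked owner_vm_locked,
                               struct vm_locked other_world_locked)
{
        if (owner_vm_locked.vm != other_world_locked.vm) {
                vm_unlock(&other_world_locked);
        }
}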
/src/

vm.c
  396 bool vm_mailbox_state_busy(struct vm_locked vm_locked) in vm_mailbox_state_busy() argument
  510 bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked) in vm_are_fwk_notifications_pending() argument
  647 bool vm_notifications_validate_binding(struct vm_locked vm_locked, in vm_notifications_validate_binding() argument
  662 void vm_notifications_update_bindings(struct vm_locked vm_locked, in vm_notifications_update_bindings() argument
  689 struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id, in vm_notifications_validate_bound_sender() argument
  734 struct vm_locked vm_locked, bool is_from_vm, in vm_notifications_partition_set_pending() argument
  819 struct vm_locked vm_locked) in vm_notifications_framework_get_pending() argument
  988 struct vm_locked vm_locked, bool is_from_vm, uint16_t *ids, in vm_notifications_info_get_pending() argument
  1090 struct vm_locked vm_locked, uint32_t id) in vm_find_interrupt_descriptor() argument
  1112 struct vm_locked vm_locked, uint32_t id, uint32_t target_mpidr) in vm_interrupt_set_target_mpidr() argument
  [all …]
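Most of these hits are the notification machinery, which validates per-notification sender bindings under the VM lock before anything is set pending. A self-contained sketch of the kind of check vm_notifications_validate_binding() performs; every field and constant name below is an illustrative assumption, not Hafnium's actual layout.

#include <stdbool.h>
#include <stdint.h>

#define MAX_NOTIFICATIONS 64 /* one bit per notification */

typedef uint16_t ffa_id_t;      /* stand-in */
typedef uint64_t notif_bitmap_t;

struct notifications {
        notif_bitmap_t pending;
        /* Which sender each notification bit was bound to. */
        ffa_id_t bindings_sender_id[MAX_NOTIFICATIONS];
};

/* A sender may only set notification bits that were bound to it. */
static bool notifications_validate_binding(const struct notifications *n,
                                           ffa_id_t sender_id,
                                           notif_bitmap_t to_set)
{
        for (uint32_t i = 0; i < MAX_NOTIFICATIONS; i++) {
                bool bit_set = (to_set & (UINT64_C(1) << i)) != 0;

                if (bit_set && n->bindings_sender_id[i] != sender_id) {
                        return false;
                }
        }
        return true;
}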
load.c
  104 struct vm_locked vm_locked, struct rx_tx rxtx, in link_rxtx_to_mailbox() argument
  123 vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv); in link_rxtx_to_mailbox()
  157 struct vm_locked vm_locked, in load_common() argument
  263 vm_locked.vm->id, vm_locked.vm->vcpu_count)) { in load_common()
  291 struct vm_locked vm_locked; in load_primary() local
  339 vm_locked = vm_lock(vm); in load_primary()
  419 vm_unlock(&vm_locked); in load_primary()
  532 const struct vm_locked vm_locked, in ffa_map_memory_regions() argument
  666 struct vm_locked vm_locked; in load_secondary() local
  845 vm_locked); in load_secondary()
  [all …]
api.c
  360 struct vm_locked vm_locked; in api_abort() local
  919 struct vm_locked vm_locked; in api_ffa_partition_info_get() local
  1127 struct vm_locked vm_locked; in api_ffa_msg_wait_rx_release() local
  1222 struct vm_locked vm_locked; in api_vcpu_prepare_run() local
  1741 struct vm_locked vm_locked; in api_get_rxtx_description() local
  1869 struct vm_locked vm_locked; in api_ffa_rxtx_unmap() local
  2226 struct vm_locked vm_locked; in api_ffa_rx_release() local
  2348 struct vm_locked vm_locked; in api_interrupt_enable() local
  4113 struct vm_locked vm_locked; in api_ffa_secondary_ep_register() local
  4666 struct vm_locked vm_locked; in api_ffa_mem_perm_get() local
  [all …]
vm_test.cc
  39 using struct_vm_locked = struct vm_locked;
  80 struct vm_locked vm_locked; in TEST_F() local
  84 vm_locked = vm_lock(vm); in TEST_F()
  91 vm_unlock(&vm_locked); in TEST_F()
  244 struct vm_locked current_vm_locked; in TEST_F()
  307 struct vm_locked current_vm_locked; in TEST_F()
  372 struct vm_locked current_vm_locked; in TEST_F()
  599 struct vm_locked current_vm_locked; in TEST_F()
  806 struct vm_locked vm_locked = vm_lock(vm_find_index(0)); in TEST_F() local
  826 vm_unlock(&vm_locked); in TEST_F()
  [all …]
ipi_test.cc
  31 using struct_vm_locked = struct vm_locked;
  304 struct vm_locked vm_locked = vm_lock(test_service[i]); in TEST_F() local
  316 vm_locked, is_from_vm, ids, &ids_count, lists_sizes, in TEST_F()
  347 vm_unlock(&vm_locked); in TEST_F()
ffa_memory.c
  1053 struct vm_locked vm_locked, paddr_t pa_begin, paddr_t pa_end, in ffa_region_group_check_actions() argument
  1120 static void ffa_region_group_commit_actions(struct vm_locked vm_locked, in ffa_region_group_commit_actions() argument
  1215 struct vm_locked vm_locked, in ffa_region_group_identity_map() argument
  1225 if (vm_locked.vm->el0_partition) { in ffa_region_group_identity_map()
  1253 vm_locked, pa_begin, pa_end, ppool, in ffa_region_group_identity_map()
  1271 vm_locked, pa_begin, pa_end, ppool, in ffa_region_group_identity_map()
  1467 struct vm_locked from_locked, in ffa_send_check_update()
  1631 struct vm_locked to_locked, in ffa_retrieve_check_update()
  1755 struct vm_locked from_locked, in ffa_relinquish_check_update()
  3275 struct vm_locked from_locked, in ffa_partition_memory_retrieve_request_undo()
  [all …]
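ffa_region_group_check_actions()/ffa_region_group_commit_actions() repeat the prepare/commit split already seen with arch_vm_identity_prepare()/arch_vm_identity_commit(): perform every step that can fail first, then apply the change through a path that cannot. A sketch of the idiom; prepare_range() and commit_range() are hypothetical stand-ins, since the listing truncates the real signatures.

#include <stdbool.h>
#include <stddef.h>

typedef unsigned long paddr_t; /* stand-in for the real type */

struct mem_range {
        paddr_t begin;
        paddr_t end;
};

bool prepare_range(paddr_t begin, paddr_t end); /* may fail: allocates */
void commit_range(paddr_t begin, paddr_t end);  /* must not fail */

/* Map a group of ranges all-or-nothing: if any prepare fails, no
 * commit has happened yet, so the page tables are untouched. */
static bool map_region_group(const struct mem_range *ranges, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                if (!prepare_range(ranges[i].begin, ranges[i].end)) {
                        return false;
                }
        }
        for (size_t i = 0; i < n; i++) {
                commit_range(ranges[i].begin, ranges[i].end);
        }
        return true;
}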
/src/ffa/spmc/

vm.c
  78 struct vm_locked vm_locked; in ffa_vm_nwd_create() local
  85 if (vm_locked.vm != NULL) { in ffa_vm_nwd_create()
  91 if (vm_locked.vm == NULL) { in ffa_vm_nwd_create()
  100 vm_locked.vm->id = vm_id; in ffa_vm_nwd_create()
  105 return vm_locked; in ffa_vm_nwd_create()
  153 struct vm_locked to_ret_locked; in ffa_vm_find_locked()
  201 struct vm_locked vm_locked = vm_lock(&nwd_vms[i]); in ffa_vm_notifications_info_get() local
  207 vm_unlock(&vm_locked); in ffa_vm_notifications_info_get()
  216 void ffa_vm_disable_interrupts(struct vm_locked vm_locked) in ffa_vm_disable_interrupts() argument
  222 vm_locked.vm->id); in ffa_vm_disable_interrupts()
  [all …]
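ffa_vm_nwd_create() returns its result still locked, and a token with .vm == NULL doubles as the failure case, the same convention ffa_vm_find_locked() uses in /src/ffa/hypervisor/vm.c below. A sketch of the create-or-find shape, reusing the earlier stand-ins; the nwd_vms[] capacity, the free-slot marker value, and the omission of the outer lock the real code would need to make this scan safe are all assumptions.

#define NWD_VMS_MAX 8u                    /* illustrative capacity */
#define HF_INVALID_VM_ID UINT16_C(0xffff) /* illustrative free marker */

static struct vm nwd_vms[NWD_VMS_MAX];

/* Return the slot for vm_id, locked; claim a free slot if the ID is
 * new. A token with .vm == NULL reports that the table is full. */
static struct vm_locked ffa_vm_nwd_create(uint16_t vm_id)
{
        struct vm_locked vm_locked = {.vm = NULL};

        for (size_t i = 0; i < NWD_VMS_MAX; i++) {
                if (nwd_vms[i].id == vm_id) {
                        /* Found an existing slot: drop any tentative claim. */
                        if (vm_locked.vm != NULL) {
                                vm_unlock(&vm_locked);
                        }
                        return vm_lock(&nwd_vms[i]);
                }
                if (vm_locked.vm == NULL &&
                    nwd_vms[i].id == HF_INVALID_VM_ID) {
                        vm_locked = vm_lock(&nwd_vms[i]);
                }
        }

        if (vm_locked.vm != NULL) {
                vm_locked.vm->id = vm_id; /* stamp the claimed slot */
        }
        return vm_locked;
}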
setup_and_discovery.c
  35 void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked) in ffa_setup_rxtx_map_forward() argument
  37 (void)vm_locked; in ffa_setup_rxtx_map_forward()
  40 void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked) in ffa_setup_rxtx_unmap_forward() argument
  42 (void)vm_locked; in ffa_setup_rxtx_unmap_forward()
  120 bool ffa_setup_rx_release_forward(struct vm_locked vm_locked, in ffa_setup_rx_release_forward() argument
  123 (void)vm_locked; in ffa_setup_rx_release_forward()
  129 bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked, in ffa_setup_acquire_receiver_rx()
notifications.c
  231 struct vm_locked vm_locked; in ffa_notifications_bitmap_create() local
  240 vm_locked = vm_find_locked(vm_id); in ffa_notifications_bitmap_create()
  242 CHECK(vm_locked.vm != NULL); in ffa_notifications_bitmap_create()
  245 if (vm_locked.vm->notifications.enabled) { in ffa_notifications_bitmap_create()
  252 vm_locked.vm->notifications.enabled = true; in ffa_notifications_bitmap_create()
  255 vm_locked = ffa_vm_nwd_create(vm_id); in ffa_notifications_bitmap_create()
  258 if (vm_locked.vm == NULL) { in ffa_notifications_bitmap_create()
  264 if (vm_locked.vm->notifications.enabled) { in ffa_notifications_bitmap_create()
  270 vm_locked.vm->notifications.enabled = true; in ffa_notifications_bitmap_create()
  271 vm_locked.vm->vcpu_count = vcpu_count; in ffa_notifications_bitmap_create()
  [all …]
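Both branches of ffa_notifications_bitmap_create() enforce the same enable-once rule: a VM whose notifications are already enabled cannot get a second bitmap. A condensed sketch of the NWD branch visible above, reusing the earlier stand-ins; it assumes struct vm carries notifications.enabled and vcpu_count fields, and the real FF-A error codes are reduced to a bool.

/* Assumed additions to the stand-in struct vm:
 *   struct { bool enabled; } notifications;
 *   uint16_t vcpu_count;
 */
static bool notifications_bitmap_create(uint16_t vm_id, uint16_t vcpu_count)
{
        struct vm_locked vm_locked = ffa_vm_nwd_create(vm_id);

        if (vm_locked.vm == NULL) {
                return false; /* no slot left for this NWD VM */
        }
        if (vm_locked.vm->notifications.enabled) {
                vm_unlock(&vm_locked);
                return false; /* already enabled: refuse a second bitmap */
        }
        vm_locked.vm->notifications.enabled = true;
        vm_locked.vm->vcpu_count = vcpu_count;
        vm_unlock(&vm_locked);
        return true;
}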
vm.h
  15 struct vm_locked ffa_vm_nwd_create(ffa_id_t vm_id);
  17 void ffa_vm_disable_interrupts(struct vm_locked vm_locked);
interrupts.c
  398 struct vm_locked target_vm_locked; in ffa_interrupts_handle_secure_interrupt()
  491 struct vcpu_locked target_locked, struct vm_locked receiver_locked) in ffa_interrupts_inject_notification_pending_interrupt()
  620 struct vm_locked vm_locked) in ffa_interrupts_enable_virtual_interrupts() argument
  627 assert(vm == vm_locked.vm); in ffa_interrupts_enable_virtual_interrupts()
  633 int_desc = vm_locked.vm->interrupt_desc[k]; in ffa_interrupts_enable_virtual_interrupts()
  660 struct vm_locked vm_locked; in ffa_interrupts_reconfigure() local
  668 vm_locked = vm_lock(vm); in ffa_interrupts_reconfigure()
  695 vm_locked, int_id, cpu_find_index(value)->id); in ffa_interrupts_reconfigure()
  707 int_desc = vm_interrupt_set_sec_state(vm_locked, int_id, value); in ffa_interrupts_reconfigure()
  718 int_desc = vm_interrupt_set_enable(vm_locked, int_id, in ffa_interrupts_reconfigure()
  [all …]
indirect_messaging.c
  19 bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked, in ffa_indirect_msg_is_supported()
  20 struct vm_locked receiver_locked) in ffa_indirect_msg_is_supported()
cpu_cycles.c
  797 struct vm_locked vm_locked; in ffa_cpu_cycles_error_32() local
  801 vm_locked = vm_lock(current->vm); in ffa_cpu_cycles_error_32()
  812 ffa_vm_free_resources(vm_locked); in ffa_cpu_cycles_error_32()
  833 vm_unlock(&vm_locked); in ffa_cpu_cycles_error_32()
/src/iommu/

absent.c
  21 bool plat_iommu_unmap_iommus(struct vm_locked vm_locked, struct mpool *ppool) in plat_iommu_unmap_iommus() argument
  23 (void)vm_locked; in plat_iommu_unmap_iommus()
  29 void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin, in plat_iommu_identity_map() argument
  32 (void)vm_locked; in plat_iommu_identity_map()
  39 struct vm_locked vm_locked, in plat_iommu_attach_peripheral() argument
  44 (void)vm_locked; in plat_iommu_attach_peripheral()
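absent.c is the no-IOMMU backend: every plat_iommu hook discards its arguments with (void) casts and reports success, so the rest of the hypervisor never special-cases a missing IOMMU. One stub in that style; the bodies are truncated in the listing, so the return value shown is an assumption.

#include <stdbool.h>

struct mpool; /* opaque in this sketch */
struct vm;
struct vm_locked { struct vm *vm; };

bool plat_iommu_unmap_iommus(struct vm_locked vm_locked, struct mpool *ppool)
{
        /* No IOMMU driver compiled in: nothing to unmap, report success. */
        (void)vm_locked;
        (void)ppool;

        return true;
}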
/src/arch/fake/hypervisor/

ffa.c
  84 void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked) in ffa_setup_rxtx_unmap_forward() argument
  86 (void)vm_locked; in ffa_setup_rxtx_unmap_forward()
  99 bool ffa_setup_rx_release_forward(struct vm_locked vm_locked, in ffa_setup_rx_release_forward() argument
  102 (void)vm_locked; in ffa_setup_rx_release_forward()
  167 void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked) in ffa_setup_rxtx_map_forward() argument
  169 (void)vm_locked; in ffa_setup_rxtx_map_forward()
  457 struct vm_locked vm_locked) in ffa_interrupts_enable_virtual_interrupts() argument
  460 (void)vm_locked; in ffa_interrupts_enable_virtual_interrupts()
  548 (void)vm_locked; in arch_vm_identity_prepare()
  561 (void)vm_locked; in arch_vm_identity_commit()
  [all …]
/src/ffa/hypervisor/

vm.c
  24 struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id) in ffa_vm_find_locked()
  30 return (struct vm_locked){.vm = NULL}; in ffa_vm_find_locked()
  33 struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id) in ffa_vm_find_locked_create()
  52 void ffa_vm_destroy(struct vm_locked to_destroy_locked) in ffa_vm_destroy()
  58 void ffa_vm_free_resources(struct vm_locked vm_locked) in ffa_vm_free_resources() argument
  60 (void)vm_locked; in ffa_vm_free_resources()
indirect_messaging.c
  18 bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked, in ffa_indirect_msg_is_supported()
  19 struct vm_locked receiver_locked) in ffa_indirect_msg_is_supported()
  82 static bool ffa_indirect_msg_return_pending_messages(struct vm_locked vm_locked, in ffa_indirect_msg_return_pending_messages() argument
  86 if (vm_locked.vm->mailbox.state == MAILBOX_STATE_FULL) { in ffa_indirect_msg_return_pending_messages()
  87 *ret = ffa_msg_recv_return(vm_locked.vm); in ffa_indirect_msg_return_pending_messages()
  89 vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY; in ffa_indirect_msg_return_pending_messages()
  109 struct vm_locked vm_locked; in ffa_indirect_msg_recv() local
  129 vm_locked = vm_lock(vm); in ffa_indirect_msg_recv()
  132 if (ffa_indirect_msg_return_pending_messages(vm_locked, &return_code)) { in ffa_indirect_msg_recv()
  164 vm_unlock(&vm_locked); in ffa_indirect_msg_recv()
  [all …]
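Lines 82 to 89 show the pending-message fast path: under the VM lock, a full mailbox is drained into the FFA return value and flipped back to empty. The same logic, filled out with simplified re-declarations of the surrounding types (the real struct ffa_value and struct vm are much larger).

#include <stdbool.h>

enum mailbox_state {
        MAILBOX_STATE_EMPTY,
        MAILBOX_STATE_FULL,
        MAILBOX_STATE_OTHER_WORLD_OWNED,
};

struct ffa_value { unsigned long func; /* remaining args elided */ };
struct vm { struct { enum mailbox_state state; } mailbox; };
struct vm_locked { struct vm *vm; };

/* Builds the FFA return value describing the queued message. */
struct ffa_value ffa_msg_recv_return(const struct vm *vm);

/* Runs under the VM lock: drain a full mailbox into *ret and mark it
 * empty; report false when nothing is pending. */
static bool return_pending_messages(struct vm_locked vm_locked,
                                    struct ffa_value *ret)
{
        if (vm_locked.vm->mailbox.state == MAILBOX_STATE_FULL) {
                *ret = ffa_msg_recv_return(vm_locked.vm);
                vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
                return true;
        }
        return false;
}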
setup_and_discovery.c
  59 void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked) in ffa_setup_rxtx_map_forward() argument
  61 struct vm *vm = vm_locked.vm; in ffa_setup_rxtx_map_forward()
  65 vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY; in ffa_setup_rxtx_map_forward()
  86 vm_locked.vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED; in ffa_setup_rxtx_map_forward()
  91 void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked) in ffa_setup_rxtx_unmap_forward() argument
  97 assert(vm_locked.vm != NULL); in ffa_setup_rxtx_unmap_forward()
  99 id = vm_locked.vm->id; in ffa_setup_rxtx_unmap_forward()
  105 if (!ffa_vm_supports_indirect_messages(vm_locked.vm)) { in ffa_setup_rxtx_unmap_forward()
  257 bool ffa_setup_rx_release_forward(struct vm_locked vm_locked, in ffa_setup_rx_release_forward() argument
  260 struct vm *vm = vm_locked.vm; in ffa_setup_rx_release_forward()
  [all …]
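The two mailbox.state assignments above (lines 65 and 86) are the point of ffa_setup_rxtx_map_forward(): once the hypervisor forwards a VM's RXTX mapping to the SPMC, the RX buffer is owned by the other world; a VM without indirect messaging keeps an ordinary empty mailbox. A hedged sketch of that decision, reusing the previous sketch's types; forward_rxtx_map_to_spmc() is hypothetical and the real forwarding sequence is elided.

bool ffa_vm_supports_indirect_messages(const struct vm *vm);
void forward_rxtx_map_to_spmc(struct vm *vm); /* hypothetical */

static void setup_rxtx_map_forward(struct vm_locked vm_locked)
{
        struct vm *vm = vm_locked.vm;

        if (!ffa_vm_supports_indirect_messages(vm)) {
                /* Not forwarded: mailbox stays locally owned and empty. */
                vm->mailbox.state = MAILBOX_STATE_EMPTY;
                return;
        }

        forward_rxtx_map_to_spmc(vm);
        /* The SPMC owns the RX buffer until it is released back. */
        vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
}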
interrupts.c
  27 struct vcpu_locked target_locked, struct vm_locked receiver_locked) in ffa_interrupts_inject_notification_pending_interrupt()
  39 struct vm_locked vm_locked) in ffa_interrupts_enable_virtual_interrupts() argument
  41 if (vm_locked.vm->notifications.enabled) { in ffa_interrupts_enable_virtual_interrupts()
ffa_memory.c
  82 struct vm_locked other_world_locked, uint32_t share_func, in memory_send_other_world_forward()
  123 struct vm_locked from_locked, struct vm_locked to_locked, in ffa_memory_other_world_send()
  321 struct vm_locked to_locked, ffa_memory_handle_t handle, in ffa_memory_other_world_reclaim()
  455 struct vm_locked other_world_locked, ffa_id_t sender_vm_id, in memory_send_continue_other_world_forward()
  495 struct vm_locked from_locked, struct vm_locked to_locked, in ffa_memory_other_world_send_continue()
notifications.c
  228 struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id, in ffa_notifications_get_from_sp()
  252 struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk, in ffa_notifications_get_framework_notifications()
/src/ffa/

absent.c
  103 void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked) in ffa_setup_rxtx_map_forward() argument
  105 (void)vm_locked; in ffa_setup_rxtx_map_forward()
  113 void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked) in ffa_setup_rxtx_unmap_forward() argument
  115 (void)vm_locked; in ffa_setup_rxtx_unmap_forward()
  129 bool ffa_setup_rx_release_forward(struct vm_locked vm_locked, in ffa_setup_rx_release_forward() argument
  132 (void)vm_locked; in ffa_setup_rx_release_forward()
  148 struct vm_locked receiver_locked) in ffa_indirect_msg_is_supported()
  479 struct vm_locked vm_locked) in ffa_interrupts_enable_virtual_interrupts()
  482 (void)vm_locked; in ffa_interrupts_enable_virtual_interrupts()
  574 void ffa_vm_free_resources(struct vm_locked vm_locked) in ffa_vm_free_resources() argument
  [all …]
/src/arch/aarch64/arm_smmuv3/

arm_smmuv3.c
  1386 bool plat_iommu_unmap_iommus(struct vm_locked vm_locked, struct mpool *ppool) in plat_iommu_unmap_iommus() argument
  1388 (void)vm_locked; in plat_iommu_unmap_iommus()
  1394 void plat_iommu_identity_map(struct vm_locked vm_locked, paddr_t begin, in plat_iommu_identity_map() argument
  1397 (void)vm_locked; in plat_iommu_identity_map()
  1404 struct vm_locked vm_locked, in plat_iommu_attach_peripheral() argument
  1419 vm_id = vm_locked.vm->id; in plat_iommu_attach_peripheral()
  1420 iommu_ptable = vm_locked.vm->iommu_ptables; in plat_iommu_attach_peripheral()
  1423 iommu_ptable_ns = vm_locked.vm->arch.iommu_ptables_ns; in plat_iommu_attach_peripheral()
  1433 if (vm_locked.vm->el0_partition) { in plat_iommu_attach_peripheral()