/src/arch/aarch64/hypervisor/

vm.c
     17  void arch_vm_features_set(struct vm *vm)  in arch_vm_features_set() (argument)
     37  if (!vm_is_primary(vm)) {  in arch_vm_features_set()
     76  bool arch_vm_iommu_init_mm(struct vm *vm, struct mpool *ppool)  in arch_vm_iommu_init_mm() (argument)
     85  if (vm->el0_partition) {  in arch_vm_iommu_init_mm()
     95  ret = ret && mm_ptable_init(&vm->iommu_ptables[k], vm->id,  in arch_vm_iommu_init_mm()
     99  vm->id, false, ppool);  in arch_vm_iommu_init_mm()
    113  bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool)  in arch_vm_init_mm() (argument)
    117  if (vm->el0_partition) {  in arch_vm_init_mm()
    118  return mm_ptable_init(&vm->ptable, vm->id, true, ppool);  in arch_vm_init_mm()
    121  ret = mm_vm_init(&vm->ptable, vm->id, ppool);  in arch_vm_init_mm()
    [all …]
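
The arch_vm_init_mm() hits above (source lines 113 to 121) show the whole branch: an EL0 partition gets a hypervisor-owned stage-1 table, any other VM a stage-2 one. Below is a minimal compilable sketch of that shape with stand-in types and stubbed mm helpers; only the call shapes come from the fragments, and reading the boolean third argument of mm_ptable_init() as a stage-1 selector is an assumption.

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in types: the real Hafnium structures carry far more state. */
    struct mpool { int unused; };
    struct mm_ptable { int unused; };
    struct vm {
        uint16_t id;
        bool el0_partition;
        struct mm_ptable ptable;
    };

    /* Stubbed mm layer, shaped after the calls visible in the fragments. */
    static bool mm_ptable_init(struct mm_ptable *t, uint16_t id, bool stage1,
                               struct mpool *ppool)
    {
        (void)t; (void)id; (void)stage1; (void)ppool;
        return true;
    }

    static bool mm_vm_init(struct mm_ptable *t, uint16_t id, struct mpool *ppool)
    {
        (void)t; (void)id; (void)ppool;
        return true;
    }

    /* EL0 partitions run under a stage-1 regime owned by the hypervisor;
     * every other VM gets a stage-2 translation table. */
    bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool)
    {
        if (vm->el0_partition) {
            return mm_ptable_init(&vm->ptable, vm->id, true, ppool);
        }
        return mm_vm_init(&vm->ptable, vm->id, ppool);
    }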

feature_id.c
    161  void feature_set_traps(struct vm *vm, struct arch_regs *regs)  in feature_set_traps() (argument)
    171  vm->arch.tid3_masks.id_aa64pfr0_el1 = ~0ULL;  in feature_set_traps()
    172  vm->arch.tid3_masks.id_aa64pfr1_el1 = ~0ULL;  in feature_set_traps()
    180  vm->arch.tid3_masks.id_aa64mmfr1_el1 &=  in feature_set_traps()
    184  vm->arch.tid3_masks.id_aa64pfr0_el1 &= ~(  in feature_set_traps()
    189  vm->arch.tid3_masks.id_aa64pfr1_el1 &= ~(  in feature_set_traps()
    195  vm->arch.tid3_masks.id_aa64mmfr1_el1 &=  in feature_set_traps()
    220  vm->arch.tid3_masks.id_aa64dfr0_el1 &=  in feature_set_traps()
    227  vm->arch.tid3_masks.id_aa64dfr0_el1 &=  in feature_set_traps()
    229  vm->arch.tid3_masks.id_aa64dfr0_el1 &=  in feature_set_traps()
    [all …]
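
The feature_set_traps() hits (source lines 161 to 229) sketch a masking scheme for the ID registers covered by the TID3 trap: initialise every mask to all-ones (lines 171 and 172), then clear the fields of features the VM must not see (the &= ~(...) lines). A hedged illustration follows; the SVE field really is ID_AA64PFR0_EL1 bits [35:32], but its use here and the assumption that trapped reads return value & mask are inferred, not taken from the fragments.

    #include <stdbool.h>
    #include <stdint.h>

    /* ID_AA64PFR0_EL1.SVE occupies bits [35:32] of the register. */
    #define ID_AA64PFR0_SVE_MASK (UINT64_C(0xf) << 32)

    struct tid3_masks {
        uint64_t id_aa64pfr0_el1;
        uint64_t id_aa64pfr1_el1;
    };

    /* Start fully transparent, then knock out hidden features. The trap
     * handler is assumed to return (real_value & mask) to the guest. */
    void feature_masks_init(struct tid3_masks *masks, bool hide_sve)
    {
        masks->id_aa64pfr0_el1 = ~UINT64_C(0);
        masks->id_aa64pfr1_el1 = ~UINT64_C(0);

        if (hide_sve) {
            masks->id_aa64pfr0_el1 &= ~ID_AA64PFR0_SVE_MASK;
        }
    }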

psci_handler.c
    170  vcpu_target = vm_get_vcpu(vcpu->vm, cpu_index(c));  in psci_primary_vm_handler()
    264  struct vm *vm = vcpu->vm;  in psci_secondary_vm_handler() (local)
    275  if (target_vcpu_index >= vm->vcpu_count) {  in psci_secondary_vm_handler()
    280  target_vcpu = vcpu_lock(vm_get_vcpu(vm, target_vcpu_index));  in psci_secondary_vm_handler()
    314  struct vm *vm = vcpu->vm;  in psci_secondary_vm_handler() (local)
    319  if (target_vcpu_index >= vm->vcpu_count) {  in psci_secondary_vm_handler()
    324  target_vcpu = vm_get_vcpu(vm, target_vcpu_index);  in psci_secondary_vm_handler()
    380  if (vm_is_primary(vcpu->vm)) {  in psci_handler()
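
Both psci_secondary_vm_handler() guards (source lines 275 and 319) are the same bounds check: a PSCI call naming a vCPU index the VM does not have must fail before the index is dereferenced. A sketch of the pattern; the function name and struct layouts are hypothetical, while INVALID_PARAMETERS = -2 is fixed by the PSCI specification.

    #include <stddef.h>
    #include <stdint.h>

    #define PSCI_RETURN_SUCCESS           INT64_C(0)
    #define PSCI_ERROR_INVALID_PARAMETERS INT64_C(-2) /* per the PSCI spec */

    struct vcpu { int unused; };

    struct vm {
        size_t vcpu_count;
        struct vcpu *vcpus;
    };

    static struct vcpu *vm_get_vcpu(struct vm *vm, size_t index)
    {
        return &vm->vcpus[index];
    }

    /* Validate the guest-chosen index before it touches the vcpus array. */
    int64_t psci_cpu_on_sketch(struct vm *vm, uint64_t target_vcpu_index)
    {
        if (target_vcpu_index >= vm->vcpu_count) {
            return PSCI_ERROR_INVALID_PARAMETERS;
        }

        struct vcpu *target_vcpu = vm_get_vcpu(vm, target_vcpu_index);
        (void)target_vcpu; /* ...reset and start the target here... */
        return PSCI_RETURN_SUCCESS;
    }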

/src/

vm.c
     61  struct vm *vm;  in vm_init() (local)
     77  memset_s(vm, sizeof(*vm), 0, sizeof(*vm));  in vm_init()
     99  vcpu_init(vm_get_vcpu(vm, i), vm);  in vm_init()
    160  struct vm *vm = vm_find(id);  in vm_find_locked() (local)
    185  struct vm_locked vm_lock(struct vm *vm)  in vm_lock() (argument)
    188  .vm = vm,  in vm_lock()
    821  struct vm *vm = vm_locked.vm;  in vm_notifications_framework_get_pending() (local)
    949  struct vm *vm = vcpu->vm;  in vm_interrupts_info_get() (local)
   1188  struct vm *vm = vm_get_boot_vm();  in vm_get_boot_vm_secondary_core() (local)
   1200  struct vm *vm_get_next_boot(struct vm *vm)  in vm_get_next_boot() (argument)
    [all …]
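
vm_lock() (source line 185) returns a struct vm_locked built with a designated initializer (line 188). The wrapper is a capability type: functions that require the lock take a vm_locked rather than a bare struct vm *, so holding the token proves the lock is held. A self-contained sketch of the idiom; the spinlock implementation is a stand-in, not Hafnium's.

    #include <stdbool.h>
    #include <stddef.h>

    struct spinlock { volatile bool held; };

    static void sl_lock(struct spinlock *l)
    {
        while (__atomic_test_and_set(&l->held, __ATOMIC_ACQUIRE)) {
            /* spin */
        }
    }

    static void sl_unlock(struct spinlock *l)
    {
        __atomic_clear(&l->held, __ATOMIC_RELEASE);
    }

    struct vm { struct spinlock lock; };
    struct vm_locked { struct vm *vm; };

    /* Acquire the per-VM lock; the returned token is what lock-requiring
     * helpers accept as a parameter. */
    struct vm_locked vm_lock(struct vm *vm)
    {
        sl_lock(&vm->lock);
        return (struct vm_locked){ .vm = vm };
    }

    void vm_unlock(struct vm_locked *locked)
    {
        sl_unlock(&locked->vm->lock);
        locked->vm = NULL; /* poison so a stale token faults loudly */
    }

Helpers such as vm_notifications_framework_get_pending() (line 821 above) take the token and read vm_locked.vm, which keeps the locking protocol visible at every call site.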

manifest.c
    453  struct manifest_vm *vm,  in parse_vm_common() (argument)
    474  vm->debug_name.data);  in parse_vm_common()
    491  struct manifest_vm *vm,  in parse_vm() (argument)
    495  &vm->kernel_filename));  in parse_vm()
   1080  struct manifest_vm *vm)  in sanity_check_ffa_manifest() (argument)
   1232  vm, sid, &device_id)) {  in map_dma_device_id_to_stream_ids()
   1453  vm->partition.boot_info =  in parse_ffa_manifest()
   1491  vm->partition.mem_regions,  in parse_ffa_manifest()
   1492  &vm->partition.mem_region_count, &vm->partition.rxtx,  in parse_ffa_manifest()
   1534  if (!vm->is_ffa_partition) {  in parse_ffa_partition_package()
    [all …]

manifest_test.cc
    814  vm = &m->vm[0];  in TEST_F()
    824  vm = &m->vm[1];  in TEST_F()
    834  vm = &m->vm[2];  in TEST_F()
   1128  vm = &m->vm[0];  in TEST_F()
   1148  vm = &m->vm[0];  in TEST_F()
   1168  vm = &m->vm[0];  in TEST_F()
   1188  vm = &m->vm[0];  in TEST_F()
   1207  vm = &m->vm[0];  in TEST_F()
   1228  vm = &m->vm[0];  in TEST_F()
   1255  vm = &m->vm[0];  in TEST_F()
    [all …]

api.c
    394  struct vm *vm = vm_locked.vm;  in send_versioned_partition_info_descriptors() (local)
   1412  struct vm *vm;  in api_ffa_run() (local)
   1868  struct vm *vm = current->vm;  in api_ffa_rxtx_unmap() (local)
   1897  vm = vm_locked.vm;  in api_ffa_rxtx_unmap()
   2225  struct vm *vm;  in api_ffa_rx_release() (local)
   2248  vm = vm_locked.vm;  in api_ffa_rx_release()
   2347  struct vm *vm = current->vm;  in api_interrupt_enable() (local)
   3692  struct vm *to = current->vm;  in api_ffa_mem_retrieve_req()
   3948  struct vm *to = current->vm;  in api_ffa_mem_reclaim()
   3971  struct vm *to = current->vm;  in api_ffa_mem_frag_rx()
    [all …]

load.c
    123  vm_locked.vm->mailbox.send, vm_locked.vm->mailbox.recv);  in link_rxtx_to_mailbox()
    263  vm_locked.vm->id, vm_locked.vm->vcpu_count)) {  in load_common()
    290  struct vm *vm;  in load_primary() (local)
    334  if (!vm_is_primary(vm)) {  in load_primary()
    339  vm_locked = vm_lock(vm);  in load_primary()
    411  vm_update_boot(vm);  in load_primary()
    665  struct vm *vm;  in load_secondary() (local)
    737  vm_locked = vm_lock(vm);  in load_secondary()
    813  vcpu = vm_get_vcpu(vm, 0);  in load_secondary()
    938  struct vm *other_world_vm;  in init_other_world_vm()
    [all …]

vcpu.c
     60  void vcpu_init(struct vcpu *vcpu, struct vm *vm)  in vcpu_init() (argument)
     65  vcpu->vm = vm;  in vcpu_init()
     86  size_t index = vcpu - vcpu->vm->vcpus;  in vcpu_index()
    113  struct vm *vm = vcpu_locked.vcpu->vm;  in vcpu_secondary_reset_and_start() (local)
    116  CHECK(vm->id != HF_PRIMARY_VM_ID);  in vcpu_secondary_reset_and_start()
    143  struct vm *vm = current->vm;  in vcpu_handle_page_fault() (local)
    149  locked_vm = vm_lock(vm);  in vcpu_handle_page_fault()
    160  if (!locked_vm.vm->el0_partition) {  in vcpu_handle_page_fault()
    212  struct vm *vm = vcpu->vm;  in vcpu_set_boot_info_gp_reg() (local)
    215  if (vm->boot_info.blob_addr.ipa != 0U) {  in vcpu_set_boot_info_gp_reg()
    [all …]
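
vcpu_index() (source line 86) recovers a vCPU's index by pointer subtraction against its VM's vcpus array, which only works because the array is contiguous and vcpu_init() (line 65) wires in the back-pointer. A sketch of the pair; the array bound and the assert are stand-ins.

    #include <assert.h>
    #include <stddef.h>

    #define MAX_CPUS 8 /* stand-in bound */

    struct vm;

    struct vcpu {
        struct vm *vm; /* back-pointer, set once at init time */
    };

    struct vm {
        struct vcpu vcpus[MAX_CPUS]; /* contiguity makes the subtraction valid */
    };

    void vcpu_init(struct vcpu *vcpu, struct vm *vm)
    {
        vcpu->vm = vm;
    }

    /* Index = offset of this vCPU inside its owning VM's array. Only valid
     * for pointers that actually point into vm->vcpus. */
    size_t vcpu_index(const struct vcpu *vcpu)
    {
        size_t index = (size_t)(vcpu - vcpu->vm->vcpus);

        assert(index < MAX_CPUS);
        return index;
    }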

vm_test.cc
     37  using struct_vm = struct vm;
     44  class vm : public ::testing::Test  (class)
     79  struct_vm *vm;  in TEST_F() (local)
     84  vm_locked = vm_lock(vm);  in TEST_F()
     85  ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));  in TEST_F()
     98  TEST_F(vm, vm_boot_order)  in TEST_F() (argument)
    101  struct_vm *vm;  in TEST_F() (local)
    153  vm = vm_get_boot_vm();  in TEST_F()
    156  EXPECT_TRUE(vm != NULL);  in TEST_F()
    158  vm = vm_get_next_boot(vm);  in TEST_F()
    [all …]

/src/ffa/spmc/

vm.c
     24  static struct vm nwd_vms[MAX_VMS];
     36  struct vm *nwd_vms;
     85  if (vm_locked.vm != NULL) {  in ffa_vm_nwd_create()
     91  if (vm_locked.vm == NULL) {  in ffa_vm_nwd_create()
    100  vm_locked.vm->id = vm_id;  in ffa_vm_nwd_create()
    110  struct vm *vm = to_destroy_locked.vm;  in ffa_vm_destroy() (local)
    115  if (!vm_id_is_current_world(vm->id) && vm->id != HF_HYPERVISOR_VM_ID &&  in ffa_vm_destroy()
    116  !vm->notifications.enabled && vm->mailbox.send == NULL &&  in ffa_vm_destroy()
    117  vm->mailbox.recv == NULL) {  in ffa_vm_destroy()
    146  bool ffa_vm_managed_exit_supported(struct vm *vm)  in ffa_vm_managed_exit_supported() (argument)
    [all …]
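
nwd_vms[] (source line 24) is a fixed pool of proxy entries for normal-world VMs, and the ffa_vm_destroy() condition (lines 115 to 117) only lets a slot be recycled once it is fully quiescent: not a current-world ID, not the hypervisor ID, notifications disabled, both mailbox pages unmapped. The predicate restated with stand-in types; treating bit 15 as the secure-world ID marker follows FF-A convention but is an assumption here.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define HF_HYPERVISOR_VM_ID 0 /* stand-in value */

    struct vm {
        uint16_t id;
        struct { bool enabled; } notifications;
        struct { void *send; void *recv; } mailbox;
    };

    /* On the SPMC side the "current world" is the secure world; FF-A
     * marks secure-world IDs with bit 15. */
    static bool vm_id_is_current_world(uint16_t id)
    {
        return (id & 0x8000U) != 0U;
    }

    /* Only quiescent normal-world proxy entries may be reclaimed. */
    static bool ffa_vm_slot_reclaimable(const struct vm *vm)
    {
        return !vm_id_is_current_world(vm->id) &&
               vm->id != HF_HYPERVISOR_VM_ID &&
               !vm->notifications.enabled &&
               vm->mailbox.send == NULL &&
               vm->mailbox.recv == NULL;
    }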

interrupts.c
     45  struct vm *vm = vm_find_index(index);  in ffa_interrupts_find_target_vcpu_secure_interrupt() (local)
     49  vm->interrupt_desc[j];  in ffa_interrupts_find_target_vcpu_secure_interrupt()
    493  struct vm *next_vm = target_locked.vcpu->vm;  in ffa_interrupts_inject_notification_pending_interrupt()
    581  struct vm *vm;  in ffa_interrupts_enable_virtual_maintenance_interrupts() (local)
    585  vm = current->vm;  in ffa_interrupts_enable_virtual_maintenance_interrupts()
    623  struct vm *vm;  in ffa_interrupts_enable_virtual_interrupts() (local)
    626  vm = current->vm;  in ffa_interrupts_enable_virtual_interrupts()
    627  assert(vm == vm_locked.vm);  in ffa_interrupts_enable_virtual_interrupts()
    629  if (vm->el0_partition) {  in ffa_interrupts_enable_virtual_interrupts()
    659  struct vm *vm = current->vm;  in ffa_interrupts_reconfigure() (local)
    [all …]

direct_messaging.c
    359  struct vm *vm = next_vm;  in find_next_vcpu_to_inform() (local)
    362  while (vm != NULL) {  in find_next_vcpu_to_inform()
    367  if (!vm_is_up(vm) &&  in find_next_vcpu_to_inform()
    378  vcpu->vm->id);  in find_next_vcpu_to_inform()
    382  vm = vm_get_next_boot(vm);  in find_next_vcpu_to_inform()
    397  struct vm *vm = vm_find_index(index);  in psci_cpu_off_success_fwk_resp() (local)
    399  if (vm_is_mp(vm)) {  in psci_cpu_off_success_fwk_resp()
    435  struct vm *vm = vm_get_boot_vm();  in handle_psci_framework_msg() (local)
    593  struct vm *next_vm;  in handle_sp_cpu_off_framework_resp()
    617  current->vm->id);  in handle_sp_cpu_off_framework_resp()
    [all …]
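
find_next_vcpu_to_inform() (source lines 359 to 382) rests on a boot-order traversal: start at some VM and advance with vm_get_next_boot() until a candidate matches or the chain ends. The skeleton of that walk, with the match test abstracted into a caller-supplied predicate and the chain representation a stand-in:

    #include <stdbool.h>
    #include <stddef.h>

    struct vm {
        struct vm *next_boot; /* stand-in: boot order as a singly linked chain */
    };

    static struct vm *vm_get_next_boot(struct vm *vm)
    {
        return vm->next_boot;
    }

    /* Walk VMs in boot order; return the first one the predicate accepts,
     * or NULL once the chain is exhausted. */
    static struct vm *find_next_vm(struct vm *start,
                                   bool (*wants)(const struct vm *))
    {
        for (struct vm *vm = start; vm != NULL; vm = vm_get_next_boot(vm)) {
            if (wants(vm)) {
                return vm;
            }
        }
        return NULL;
    }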

notifications.c
     35  if (current->vm->id != HF_HYPERVISOR_VM_ID) {  in ffa_notifications_is_bitmap_access_valid()
     59  ffa_id_t current_vm_id = current->vm->id;  in ffa_notifications_is_bind_valid()
    129  ffa_id_t current_vm_id = current->vm->id;  in ffa_notifications_is_set_valid()
    190  ffa_id_t current_vm_id = current->vm->id;  in ffa_notifications_is_get_valid()
    242  CHECK(vm_locked.vm != NULL);  in ffa_notifications_bitmap_create()
    245  if (vm_locked.vm->notifications.enabled) {  in ffa_notifications_bitmap_create()
    258  if (vm_locked.vm == NULL) {  in ffa_notifications_bitmap_create()
    264  if (vm_locked.vm->notifications.enabled) {  in ffa_notifications_bitmap_create()
    271  vm_locked.vm->vcpu_count = vcpu_count;  in ffa_notifications_bitmap_create()
    294  if (to_destroy_locked.vm == NULL) {  in ffa_notifications_bitmap_destroy()
    [all …]

cpu_cycles.c
     45  struct vm *vm;  in ffa_cpu_cycles_run_checks() (local)
     49  vm = vm_find(target_vm_id);  in ffa_cpu_cycles_run_checks()
     50  if (vm == NULL) {  in ffa_cpu_cycles_run_checks()
     54  if (vm_is_mp(vm) && vm_is_mp(current->vm) &&  in ffa_cpu_cycles_run_checks()
     71  if (!vm_is_primary(current->vm) &&  in ffa_cpu_cycles_run_checks()
     82  if (vm_id_is_current_world(current->vm->id) &&  in ffa_cpu_cycles_run_checks()
    286  struct vm *next_vm;  in sp_boot_next()
    300  current->vm->boot_order);  in sp_boot_next()
    304  next_vm = vm_get_next_boot(current->vm);  in sp_boot_next()
    414  current->rt_model, current->vm->id,  in ffa_cpu_cycles_msg_wait_prepare()
    [all …]

indirect_messaging.c
     22  struct vm *sender_vm = sender_locked.vm;  in ffa_indirect_msg_is_supported()
     23  struct vm *receiver_vm = receiver_locked.vm;  in ffa_indirect_msg_is_supported()

ffa_memory.c
     73  if (!current->vm->el0_partition) {  in ffa_memory_is_mem_perm_get_valid()
     75  current->vm->id);  in ffa_memory_is_mem_perm_get_valid()
     90  struct vm *from, uint32_t share_func,  in ffa_memory_other_world_mem_send()
    115  struct vm *to, ffa_memory_handle_t handle,  in ffa_memory_other_world_mem_reclaim()
    128  struct vm *from, void *fragment, uint32_t fragment_length,  in ffa_memory_other_world_mem_send_continue()

/src/ffa/hypervisor/

setup_and_discovery.c
     61  struct vm *vm = vm_locked.vm;  in ffa_setup_rxtx_map_forward() (local)
     62  struct vm *other_world;  in ffa_setup_rxtx_map_forward()
     69  if (!ffa_vm_supports_indirect_messages(vm)) {  in ffa_setup_rxtx_map_forward()
     81  vm->id, (uintptr_t)vm->mailbox.recv,  in ffa_setup_rxtx_map_forward()
     82  (uintptr_t)vm->mailbox.send);  in ffa_setup_rxtx_map_forward()
     97  assert(vm_locked.vm != NULL);  in ffa_setup_rxtx_unmap_forward()
     99  id = vm_locked.vm->id;  in ffa_setup_rxtx_unmap_forward()
    143  const struct vm *tee = vm_find(HF_TEE_VM_ID);  in ffa_setup_partition_info_get_forward()
    260  struct vm *vm = vm_locked.vm;  in ffa_setup_rx_release_forward() (local)
    261  ffa_id_t vm_id = vm->id;  in ffa_setup_rx_release_forward()
    [all …]

vm.c
     11  bool ffa_vm_supports_indirect_messages(struct vm *vm)  in ffa_vm_supports_indirect_messages() (argument)
     13  return vm->ffa_version >= FFA_VERSION_1_1 &&  in ffa_vm_supports_indirect_messages()
     14  vm_supports_messaging_method(vm, FFA_PARTITION_INDIRECT_MSG);  in ffa_vm_supports_indirect_messages()
     17  bool ffa_vm_managed_exit_supported(struct vm *vm)  in ffa_vm_managed_exit_supported() (argument)
     19  (void)vm;  in ffa_vm_managed_exit_supported()
     30  return (struct vm_locked){.vm = NULL};  in ffa_vm_find_locked()
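
ffa_vm_supports_indirect_messages() is visible in full above (source lines 11 to 14): a partition may use indirect messaging only if it negotiated FF-A v1.1 or later and declared the method in its manifest. Restated as a self-contained sketch; the version encoding (major << 16 | minor) follows the FF-A specification, while the bitmap layout is a stand-in.

    #include <stdbool.h>
    #include <stdint.h>

    /* FF-A versions compare numerically as (major << 16) | minor. */
    #define FFA_VERSION_1_1            ((UINT32_C(1) << 16) | UINT32_C(1))
    #define FFA_PARTITION_INDIRECT_MSG (UINT32_C(1) << 0) /* stand-in bit */

    struct vm {
        uint32_t ffa_version;      /* negotiated via FFA_VERSION */
        uint32_t messaging_method; /* bitmap from the partition manifest */
    };

    static bool vm_supports_messaging_method(const struct vm *vm,
                                             uint32_t method)
    {
        return (vm->messaging_method & method) != 0U;
    }

    bool ffa_vm_supports_indirect_messages(const struct vm *vm)
    {
        return vm->ffa_version >= FFA_VERSION_1_1 &&
               vm_supports_messaging_method(vm, FFA_PARTITION_INDIRECT_MSG);
    }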

indirect_messaging.c
     87  *ret = ffa_msg_recv_return(vm_locked.vm);  in ffa_indirect_msg_return_pending_messages()
    107  struct vm *vm = current_locked.vcpu->vm;  in ffa_indirect_msg_recv() (local)
    116  if (vm_is_primary(vm)) {  in ffa_indirect_msg_recv()
    129  vm_locked = vm_lock(vm);  in ffa_indirect_msg_recv()
    156  .arg1 = ffa_vm_vcpu(vm->id,  in ffa_indirect_msg_recv()
    184  if (vm_is_primary(to.vm)) {  in deliver_msg()
    190  primary_ret = ffa_msg_recv_return(to.vm);  in deliver_msg()
    197  to.vm->mailbox.state = MAILBOX_STATE_FULL;  in deliver_msg()
    200  if (to.vm->id == HF_TEE_VM_ID) {  in deliver_msg()
    236  struct vm *from = current->vm;  in ffa_indirect_msg_send()
    [all …]
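
ffa_indirect_msg_recv() rejects the primary VM before doing any work (source line 116). Read together with the deliver_msg() hits at lines 184 to 190, the apparent rationale is that the primary is the scheduler: it learns about messages through run-call return values rather than by blocking in FFA_MSG_WAIT. A sketch of just that guard; the error encoding and ID value are stand-ins, and the rationale is inferred rather than stated in the fragments.

    #include <stdbool.h>
    #include <stdint.h>

    #define FFA_RET_NOT_SUPPORTED INT64_C(-1) /* stand-in error encoding */
    #define HF_PRIMARY_VM_ID      UINT16_C(1) /* stand-in ID value */

    struct vm { uint16_t id; };

    static bool vm_is_primary(const struct vm *vm)
    {
        return vm->id == HF_PRIMARY_VM_ID;
    }

    /* The scheduler VM must never block waiting for a message; reject it
     * before any mailbox state is touched. */
    int64_t ffa_msg_recv_guard(struct vm *vm)
    {
        if (vm_is_primary(vm)) {
            return FFA_RET_NOT_SUPPORTED;
        }
        /* ...lock the VM, return a pending message or block the vCPU... */
        return 0;
    }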

ffa_memory.c
     65  if (!current->vm->el0_partition) {  in ffa_memory_is_mem_perm_get_valid()
     67  current->vm->id);  in ffa_memory_is_mem_perm_get_valid()
    273  struct vm *from, uint32_t share_func,  in ffa_memory_other_world_mem_send()
    277  struct vm *to;  in ffa_memory_other_world_mem_send()
    291  vm_to_from_lock.vm1.vm->id);  in ffa_memory_other_world_mem_send()
    427  struct vm *to, ffa_memory_handle_t handle,  in ffa_memory_other_world_mem_reclaim()
    431  struct vm *from = vm_find(HF_TEE_VM_ID);  in ffa_memory_other_world_mem_reclaim()
    506  from_locked.vm->id, page_pool);  in ffa_memory_other_world_send_continue()
    522  to_locked.vm->mailbox.recv == NULL) {  in ffa_memory_other_world_send_continue()
    627  to_locked, from_locked.vm->id, fragment,  in ffa_memory_other_world_send_continue()
    [all …]

direct_messaging.c
     24  ffa_id_t current_vm_id = current->vm->id;  in ffa_direct_msg_is_direct_request_valid()
     32  sender_vm_id == current_vm_id && vm_is_primary(current->vm);  in ffa_direct_msg_is_direct_request_valid()
     35  bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,  in ffa_direct_msg_is_direct_request_supported()
     36  struct vm *receiver_vm,  in ffa_direct_msg_is_direct_request_supported()
     57  ffa_id_t current_vm_id = current->vm->id;  in ffa_direct_msg_is_direct_response_valid()
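
The fragment at source line 32 gives the hypervisor-side rule for direct requests: the sender ID must be the caller's own, and only the primary VM may originate one. As a standalone predicate (types and the primary-ID value are stand-ins, and the real function validates more than these two conditions):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint16_t ffa_id_t;

    #define HF_PRIMARY_VM_ID ((ffa_id_t)1) /* stand-in ID value */

    struct vm { ffa_id_t id; };
    struct vcpu { struct vm *vm; };

    static bool vm_is_primary(const struct vm *vm)
    {
        return vm->id == HF_PRIMARY_VM_ID;
    }

    /* No spoofed sender IDs, and without an SPMC only the scheduler VM
     * gets to send direct requests. */
    bool direct_request_valid(const struct vcpu *current, ffa_id_t sender_vm_id)
    {
        ffa_id_t current_vm_id = current->vm->id;

        return sender_vm_id == current_vm_id && vm_is_primary(current->vm);
    }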

/src/arch/fake/hypervisor/

ffa.c
     48  struct vm *receiver_vm,  in ffa_direct_msg_is_direct_request_supported()
    173  ffa_id_t caller_id, const struct vm *target)  in ffa_setup_partition_properties()
    180  bool ffa_vm_managed_exit_supported(struct vm *vm)  in ffa_vm_managed_exit_supported() (argument)
    182  (void)vm;  in ffa_vm_managed_exit_supported()
    279  return (struct vm_locked){.vm = NULL};  in ffa_vm_find_locked()
    285  return (struct vm_locked){.vm = NULL};  in ffa_vm_find_locked_create()
    464  struct vm *from, uint32_t share_func,  in ffa_memory_other_world_mem_send()
    529  bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool)  in arch_vm_init_mm() (argument)
    531  (void)vm;  in arch_vm_init_mm()
    537  bool arch_vm_iommu_init_mm(struct vm *vm, struct mpool *ppool)  in arch_vm_iommu_init_mm() (argument)
    [all …]
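
The /src/arch/fake/hypervisor/ entries are the host-test backend: each arch hook is a stub that voids its parameters and reports success, so the portable /src code can be unit-tested on the build machine. The arch_vm_init_mm() stub (source lines 529 to 531) is typical:

    #include <stdbool.h>

    struct vm;    /* opaque: the fake backend never looks inside */
    struct mpool;

    /* Fake-arch stub: there are no page tables on the host, so report
     * success; the (void) casts silence unused-parameter warnings. */
    bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool)
    {
        (void)vm;
        (void)ppool;
        return true;
    }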

/src/arch/aarch64/plat/psci/

spmc.c
     61  struct vm *other_world_vm = vm_find(HF_OTHER_WORLD_ID);  in plat_psci_switch_to_other_world()
     90  static struct vm *plat_psci_get_boot_vm(struct cpu *c)  in plat_psci_get_boot_vm()
     92  struct vm *boot_vm;  in plat_psci_get_boot_vm()
    122  struct vm *boot_vm;  in plat_psci_cpu_resume()
    147  vcpu_secondary_reset_and_start(vcpu_locked, boot_vcpu->vm->secondary_ep,  in plat_psci_cpu_resume()

hypervisor.c
     63  struct vm *vm = vm_get_boot_vm();  in plat_psci_cpu_resume() (local)
     66  vcpu = vm_get_vcpu(vm, cpu_index(c));  in plat_psci_cpu_resume()
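
The hypervisor-side plat_psci_cpu_resume() (source lines 63 to 66) pins a 1:1 mapping between physical cores and the boot VM's vCPUs: the vCPU woken on a resuming core is simply the one at that core's index. A sketch with stand-in cpu and vm layouts:

    #include <stddef.h>

    #define MAX_CPUS 8 /* stand-in bound */

    struct cpu { size_t index; };
    struct vcpu { int unused; };
    struct vm { struct vcpu vcpus[MAX_CPUS]; };

    static struct vm boot_vm;

    static struct vm *vm_get_boot_vm(void)
    {
        return &boot_vm;
    }

    static size_t cpu_index(const struct cpu *c)
    {
        return c->index;
    }

    static struct vcpu *vm_get_vcpu(struct vm *vm, size_t index)
    {
        return &vm->vcpus[index];
    }

    /* Resume path: boot VM vCPUs map 1:1 onto physical cores. */
    struct vcpu *resume_target(const struct cpu *c)
    {
        return vm_get_vcpu(vm_get_boot_vm(), cpu_index(c));
    }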