/src/arch/aarch64/hypervisor/
fpu.c
  13   void arch_fpu_state_save_to_vcpu(struct vcpu *vcpu)    in arch_fpu_state_save_to_vcpu() argument
  20   : "=r"(vcpu->regs.fpsr), "=r"(vcpu->regs.fpcr));    in arch_fpu_state_save_to_vcpu()
  23   void arch_fpu_regs_save_to_vcpu(struct vcpu *vcpu)    in arch_fpu_regs_save_to_vcpu() argument
  45   : "r"(&vcpu->regs.fp));    in arch_fpu_regs_save_to_vcpu()
  48   void arch_fpu_save_to_vcpu(struct vcpu *vcpu)    in arch_fpu_save_to_vcpu() argument
  51   arch_fpu_regs_save_to_vcpu(vcpu);    in arch_fpu_save_to_vcpu()
  54   void arch_fpu_state_restore_from_vcpu(struct vcpu *vcpu)    in arch_fpu_state_restore_from_vcpu() argument
  62   : "r"(vcpu->regs.fpsr), "r"(vcpu->regs.fpcr));    in arch_fpu_state_restore_from_vcpu()
  65   void arch_fpu_regs_restore_from_vcpu(struct vcpu *vcpu)    in arch_fpu_regs_restore_from_vcpu() argument
  87   : "r"(&vcpu->regs.fp));    in arch_fpu_regs_restore_from_vcpu()
  [all …]
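From the excerpts, the state half of the context moves FPSR and FPCR between the hardware registers and vcpu->regs through inline-assembly operands ("=r" outputs on save, "r" inputs on restore). A minimal sketch of the save side, with a toy struct standing in for the saved-state slots; the mrs sequence is an assumption consistent with the operand lists shown above, not Hafnium's verbatim code:

    #include <stdint.h>

    typedef uint64_t uintreg_t;

    /* Toy stand-in for the fpsr/fpcr slots of struct arch_regs. */
    struct fpu_state {
            uintreg_t fpsr;
            uintreg_t fpcr;
    };

    /* Read the FP status/control registers into the saved state,
     * matching the "=r"(...fpsr), "=r"(...fpcr) outputs at line 20. */
    static inline void fpu_state_save(struct fpu_state *s)
    {
            __asm__ volatile(
                    "mrs %0, fpsr\n\t"
                    "mrs %1, fpcr"
                    : "=r"(s->fpsr), "=r"(s->fpcr));
    }

The restore path would mirror this with msr and "r" inputs, consistent with the operands shown at lines 62 and 87.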
handler.c
  78    void complete_saving_state(struct vcpu *vcpu)    in complete_saving_state() argument
  96    void begin_restoring_state(struct vcpu *vcpu)    in begin_restoring_state() argument
  147   void maybe_invalidate_tlb(struct vcpu *vcpu)    in maybe_invalidate_tlb() argument
  599   static void vcpu_update_virtual_interrupts(struct vcpu *vcpu)    in vcpu_update_virtual_interrupts() argument
  675   static struct vcpu *smc_handler(struct vcpu *vcpu)    in smc_handler() argument
  698   struct vcpu *smc_handler_from_nwd(struct vcpu *vcpu)    in smc_handler_from_nwd() argument
  887   static struct vcpu *hvc_handler(struct vcpu *vcpu)    in hvc_handler() argument
  1066  const struct vcpu *vcpu,    in fault_info_init() argument
  1114  struct vcpu *vcpu = current();    in sync_lower_exception() local
  1210  struct vcpu *next = smc_handler(vcpu);    in sync_lower_exception()
  [all …]
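sync_lower_exception() (lines 1114 and 1210) captures the running vcpu with current() and lets smc_handler() choose the next vcpu to resume. A sketch of the usual AArch64 shape of such a dispatcher, decoding the exception class from ESR_EL2 bits [31:26]; the EC values are architectural, while the function and stub names here are assumptions:

    typedef unsigned long uintreg_t;

    struct vcpu;
    struct vcpu *smc_handler_stub(struct vcpu *vcpu); /* hypothetical */
    struct vcpu *hvc_handler_stub(struct vcpu *vcpu); /* hypothetical */

    /* Route a synchronous exception taken from a lower EL by its
     * exception class: 0x16 is HVC and 0x17 is SMC from AArch64. */
    struct vcpu *dispatch_sync_exception(struct vcpu *vcpu, uintreg_t esr)
    {
            switch ((esr >> 26) & 0x3fUL) {
            case 0x16: /* HVC from AArch64 */
                    return hvc_handler_stub(vcpu);
            case 0x17: /* SMC from AArch64 */
                    return smc_handler_stub(vcpu);
            default:
                    return vcpu; /* resume the same vcpu */
            }
    }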
offsets.c
  19  DEFINE_OFFSETOF(VCPU_VM, struct vcpu, vm)
  20  DEFINE_OFFSETOF(VCPU_CPU, struct vcpu, cpu)
  21  DEFINE_OFFSETOF(VCPU_REGS, struct vcpu, regs)
  22  DEFINE_OFFSETOF(VCPU_LAZY, struct vcpu, regs.lazy)
  23  DEFINE_OFFSETOF(VCPU_FREGS, struct vcpu, regs.fp)
  24  DEFINE_OFFSETOF(VCPU_FPSR, struct vcpu, regs.fpsr)
  25  DEFINE_OFFSETOF(VCPU_TIMER, struct vcpu, regs.arch_timer)
  27  DEFINE_OFFSETOF(VCPU_PAC, struct vcpu, regs.pac)
  31  DEFINE_OFFSETOF(VCPU_MTE, struct vcpu, regs.mte)
  37  DEFINE_OFFSETOF(VCPU_GIC, struct vcpu, regs.gic)
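Each DEFINE_OFFSETOF exports a struct vcpu field offset (VCPU_REGS, VCPU_FREGS, VCPU_FPSR, and so on) as a constant the assembly context-switch code can use to index into a vcpu. One common construction for such a macro, sketched here as an assumption rather than Hafnium's actual definition, emits each offset as an immediate that a build step scrapes out of the compiler's assembly output:

    #include <stddef.h>

    /* Toy layout so the example is self-contained. */
    struct arch_regs_toy { unsigned long r[31]; };
    struct vcpu_toy { void *vm; void *cpu; struct arch_regs_toy regs; };

    /* Emit "->SYM <offset>" into the generated assembly; a build
     * script turns these markers into #defines for .S files. */
    #define DEFINE_OFFSETOF(sym, type, field)                \
            __asm__ volatile(".ascii \"->" #sym " %c0\""     \
                             :: "i"(offsetof(type, field)))

    void gen_offsets(void)
    {
            DEFINE_OFFSETOF(VCPU_REGS, struct vcpu_toy, regs);
    }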
psci_handler.c
  36   bool psci_primary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,    in psci_primary_vm_handler() argument
  41   struct vcpu *vcpu_target;    in psci_primary_vm_handler()
  153  cpu_off(vcpu->cpu);    in psci_primary_vm_handler()
  226  bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,    in psci_secondary_vm_handler() argument
  228  struct vcpu **next)    in psci_secondary_vm_handler()
  264  struct vm *vm = vcpu->vm;    in psci_secondary_vm_handler()
  304  *next = api_vcpu_off(vcpu);    in psci_secondary_vm_handler()
  314  struct vm *vm = vcpu->vm;    in psci_secondary_vm_handler()
  315  struct vcpu *target_vcpu;    in psci_secondary_vm_handler()
  376  bool psci_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,    in psci_handler() argument
  [all …]
simd.c
  47   void plat_restore_ns_simd_context(struct vcpu *vcpu)    in plat_restore_ns_simd_context() argument
  56   assert(vcpu->vm->id == HF_HYPERVISOR_VM_ID);    in plat_restore_ns_simd_context()
  57   cpu_id = cpu_index(vcpu->cpu);    in plat_restore_ns_simd_context()
  97   arch_fpu_state_restore_from_vcpu(vcpu);    in plat_restore_ns_simd_context()
  199  arch_fpu_regs_restore_from_vcpu(vcpu);    in plat_restore_ns_simd_context()
  230  void plat_save_ns_simd_context(struct vcpu *vcpu)    in plat_save_ns_simd_context() argument
  240  assert(vcpu->vm->id == HF_HYPERVISOR_VM_ID);    in plat_save_ns_simd_context()
  241  cpu_id = cpu_index(vcpu->cpu);    in plat_save_ns_simd_context()
  244  smc_fid = vcpu->regs.r[0];    in plat_save_ns_simd_context()
  282  arch_fpu_state_save_to_vcpu(vcpu);    in plat_save_ns_simd_context()
  [all …]
psci_handler.h
  17  bool psci_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
  19  struct vcpu **next);
cpu.c
  101  void arch_regs_reset(struct vcpu *vcpu)    in arch_regs_reset() argument
  103  ffa_id_t vm_id = vcpu->vm->id;    in arch_regs_reset()
  104  bool is_primary = vm_is_primary(vcpu->vm);    in arch_regs_reset()
  105  cpu_id_t vcpu_id = is_primary ? vcpu->cpu->id : vcpu_index(vcpu);    in arch_regs_reset()
  108  struct arch_regs *r = &vcpu->regs;    in arch_regs_reset()
  127  if (vcpu->vm->el0_partition) {    in arch_regs_reset()
  147  get_hcr_el2_value(vm_id, vcpu->vm->el0_partition);    in arch_regs_reset()
  150  if (vcpu->vm->el0_partition) {    in arch_regs_reset()
  181  (uintpaddr_t)(vcpu->vm->arch.ptable_ns.root_tables) |    in arch_regs_reset()
  209  feature_set_traps(vcpu->vm, r);    in arch_regs_reset()
  [all …]
el1_physical_timer.h
  19  bool el1_physical_timer_process_access(struct vcpu *vcpu, uintreg_t esr);
debug_el1.h
  19  bool debug_el1_process_access(struct vcpu *vcpu, ffa_id_t vm_id,
/src/arch/aarch64/inc/hf/arch/
fpu.h
  15  void arch_fpu_save_to_vcpu(struct vcpu *vcpu);
  16  void arch_fpu_regs_save_to_vcpu(struct vcpu *vcpu);
  17  void arch_fpu_state_save_to_vcpu(struct vcpu *vcpu);
  18  void arch_fpu_restore_from_vcpu(struct vcpu *vcpu);
  19  void arch_fpu_regs_restore_from_vcpu(struct vcpu *vcpu);
  20  void arch_fpu_state_restore_from_vcpu(struct vcpu *vcpu);
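The header splits the FPU context into a register-file half (regs_) and a control/status half (state_, covering FPSR/FPCR), with arch_fpu_save_to_vcpu() and arch_fpu_restore_from_vcpu() wrapping both; fpu.c line 51 shows the combined save delegating to the regs variant. A hypothetical caller pairing the wrappers across a vcpu switch (switch_fpu_context() is illustrative, not part of this API):

    #include "hf/arch/fpu.h" /* assumed include path, per the tree above */

    /* Save the outgoing vcpu's FPU context, then install the
     * incoming vcpu's. */
    void switch_fpu_context(struct vcpu *prev, struct vcpu *next)
    {
            arch_fpu_save_to_vcpu(prev);      /* regs + FPSR/FPCR */
            arch_fpu_restore_from_vcpu(next);
    }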
/src/
timer_mgmt.c
  19   static void timer_list_add_vcpu(struct cpu *cpu, struct vcpu *vcpu)    in timer_list_add_vcpu() argument
  37   static void timer_list_remove_vcpu(struct cpu *cpu, struct vcpu *vcpu)    in timer_list_remove_vcpu() argument
  50   void timer_vcpu_manage(struct vcpu *vcpu)    in timer_vcpu_manage() argument
  52   assert(vcpu != NULL);    in timer_vcpu_manage()
  59   timer_list_add_vcpu(vcpu->cpu, vcpu);    in timer_vcpu_manage()
  61   timer_list_remove_vcpu(vcpu->cpu, vcpu);    in timer_vcpu_manage()
  69   static inline bool timer_is_list_end(struct vcpu *vcpu,    in timer_is_list_end() argument
  82   struct vcpu *it_vcpu = NULL;    in timer_find_vcpu_nearest_deadline()
  127  struct vcpu *timer_find_target_vcpu(struct vcpu *current)    in timer_find_target_vcpu()
  129  struct vcpu *target_vcpu;    in timer_find_target_vcpu()
  [all …]
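timer_vcpu_manage() (lines 50-61) keeps a per-cpu list of vcpus with live timers: an armed timer adds the vcpu to its cpu's list, a disarmed one removes it, and timer_find_vcpu_nearest_deadline() later scans that list. A sketch of the add-or-remove decision; vcpu_timer_is_armed() is a hypothetical predicate standing in for whatever condition the real code tests:

    #include <assert.h>
    #include <stdbool.h>

    struct cpu;
    struct vcpu { struct cpu *cpu; };            /* toy stand-in */

    bool vcpu_timer_is_armed(struct vcpu *vcpu); /* hypothetical */
    void timer_list_add_vcpu(struct cpu *cpu, struct vcpu *vcpu);
    void timer_list_remove_vcpu(struct cpu *cpu, struct vcpu *vcpu);

    /* Track only vcpus whose timer can fire: add on arm, drop on
     * disarm, mirroring the excerpt's two branches. */
    void timer_vcpu_manage(struct vcpu *vcpu)
    {
            assert(vcpu != NULL);
            if (vcpu_timer_is_armed(vcpu)) {
                    timer_list_add_vcpu(vcpu->cpu, vcpu);
            } else {
                    timer_list_remove_vcpu(vcpu->cpu, vcpu);
            }
    }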
vcpu.c
  24   struct vcpu_locked vcpu_lock(struct vcpu *vcpu)    in vcpu_lock() argument
  27   .vcpu = vcpu,    in vcpu_lock()
  60   void vcpu_init(struct vcpu *vcpu, struct vm *vm)    in vcpu_init() argument
  62   memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu));    in vcpu_init()
  81   vcpu.vcpu->state = VCPU_STATE_WAITING;    in vcpu_on()
  86   size_t index = vcpu - vcpu->vm->vcpus;    in vcpu_index()
  101  return (vcpu.vcpu->state == VCPU_STATE_OFF);    in vcpu_is_off()
  200  void vcpu_set_phys_core_idx(struct vcpu *vcpu)    in vcpu_set_phys_core_idx() argument
  495  struct vcpu *vcpu;    in vcpu_secure_interrupt_complete() local
  497  vcpu = vcpu_locked.vcpu;    in vcpu_secure_interrupt_complete()
  [all …]
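Two idioms are visible here: vcpu_lock() returns a struct vcpu_locked wrapper so that holding the lock is recorded in the type, and vcpu_index() (line 86) recovers a vcpu's index by pointer arithmetic against its vm's embedded vcpu array. A self-contained sketch of the index computation, with toy layouts:

    #include <stddef.h>

    struct vm;

    /* Toy layouts; the real structs carry far more state. */
    struct vcpu { struct vm *vm; };
    struct vm { struct vcpu vcpus[4]; };

    /* A vcpu's index is its distance from the start of its vm's
     * vcpu array, exactly as at line 86 of the excerpt. */
    size_t vcpu_index(struct vcpu *vcpu)
    {
            return (size_t)(vcpu - vcpu->vm->vcpus);
    }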
hf_ipi.c
  45   struct vcpu *hf_ipi_get_pending_target_vcpu(struct vcpu *current)    in hf_ipi_get_pending_target_vcpu()
  48   struct vcpu *target_vcpu;    in hf_ipi_get_pending_target_vcpu()
  84   target_vcpu = CONTAINER_OF(list, struct vcpu, ipi_list_node);    in hf_ipi_get_pending_target_vcpu()
  96   struct vcpu *target_vcpu = vm_get_vcpu(vm, target_vcpu_index);    in hf_ipi_send_interrupt()
  172  struct vcpu *target_vcpu = target_vcpu_locked.vcpu;    in hf_ipi_handle_list_element()
  254  for (struct vcpu *target_vcpu =    in hf_ipi_handle()
  255  hf_ipi_get_pending_target_vcpu(target_vcpu_locked.vcpu);    in hf_ipi_handle()
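Line 84 recovers the owning vcpu from its embedded ipi_list_node with CONTAINER_OF, the standard intrusive-list idiom. A sketch of the conventional form of that macro (an assumption about Hafnium's exact definition):

    #include <stddef.h>

    #define CONTAINER_OF(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_entry { struct list_entry *next; };
    struct vcpu_toy { int id; struct list_entry ipi_list_node; };

    /* Map a node on the pending-IPI list back to its enclosing vcpu,
     * mirroring CONTAINER_OF(list, struct vcpu, ipi_list_node). */
    static inline struct vcpu_toy *ipi_node_to_vcpu(struct list_entry *list)
    {
            return CONTAINER_OF(list, struct vcpu_toy, ipi_list_node);
    }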
ipi_test.cc
  30   using struct_vcpu = struct vcpu;
  61   struct vcpu *preempted_vcpu =    in SetUp()
  121  struct vcpu *vcpu = vm_get_vcpu(current_vm, i);    in TEST_F() local
  123  vcpu->cpu = cpu;    in TEST_F()
  135  (struct vcpu *)NULL);    in TEST_F()
  207  (struct vcpu *)NULL);    in TEST_F()
  229  (struct vcpu *)NULL);    in TEST_F()
  257  (struct vcpu *)NULL);    in TEST_F()
  271  struct vcpu *top_priority_vcpu;    in TEST_F()
  340  struct vcpu *vcpu = vm_get_vcpu(test_service[i], j);    in TEST_F() local
  [all …]
api.c
  109   struct vcpu *vcpu = NULL;    in api_ffa_get_vm_vcpu() local
  204   struct vcpu *api_preempt(struct vcpu *current)    in api_preempt()
  244   struct vcpu *api_vcpu_off(struct vcpu *current)    in api_vcpu_off()
  340   struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)    in api_wake_up()
  355   struct vcpu *api_abort(struct vcpu *current)    in api_abort()
  1206  struct vcpu *next = next_locked.vcpu;    in api_inject_arch_timer_interrupt()
  1241  struct vcpu *vcpu = vcpu_next_locked.vcpu;    in api_vcpu_prepare_run() local
  1289  vcpu->vm->id, vcpu_index(vcpu));    in api_vcpu_prepare_run()
  1391  vcpu->cpu = current_locked.vcpu->cpu;    in api_vcpu_prepare_run()
  1413  struct vcpu *vcpu;    in api_ffa_run() local
  [all …]
/src/ffa/spmc/
cpu_cycles.c
  43   struct vcpu *current = current_locked.vcpu;    in ffa_cpu_cycles_run_checks()
  190  struct vcpu *current = current_locked.vcpu;    in ffa_cpu_cycles_preempted_vcpu_resume()
  230  struct vcpu *current = current_locked.vcpu;    in ffa_msg_wait_complete()
  255  struct vcpu *current = current_locked.vcpu;    in ffa_cpu_cycles_msg_wait_intercept()
  285  struct vcpu *current = current_locked.vcpu;    in sp_boot_next()
  369  struct vcpu *current = current_locked.vcpu;    in ffa_cpu_cycles_msg_wait_prepare()
  430  struct vcpu *vcpu = target_locked.vcpu;    in ffa_cpu_cycles_init_schedule_mode_ffa_run() local
  431  struct vcpu *current = current_locked.vcpu;    in ffa_cpu_cycles_init_schedule_mode_ffa_run()
  470  struct vcpu *current = current_locked.vcpu;    in ffa_cpu_cycles_yield_prepare()
  650  struct vcpu *vcpu = locked_vcpu.vcpu;    in ffa_cpu_cycles_check_rtm_sec_interrupt() local
  [all …]
interrupts.c
  29   struct vcpu *current)    in ffa_interrupts_deactivate()
  67   static struct vcpu *ffa_interrupts_find_target_vcpu(struct vcpu *current,    in ffa_interrupts_find_target_vcpu()
  71   struct vcpu *target_vcpu;    in ffa_interrupts_find_target_vcpu()
  131  struct vcpu *target_vcpu = target_vcpu_locked.vcpu;    in ffa_interrupts_set_preempted_vcpu()
  132  struct vcpu *preempted_vcpu = current_locked.vcpu;    in ffa_interrupts_set_preempted_vcpu()
  157  struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;    in ffa_interrupts_mask()
  204  struct vcpu *target_vcpu = target_vcpu_locked.vcpu;    in interrupt_resume_waiting()
  248  struct vcpu *target_vcpu = target_vcpu_locked.vcpu;    in ffa_interrupts_signal_secure_interrupt()
  249  struct vcpu *current = current_locked.vcpu;    in ffa_interrupts_signal_secure_interrupt()
  516  struct vcpu *next;    in ffa_interrupts_unwind_nwd_call_chain()
  [all …]
direct_messaging.c
  145  struct vcpu *next = next_locked.vcpu;    in ffa_direct_msg_unwind_call_chain_ffa_direct_resp()
  147  struct vcpu *current = current_locked.vcpu;    in ffa_direct_msg_unwind_call_chain_ffa_direct_resp()
  174  struct vcpu *current = current_locked.vcpu;    in ffa_direct_msg_wind_call_chain_ffa_direct_req()
  175  struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;    in ffa_direct_msg_wind_call_chain_ffa_direct_req()
  201  struct vcpu *current = current_locked.vcpu;    in ffa_direct_msg_precedes_in_call_chain()
  202  struct vcpu *target = target_locked.vcpu;    in ffa_direct_msg_precedes_in_call_chain()
  360  struct vcpu *vcpu = NULL;    in find_next_vcpu_to_inform() local
  385  return vcpu;    in find_next_vcpu_to_inform()
  400  struct vcpu *vcpu;    in psci_cpu_off_success_fwk_resp() local
  594  struct vcpu *current = current_locked.vcpu;    in handle_sp_cpu_off_framework_resp()
  [all …]
/src/arch/aarch64/plat/psci/
hypervisor.c
  61  struct vcpu *plat_psci_cpu_resume(struct cpu *c)    in plat_psci_cpu_resume()
  64  struct vcpu *vcpu;    in plat_psci_cpu_resume() local
  66  vcpu = vm_get_vcpu(vm, cpu_index(c));    in plat_psci_cpu_resume()
  67  vcpu->cpu = c;    in plat_psci_cpu_resume()
  72  arch_regs_reset(vcpu);    in plat_psci_cpu_resume()
  76  return vcpu;    in plat_psci_cpu_resume()
/src/ffa/
absent.c
  58   bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,    in ffa_direct_msg_is_direct_request_valid()
  331  struct ffa_value *run_ret, struct vcpu **next)    in ffa_cpu_cycles_run_checks()
  342  struct vcpu **next,    in ffa_interrupts_handle_secure_interrupt()
  425  struct vcpu_locked current_locked, struct vcpu **next)    in ffa_cpu_cycles_msg_wait_prepare()
  525  struct vcpu *current, struct vcpu **next)    in ffa_indirect_msg_send()
  537  struct vcpu **next,    in ffa_cpu_cycles_yield_prepare()
  549  struct ffa_value plat_ffa_error_32(struct vcpu *current, struct vcpu **next,    in plat_ffa_error_32()
  560  struct vcpu **next)    in ffa_indirect_msg_recv()
  581  struct vcpu *current,    in ffa_direct_msg_handle_framework_msg()
  582  struct vcpu **next)    in ffa_direct_msg_handle_framework_msg()
  [all …]
/src/ffa/hypervisor/
cpu_cycles.c
  36   struct ffa_value *run_ret, struct vcpu **next)    in ffa_cpu_cycles_run_checks()
  42   if (!vm_is_primary(current_locked.vcpu->vm)) {    in ffa_cpu_cycles_run_checks()
  61   struct vcpu_locked current_locked, struct vcpu **next)    in ffa_cpu_cycles_msg_wait_prepare()
  110  struct vcpu **next,    in ffa_cpu_cycles_yield_prepare()
  114  struct vcpu *current = current_locked.vcpu;    in ffa_cpu_cycles_yield_prepare()
  130  struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,    in ffa_cpu_cycles_error_32()
  131  struct vcpu **next,    in ffa_cpu_cycles_error_32()
direct_messaging.c
  20   bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,    in ffa_direct_msg_is_direct_request_valid()
  53   bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,    in ffa_direct_msg_is_direct_response_valid()
  124  struct vcpu *current,    in ffa_direct_msg_handle_framework_msg()
  125  struct vcpu **next)    in ffa_direct_msg_handle_framework_msg()
  144  struct vcpu **next)    in ffa_direct_msg_handle_framework_msg_resp()
indirect_messaging.c
  105  struct vcpu **next)    in ffa_indirect_msg_recv()
  107  struct vm *vm = current_locked.vcpu->vm;    in ffa_indirect_msg_recv()
  108  struct vcpu *current = current_locked.vcpu;    in ffa_indirect_msg_recv()
  157  vcpu_index(current_locked.vcpu)),    in ffa_indirect_msg_recv()
  175  struct vcpu **next)    in deliver_msg()
  234  struct vcpu *current, struct vcpu **next)    in ffa_indirect_msg_send()
/src/arch/fake/hypervisor/
ffa.c
  36   bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,    in ffa_direct_msg_is_direct_request_valid()
  58   bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,    in ffa_direct_msg_is_direct_response_valid()
  319  struct ffa_value *run_ret, struct vcpu **next)    in ffa_cpu_cycles_run_checks()
  399  struct vcpu_locked current_locked, struct vcpu **next)    in ffa_cpu_cycles_msg_wait_prepare()
  505  struct vcpu *current, struct vcpu **next)    in ffa_indirect_msg_send()
  517  struct vcpu **next,    in ffa_cpu_cycles_yield_prepare()
  605  struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,    in ffa_cpu_cycles_error_32()
  606  struct vcpu **next,    in ffa_cpu_cycles_error_32()
  644  struct vcpu *current,    in ffa_direct_msg_handle_framework_msg()
  645  struct vcpu **next)    in ffa_direct_msg_handle_framework_msg()
  [all …]
cpu.c
  25  void arch_regs_reset(struct vcpu *vcpu)    in arch_regs_reset() argument
  28  (void)vcpu;    in arch_regs_reset()