| /arch/arm64/kvm/hyp/nvhe/ |
| hyp-main.c |
    235  cpu_reg(host_ctxt, 1) = ret;     in handle___kvm_vcpu_run()
    269  cpu_reg(host_ctxt, 1) = ret;     in handle___pkvm_host_share_guest()
    290  cpu_reg(host_ctxt, 1) = ret;     in handle___pkvm_host_unshare_guest()
    309  cpu_reg(host_ctxt, 1) = ret;     in handle___pkvm_host_relax_perms_guest()
    330  cpu_reg(host_ctxt, 1) = ret;     in handle___pkvm_host_wrprotect_guest()
    352  cpu_reg(host_ctxt, 1) = ret;     in handle___pkvm_host_test_clear_young_guest()
    370  cpu_reg(host_ctxt, 1) = ret;     in handle___pkvm_host_mkyoung_guest()
    541  cpu_reg(host_ctxt, 1) = haddr;   in handle___pkvm_create_private_mapping()
    646  hfn(host_ctxt);                  in handle_host_hcall()
    681  handle_host_hcall(host_ctxt);    in handle_trap()
    [all …]
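The hyp-main.c hits above all follow the same host-hypercall convention: handle_trap() hands the saved host register file to handle_host_hcall(), which dispatches to a per-hypercall handler (hfn), and each handler writes its result back into the host's x1 via cpu_reg(host_ctxt, 1) before EL2 returns to the host. A minimal sketch of that handler shape follows; handle___pkvm_example_call() and do_example_work_at_el2() are hypothetical names, and the real handlers additionally decode their arguments with helper macros in hyp-main.c rather than raw cpu_reg() reads.

    /* Sketch only, assuming cpu_reg(ctxt, n) names the host's saved GPR n. */
    static void handle___pkvm_example_call(struct kvm_cpu_context *host_ctxt)
    {
        u64 arg = cpu_reg(host_ctxt, 1);     /* argument the host passed in x1 */
        int ret;

        ret = do_example_work_at_el2(arg);   /* hypothetical EL2-side helper */

        cpu_reg(host_ctxt, 1) = ret;         /* result handed back to the host in x1 */
    }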
|
| psci-relay.c |
     74  return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),    in psci_forward()
     75                   cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));   in psci_forward()
    208  host_ctxt = host_data_ptr(host_ctxt);    in __kvm_host_psci_cpu_entry()
    224  __host_enter(host_ctxt);                 in __kvm_host_psci_cpu_entry()
    230  return psci_forward(host_ctxt);          in psci_0_1_handler()
    248  return psci_forward(host_ctxt);          in psci_0_2_handler()
    255  return psci_forward(host_ctxt);          in psci_0_2_handler()
    303  cpu_reg(host_ctxt, 0) = ret;             in kvm_host_psci_handler()
    304  cpu_reg(host_ctxt, 1) = 0;               in kvm_host_psci_handler()
    305  cpu_reg(host_ctxt, 2) = 0;               in kvm_host_psci_handler()
    [all …]
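Taken together, the psci-relay.c hits show the PSCI proxy pattern: arguments the host placed in x0-x3 are read out of host_ctxt and forwarded to EL3, and the result is written back into the host's registers before re-entering the host. The fragment below only reassembles the lines listed above (the listing is truncated, so any further register writes are omitted); it is not the complete handler.

    /* Forward the host's PSCI call arguments (x0-x3) to EL3 unchanged. */
    static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
    {
        return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
                         cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
    }

    /* Later, in kvm_host_psci_handler(): publish the result to the host. */
    cpu_reg(host_ctxt, 0) = ret;
    cpu_reg(host_ctxt, 1) = 0;
    cpu_reg(host_ctxt, 2) = 0;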
|
| switch.c |
    238  struct kvm_cpu_context *host_ctxt;        in __kvm_vcpu_run()  (local)
    255  host_ctxt = host_data_ptr(host_ctxt);     in __kvm_vcpu_run()
    256  host_ctxt->__hyp_running_vcpu = vcpu;     in __kvm_vcpu_run()
    261  __sysreg_save_state_nvhe(host_ctxt);      in __kvm_vcpu_run()
    326  __sysreg_restore_state_nvhe(host_ctxt);   in __kvm_vcpu_run()
    345  host_ctxt->__hyp_running_vcpu = NULL;     in __kvm_vcpu_run()
    355  struct kvm_cpu_context *host_ctxt;        in hyp_panic()  (local)
    358  host_ctxt = host_data_ptr(host_ctxt);     in hyp_panic()
    359  vcpu = host_ctxt->__hyp_running_vcpu;     in hyp_panic()
    365  __sysreg_restore_state_nvhe(host_ctxt);   in hyp_panic()
    [all …]
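The nVHE switch.c hits sketch the vcpu-run bracket: __kvm_vcpu_run() fetches the per-CPU host context, records the vcpu in __hyp_running_vcpu (so hyp_panic() can later recover it), saves the host's EL1 system registers, runs the guest, then restores the host state and clears the marker. Reassembled from the lines above and heavily simplified (trap configuration, guest state loading, FP/SVE and timer handling are all omitted):

    struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);

    host_ctxt->__hyp_running_vcpu = vcpu;     /* breadcrumb for hyp_panic() */
    __sysreg_save_state_nvhe(host_ctxt);      /* stash host EL1 sysregs */

    /* ... activate traps, load guest state, enter the guest, handle exits ... */

    __sysreg_restore_state_nvhe(host_ctxt);   /* bring the host EL1 state back */
    host_ctxt->__hyp_running_vcpu = NULL;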
|
| tlb.c |
     24  struct kvm_cpu_context *host_ctxt;                      in enter_vmid_context()  (local)
     27  host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;   in enter_vmid_context()
     28  vcpu = host_ctxt->__hyp_running_vcpu;                   in enter_vmid_context()
    121  struct kvm_cpu_context *host_ctxt;                      in exit_vmid_context()  (local)
    124  host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;   in exit_vmid_context()
    125  vcpu = host_ctxt->__hyp_running_vcpu;                   in exit_vmid_context()
|
| ffa.c |
    792  bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)   in kvm_host_ffa_handler()  (argument)
    820  if (!do_ffa_features(&res, host_ctxt))                   in kvm_host_ffa_handler()
    825  do_ffa_rxtx_map(&res, host_ctxt);                        in kvm_host_ffa_handler()
    828  do_ffa_rxtx_unmap(&res, host_ctxt);                      in kvm_host_ffa_handler()
    832  do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);    in kvm_host_ffa_handler()
    835  do_ffa_mem_reclaim(&res, host_ctxt);                     in kvm_host_ffa_handler()
    839  do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);     in kvm_host_ffa_handler()
    842  do_ffa_mem_frag_tx(&res, host_ctxt);                     in kvm_host_ffa_handler()
    845  do_ffa_version(&res, host_ctxt);                         in kvm_host_ffa_handler()
    848  do_ffa_part_get(&res, host_ctxt);                        in kvm_host_ffa_handler()
    [all …]
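The ffa.c hits all sit inside kvm_host_ffa_handler(), which proxies the host's FF-A calls: it switches on the function ID and hands host_ctxt to a do_ffa_*() helper that reads the call's arguments from the saved registers and fills an SMCCC result. The dispatcher shape below is a sketch reassembled from those hits, not the kernel's control flow: only the FFA_FN64_MEM_SHARE arm from the listing is shown, the remaining cases and the final write-back of res into host_ctxt are elided, and returning false is used here simply to mean "not intercepted at EL2".

    bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
    {
        struct arm_smccc_res res;

        switch (func_id) {
        case FFA_FN64_MEM_SHARE:
            do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
            break;
        /* ... the other FF-A calls listed above ... */
        default:
            return false;                /* not intercepted here */
        }

        /* The real code then copies res back into the host's registers. */
        return true;
    }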
|
| setup.c |
    281  struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);   in __pkvm_init_finalise()  (local)
    331  cpu_reg(host_ctxt, 1) = ret;                                    in __pkvm_init_finalise()
    333  __host_enter(host_ctxt);                                        in __pkvm_init_finalise()
|
| mem_protect.c |
    593  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)   in handle_host_mem_abort()  (argument)
|
| /arch/arm64/kvm/hyp/include/hyp/ |
| debug-sr.h |
    136  struct kvm_cpu_context *host_ctxt;            in __debug_switch_to_guest_common()  (local)
    144  host_ctxt = host_data_ptr(host_ctxt);         in __debug_switch_to_guest_common()
    149  __debug_save_state(host_dbg, host_ctxt);      in __debug_switch_to_guest_common()
    155  struct kvm_cpu_context *host_ctxt;            in __debug_switch_to_host_common()  (local)
    163  host_ctxt = host_data_ptr(host_ctxt);         in __debug_switch_to_host_common()
    169  __debug_restore_state(host_dbg, host_ctxt);   in __debug_switch_to_host_common()
|
| switch.h |
    325  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);   in __activate_traps_hfgxtr()
    360  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);   in __deactivate_traps_hfgxtr()
    415  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);   in __activate_traps_common()
    455  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);   in __deactivate_traps_common()
    655  __fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));      in kvm_hyp_save_fpsimd_host()
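Nearly every hit in this header (and most of the .c hits above) reaches the host context through host_data_ptr(host_ctxt) rather than open-coding the per-CPU access the way tlb.c does. The line 655 hit, which passes host_ctxt.fp_regs, shows the macro takes a field path into the per-CPU struct kvm_host_data, so a plausible expansion is the sketch below; this is an assumption inferred from the listing, not the exact definition in any particular kernel version.

    /*
     * Assumed shape of the accessor: resolve a field of the per-CPU
     * kvm_host_data, so host_data_ptr(host_ctxt) and
     * &this_cpu_ptr(&kvm_host_data)->host_ctxt name the same object.
     */
    #define host_data_ptr(f)    (&this_cpu_ptr(&kvm_host_data)->f)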
|
| sysreg-sr.h |
     33  return host_data_ptr(host_ctxt) != ctxt;   in ctxt_is_guest()
|
| /arch/arm64/kvm/hyp/vhe/ |
| switch.c |
    208  host_data_ptr(host_ctxt)->__hyp_running_vcpu = vcpu;   in kvm_vcpu_load_vhe()
    220  host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL;   in kvm_vcpu_put_vhe()
    564  struct kvm_cpu_context *host_ctxt;                     in __kvm_vcpu_run_vhe()  (local)
    568  host_ctxt = host_data_ptr(host_ctxt);                  in __kvm_vcpu_run_vhe()
    573  sysreg_save_host_state_vhe(host_ctxt);                 in __kvm_vcpu_run_vhe()
    599  sysreg_restore_host_state_vhe(host_ctxt);              in __kvm_vcpu_run_vhe()
    650  struct kvm_cpu_context *host_ctxt;                     in __hyp_call_panic()  (local)
    653  host_ctxt = host_data_ptr(host_ctxt);                  in __hyp_call_panic()
    654  vcpu = host_ctxt->__hyp_running_vcpu;                  in __hyp_call_panic()
    657  sysreg_restore_host_state_vhe(host_ctxt);              in __hyp_call_panic()
|
| sysreg-sr.c |
    200  struct kvm_cpu_context *host_ctxt;        in __vcpu_load_switch_sysregs()  (local)
    203  host_ctxt = host_data_ptr(host_ctxt);     in __vcpu_load_switch_sysregs()
    204  __sysreg_save_user_state(host_ctxt);      in __vcpu_load_switch_sysregs()
    261  struct kvm_cpu_context *host_ctxt;        in __vcpu_put_switch_sysregs()  (local)
    263  host_ctxt = host_data_ptr(host_ctxt);     in __vcpu_put_switch_sysregs()
    274  __sysreg_restore_user_state(host_ctxt);   in __vcpu_put_switch_sysregs()
|
| /arch/arm64/include/asm/ |
| kvm_hyp.h |
    121  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
    124  void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
    133  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
|
| kvm_asm.h |
    291  void handle_trap(struct kvm_cpu_context *host_ctxt);
|
| kvm_host.h |
    716  struct kvm_cpu_context host_ctxt;   (member)
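This is the defining occurrence: host_ctxt is the kvm_cpu_context embedded in struct kvm_host_data, of which there is one instance per CPU (the tlb.c hits reach it through this_cpu_ptr(&kvm_host_data)). A trimmed sketch of that relationship, with the other members (which vary between kernel versions) elided:

    struct kvm_host_data {
        struct kvm_cpu_context host_ctxt;   /* host GPRs/sysregs saved around guest runs */
        /* ... debug state, flags and other per-CPU hyp bookkeeping elided ... */
    };
    /* One kvm_host_data per CPU; hyp code reaches it via this_cpu_ptr()/host_data_ptr(). */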
|
| /arch/arm64/kvm/hyp/include/nvhe/ |
| ffa.h |
     15  bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
|
| mem_protect.h |
     55  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
|
| /arch/arm64/kvm/ |
| pmu.c |
    190  hctxt = host_data_ptr(host_ctxt);   in kvm_set_pmuserenr()
|
| arm.c |
   2105  kvm_init_host_cpu_context(host_data_ptr(host_ctxt));   in cpu_hyp_init_context()
|
| /arch/arm64/kernel/ |
| asm-offsets.c |
    117  DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));   in main()
|