Lines matching refs: svm (cross-reference hits for the identifier svm; the functions below are KVM's nested SVM code, apparently arch/x86/kvm/svm/nested.c)

37 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit() local
39 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) { in nested_svm_inject_npf_exit()
44 svm->vmcb->control.exit_code = SVM_EXIT_NPF; in nested_svm_inject_npf_exit()
45 svm->vmcb->control.exit_code_hi = 0; in nested_svm_inject_npf_exit()
46 svm->vmcb->control.exit_info_1 = (1ULL << 32); in nested_svm_inject_npf_exit()
47 svm->vmcb->control.exit_info_2 = fault->address; in nested_svm_inject_npf_exit()
50 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL; in nested_svm_inject_npf_exit()
51 svm->vmcb->control.exit_info_1 |= fault->error_code; in nested_svm_inject_npf_exit()
53 nested_svm_vmexit(svm); in nested_svm_inject_npf_exit()
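
Note: the hits above synthesize a nested #NPF exit for L1: exit_code is forced to SVM_EXIT_NPF, exit_info_2 carries the faulting address, and the update at lines 50-51 keeps the upper half of exit_info_1 (including the marker bit set at line 46) while swapping in the new error code. A minimal userspace model of that merge; the function name is hypothetical:

    #include <stdint.h>

    /* Hypothetical model of the exit_info_1 update above: bit 32 (set as
     * 1ULL << 32 at line 46) survives, while the low 32 bits are replaced
     * with the new page-fault error code. */
    static uint64_t merge_npf_error_code(uint64_t exit_info_1, uint32_t error_code)
    {
        exit_info_1 &= ~0xffffffffULL;  /* drop the stale error code */
        exit_info_1 |= error_code;      /* install the new one */
        return exit_info_1;
    }
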
58 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_page_fault_nested() local
61 if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) && in svm_inject_page_fault_nested()
62 !svm->nested.nested_run_pending) { in svm_inject_page_fault_nested()
63 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR; in svm_inject_page_fault_nested()
64 svm->vmcb->control.exit_code_hi = 0; in svm_inject_page_fault_nested()
65 svm->vmcb->control.exit_info_1 = fault->error_code; in svm_inject_page_fault_nested()
66 svm->vmcb->control.exit_info_2 = fault->address; in svm_inject_page_fault_nested()
67 nested_svm_vmexit(svm); in svm_inject_page_fault_nested()
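
Note: this path reflects a #PF to L1 only when L1 intercepts vector 14 and no VMRUN is pending (lines 61-62); otherwise the fault stays with L0. A sketch of that decision under the same assumptions, names hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    #define PF_VECTOR 14  /* #PF is vector 14 */

    /* Sketch only: reflect the #PF to L1 iff L1 intercepts vector 14 and
     * we are not in the middle of completing a VMRUN. */
    static bool should_reflect_pf(uint32_t exception_intercepts, bool run_pending)
    {
        return (exception_intercepts & (1u << PF_VECTOR)) && !run_pending;
    }
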
75 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr() local
76 u64 cr3 = svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_pdptr()
89 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3() local
91 return svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_cr3()
96 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_init_mmu_context() local
107 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4, in nested_svm_init_mmu_context()
108 svm->vmcb01.ptr->save.efer, in nested_svm_init_mmu_context()
109 svm->nested.ctl.nested_cr3); in nested_svm_init_mmu_context()
122 void recalc_intercepts(struct vcpu_svm *svm) in recalc_intercepts() argument
127 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in recalc_intercepts()
129 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
132 c = &svm->vmcb->control; in recalc_intercepts()
133 h = &svm->vmcb01.ptr->control; in recalc_intercepts()
134 g = &svm->nested.ctl; in recalc_intercepts()
197 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) in nested_svm_vmrun_msrpm() argument
206 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
217 offset = svm->nested.ctl.msrpm_base_pa + (p * 4); in nested_svm_vmrun_msrpm()
219 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) in nested_svm_vmrun_msrpm()
222 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
225 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm)); in nested_svm_vmrun_msrpm()
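
Note: line 222 builds the MSR permission map L2 actually runs with: L0's bitmap OR-ed with L1's, read from guest memory one 32-bit word at a time, so an MSR access exits whenever either level wants the intercept. A flat-array model of the merge (the word count is a placeholder, and the real code pulls L1's words from guest memory):

    #include <stddef.h>
    #include <stdint.h>

    /* Userspace model of the merge at line 222: a set bit means
     * "intercept", so OR-ing intercepts an MSR whenever either L0 (KVM)
     * or the L1 hypervisor asked for it. nwords is a placeholder, not
     * the real MSRPM size. */
    static void merge_msrpm(uint32_t *merged, const uint32_t *l0,
                            const uint32_t *l1, size_t nwords)
    {
        for (size_t i = 0; i < nwords; i++)
            merged[i] = l0[i] | l1[i];
    }
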
329 void nested_load_control_from_vmcb12(struct vcpu_svm *svm, in nested_load_control_from_vmcb12() argument
332 copy_vmcb_control_area(&svm->nested.ctl, control); in nested_load_control_from_vmcb12()
335 svm->nested.ctl.asid = control->asid; in nested_load_control_from_vmcb12()
336 svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL; in nested_load_control_from_vmcb12()
337 svm->nested.ctl.iopm_base_pa &= ~0x0fffULL; in nested_load_control_from_vmcb12()
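
Note: caching vmcb12's control area also sanitizes it; clearing the low 12 bits of msrpm_base_pa and iopm_base_pa (lines 336-337) is plain 4 KiB alignment:

    #include <stdint.h>

    /* Equivalent of the masking at lines 336-337. */
    static inline uint64_t align_down_4k(uint64_t pa)
    {
        return pa & ~0x0fffULL;
    }
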
344 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm) in nested_sync_control_from_vmcb02() argument
347 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj; in nested_sync_control_from_vmcb02()
348 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err; in nested_sync_control_from_vmcb02()
352 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) && in nested_sync_control_from_vmcb02()
353 svm_is_intercept(svm, INTERCEPT_VINTR)) { in nested_sync_control_from_vmcb02()
364 svm->nested.ctl.int_ctl &= ~mask; in nested_sync_control_from_vmcb02()
365 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask; in nested_sync_control_from_vmcb02()
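
Note: lines 364-365 are the usual copy-only-masked-bits idiom: bits inside mask track vmcb02, everything else in the cached int_ctl is preserved. The same idiom reappears when vmcb02's int_ctl is composed from vmcb12 and vmcb01 bits (lines 568-570 below). In general form:

    #include <stdint.h>

    /* General form of the int_ctl update at lines 364-365: take the bits
     * selected by mask from src, keep the rest of dst. */
    static inline uint64_t copy_masked_bits(uint64_t dst, uint64_t src, uint64_t mask)
    {
        return (dst & ~mask) | (src & mask);
    }
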
372 static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm, in nested_save_pending_event_to_vmcb12() argument
375 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_save_pending_event_to_vmcb12()
405 static inline bool nested_npt_enabled(struct vcpu_svm *svm) in nested_npt_enabled() argument
407 return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE; in nested_npt_enabled()
455 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm) in nested_vmcb02_compute_g_pat() argument
457 if (!svm->nested.vmcb02.ptr) in nested_vmcb02_compute_g_pat()
461 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat; in nested_vmcb02_compute_g_pat()
464 static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12) in nested_vmcb02_prepare_save() argument
468 nested_vmcb02_compute_g_pat(svm); in nested_vmcb02_prepare_save()
471 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) { in nested_vmcb02_prepare_save()
473 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa; in nested_vmcb02_prepare_save()
477 svm->vmcb->save.es = vmcb12->save.es; in nested_vmcb02_prepare_save()
478 svm->vmcb->save.cs = vmcb12->save.cs; in nested_vmcb02_prepare_save()
479 svm->vmcb->save.ss = vmcb12->save.ss; in nested_vmcb02_prepare_save()
480 svm->vmcb->save.ds = vmcb12->save.ds; in nested_vmcb02_prepare_save()
481 svm->vmcb->save.cpl = vmcb12->save.cpl; in nested_vmcb02_prepare_save()
482 vmcb_mark_dirty(svm->vmcb, VMCB_SEG); in nested_vmcb02_prepare_save()
486 svm->vmcb->save.gdtr = vmcb12->save.gdtr; in nested_vmcb02_prepare_save()
487 svm->vmcb->save.idtr = vmcb12->save.idtr; in nested_vmcb02_prepare_save()
488 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in nested_vmcb02_prepare_save()
491 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED); in nested_vmcb02_prepare_save()
498 svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME); in nested_vmcb02_prepare_save()
500 svm_set_cr0(&svm->vcpu, vmcb12->save.cr0); in nested_vmcb02_prepare_save()
501 svm_set_cr4(&svm->vcpu, vmcb12->save.cr4); in nested_vmcb02_prepare_save()
503 svm->vcpu.arch.cr2 = vmcb12->save.cr2; in nested_vmcb02_prepare_save()
505 kvm_rax_write(&svm->vcpu, vmcb12->save.rax); in nested_vmcb02_prepare_save()
506 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp); in nested_vmcb02_prepare_save()
507 kvm_rip_write(&svm->vcpu, vmcb12->save.rip); in nested_vmcb02_prepare_save()
510 svm->vmcb->save.rax = vmcb12->save.rax; in nested_vmcb02_prepare_save()
511 svm->vmcb->save.rsp = vmcb12->save.rsp; in nested_vmcb02_prepare_save()
512 svm->vmcb->save.rip = vmcb12->save.rip; in nested_vmcb02_prepare_save()
516 svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1; in nested_vmcb02_prepare_save()
517 svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW; in nested_vmcb02_prepare_save()
518 vmcb_mark_dirty(svm->vmcb, VMCB_DR); in nested_vmcb02_prepare_save()
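
Note: lines 516-517 sanitize the debug registers before running L2 by OR-ing in the architecturally fixed bits. A model assuming the usual constants (DR7 bit 10 reads as 1; DR6_ACTIVE_LOW is KVM's mask of DR6 bits that read as 1 when inactive); verify the values against the kernel headers:

    #include <stdint.h>

    /* Assumed constants; verify against the kernel headers. */
    #define DR7_FIXED_1    0x0000000000000400ULL  /* DR7 bit 10 reads as 1 */
    #define DR6_ACTIVE_LOW 0x00000000ffff0ff0ULL  /* DR6 bits that read as 1 */

    static uint64_t sanitize_dr7(uint64_t dr7) { return dr7 | DR7_FIXED_1; }
    static uint64_t sanitize_dr6(uint64_t dr6) { return dr6 | DR6_ACTIVE_LOW; }
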
522 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm) in nested_vmcb02_prepare_control() argument
529 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_vmcb02_prepare_control()
540 WARN_ON(kvm_apicv_activated(svm->vcpu.kvm)); in nested_vmcb02_prepare_control()
543 svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl; in nested_vmcb02_prepare_control()
544 svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa; in nested_vmcb02_prepare_control()
545 svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa; in nested_vmcb02_prepare_control()
550 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in nested_vmcb02_prepare_control()
553 if (nested_npt_enabled(svm)) in nested_vmcb02_prepare_control()
558 svm->nested.ctl.tsc_offset, in nested_vmcb02_prepare_control()
559 svm->tsc_ratio_msr); in nested_vmcb02_prepare_control()
561 svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset; in nested_vmcb02_prepare_control()
563 if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) { in nested_vmcb02_prepare_control()
564 WARN_ON(!svm->tsc_scaling_enabled); in nested_vmcb02_prepare_control()
568 svm->vmcb->control.int_ctl = in nested_vmcb02_prepare_control()
569 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | in nested_vmcb02_prepare_control()
570 (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits); in nested_vmcb02_prepare_control()
572 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; in nested_vmcb02_prepare_control()
573 svm->vmcb->control.int_state = svm->nested.ctl.int_state; in nested_vmcb02_prepare_control()
574 svm->vmcb->control.event_inj = svm->nested.ctl.event_inj; in nested_vmcb02_prepare_control()
575 svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_vmcb02_prepare_control()
586 recalc_intercepts(svm); in nested_vmcb02_prepare_control()
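
Note: around lines 557-561, L2's TSC offset is computed from L1's offset, vmcb12's offset, and L1's scaling ratio (the call itself is truncated in the listing). The algebra is two-level composition: if L1_TSC = host*r1 + o1 and L2_TSC = L1_TSC*r2 + o2, then L2_TSC = host*(r1*r2) + (o1*r2 + o2), so the combined offset is o1 scaled by r2 plus o2. A hedged fixed-point sketch (FRAC_BITS is a placeholder; __int128 is a GCC/Clang extension):

    #include <stdint.h>

    #define FRAC_BITS 32  /* placeholder for the hardware's fractional width */

    /* Fixed-point scale: v * ratio / 2^FRAC_BITS (sketch only). */
    static int64_t scale_s64(int64_t v, uint64_t ratio)
    {
        return (int64_t)(((__int128)v * (__int128)ratio) >> FRAC_BITS);
    }

    /* Combined L2 offset: L1's offset scaled by L2's ratio, plus L2's. */
    static int64_t combined_tsc_offset(int64_t o1, int64_t o2, uint64_t r2)
    {
        return scale_s64(o1, r2) + o2;
    }
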
604 struct vcpu_svm *svm = to_svm(vcpu); in enter_svm_guest_mode() local
607 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa, in enter_svm_guest_mode()
621 svm->nested.vmcb12_gpa = vmcb12_gpa; in enter_svm_guest_mode()
623 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr); in enter_svm_guest_mode()
625 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr); in enter_svm_guest_mode()
627 svm_switch_vmcb(svm, &svm->nested.vmcb02); in enter_svm_guest_mode()
628 nested_vmcb02_prepare_control(svm); in enter_svm_guest_mode()
629 nested_vmcb02_prepare_save(svm, vmcb12); in enter_svm_guest_mode()
631 ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3, in enter_svm_guest_mode()
632 nested_npt_enabled(svm), from_vmrun); in enter_svm_guest_mode()
642 svm_set_gif(svm, true); in enter_svm_guest_mode()
649 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_vmrun() local
655 if (!svm->nested.hsave_msr) { in nested_svm_vmrun()
665 vmcb12_gpa = svm->vmcb->save.rax; in nested_svm_vmrun()
678 if (WARN_ON_ONCE(!svm->nested.initialized)) in nested_svm_vmrun()
681 nested_load_control_from_vmcb12(svm, &vmcb12->control); in nested_svm_vmrun()
684 !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) { in nested_svm_vmrun()
696 svm->vmcb01.ptr->save.efer = vcpu->arch.efer; in nested_svm_vmrun()
697 svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(vcpu); in nested_svm_vmrun()
698 svm->vmcb01.ptr->save.cr4 = vcpu->arch.cr4; in nested_svm_vmrun()
699 svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu); in nested_svm_vmrun()
700 svm->vmcb01.ptr->save.rip = kvm_rip_read(vcpu); in nested_svm_vmrun()
703 svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu); in nested_svm_vmrun()
705 svm->nested.nested_run_pending = 1; in nested_svm_vmrun()
710 if (nested_svm_vmrun_msrpm(svm)) in nested_svm_vmrun()
714 svm->nested.nested_run_pending = 0; in nested_svm_vmrun()
716 svm->vmcb->control.exit_code = SVM_EXIT_ERR; in nested_svm_vmrun()
717 svm->vmcb->control.exit_code_hi = 0; in nested_svm_vmrun()
718 svm->vmcb->control.exit_info_1 = 0; in nested_svm_vmrun()
719 svm->vmcb->control.exit_info_2 = 0; in nested_svm_vmrun()
721 nested_svm_vmexit(svm); in nested_svm_vmrun()
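
Note: the failure path at lines 714-721 runs when the MSR bitmap merge fails after the world switch: instead of entering L2, KVM synthesizes an error #VMEXIT back to L1. A model of the synthesized exit, assuming SVM_EXIT_ERR's usual value of -1; the struct stands in for the real vmcb_control_area:

    #include <stdint.h>

    /* Stand-in for the real vmcb_control_area exit fields. */
    struct exit_fields {
        uint32_t exit_code;
        uint32_t exit_code_hi;
        uint64_t exit_info_1;
        uint64_t exit_info_2;
    };

    /* Mirror of lines 716-719, assuming SVM_EXIT_ERR == -1. */
    static void synthesize_vmrun_failure(struct exit_fields *c)
    {
        c->exit_code    = (uint32_t)-1;  /* SVM_EXIT_ERR */
        c->exit_code_hi = 0;
        c->exit_info_1  = 0;
        c->exit_info_2  = 0;
    }
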
766 int nested_svm_vmexit(struct vcpu_svm *svm) in nested_svm_vmexit() argument
768 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_svm_vmexit()
770 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmexit()
777 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); in nested_svm_vmexit()
788 svm->nested.vmcb12_gpa = 0; in nested_svm_vmexit()
789 WARN_ON_ONCE(svm->nested.nested_run_pending); in nested_svm_vmexit()
794 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE; in nested_svm_vmexit()
804 vmcb12->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
808 vmcb12->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
814 vmcb12->save.dr6 = svm->vcpu.arch.dr6; in nested_svm_vmexit()
824 nested_save_pending_event_to_vmcb12(svm, vmcb12); in nested_svm_vmexit()
826 if (svm->nrips_enabled) in nested_svm_vmexit()
829 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl; in nested_svm_vmexit()
830 vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl; in nested_svm_vmexit()
831 vmcb12->control.event_inj = svm->nested.ctl.event_inj; in nested_svm_vmexit()
832 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_svm_vmexit()
834 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr); in nested_svm_vmexit()
836 svm_switch_vmcb(svm, &svm->vmcb01); in nested_svm_vmexit()
842 svm_set_gif(svm, false); in nested_svm_vmexit()
843 svm->vmcb->control.exit_int_info = 0; in nested_svm_vmexit()
845 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset; in nested_svm_vmexit()
846 if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) { in nested_svm_vmexit()
847 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset; in nested_svm_vmexit()
848 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in nested_svm_vmexit()
851 if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) { in nested_svm_vmexit()
852 WARN_ON(!svm->tsc_scaling_enabled); in nested_svm_vmexit()
857 svm->nested.ctl.nested_cr3 = 0; in nested_svm_vmexit()
862 kvm_set_rflags(vcpu, svm->vmcb->save.rflags); in nested_svm_vmexit()
863 svm_set_efer(vcpu, svm->vmcb->save.efer); in nested_svm_vmexit()
864 svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE); in nested_svm_vmexit()
865 svm_set_cr4(vcpu, svm->vmcb->save.cr4); in nested_svm_vmexit()
866 kvm_rax_write(vcpu, svm->vmcb->save.rax); in nested_svm_vmexit()
867 kvm_rsp_write(vcpu, svm->vmcb->save.rsp); in nested_svm_vmexit()
868 kvm_rip_write(vcpu, svm->vmcb->save.rip); in nested_svm_vmexit()
870 svm->vcpu.arch.dr7 = DR7_FIXED_1; in nested_svm_vmexit()
871 kvm_update_dr7(&svm->vcpu); in nested_svm_vmexit()
886 rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true); in nested_svm_vmexit()
894 svm->vcpu.arch.nmi_injected = false; in nested_svm_vmexit()
904 if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF)) in nested_svm_vmexit()
905 kvm_queue_exception(&(svm->vcpu), DB_VECTOR); in nested_svm_vmexit()
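
Note: lines 904-905 handle single-stepping of VMRUN itself: if the L1 RFLAGS restored by the VMEXIT has TF set, a #DB is queued so L1's debugger sees the step complete. The test is just:

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EFLAGS_TF (1ULL << 8)  /* trap flag */

    /* Mirror of the check at line 904. */
    static bool needs_singlestep_db(uint64_t rflags)
    {
        return rflags & X86_EFLAGS_TF;
    }
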
915 int svm_allocate_nested(struct vcpu_svm *svm) in svm_allocate_nested() argument
919 if (svm->nested.initialized) in svm_allocate_nested()
925 svm->nested.vmcb02.ptr = page_address(vmcb02_page); in svm_allocate_nested()
926 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT); in svm_allocate_nested()
928 svm->nested.msrpm = svm_vcpu_alloc_msrpm(); in svm_allocate_nested()
929 if (!svm->nested.msrpm) in svm_allocate_nested()
931 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm); in svm_allocate_nested()
933 svm->nested.initialized = true; in svm_allocate_nested()
941 void svm_free_nested(struct vcpu_svm *svm) in svm_free_nested() argument
943 if (!svm->nested.initialized) in svm_free_nested()
946 svm_vcpu_free_msrpm(svm->nested.msrpm); in svm_free_nested()
947 svm->nested.msrpm = NULL; in svm_free_nested()
949 __free_page(virt_to_page(svm->nested.vmcb02.ptr)); in svm_free_nested()
950 svm->nested.vmcb02.ptr = NULL; in svm_free_nested()
959 svm->nested.last_vmcb12_gpa = INVALID_GPA; in svm_free_nested()
961 svm->nested.initialized = false; in svm_free_nested()
967 void svm_leave_nested(struct vcpu_svm *svm) in svm_leave_nested() argument
969 struct kvm_vcpu *vcpu = &svm->vcpu; in svm_leave_nested()
972 svm->nested.nested_run_pending = 0; in svm_leave_nested()
973 svm->nested.vmcb12_gpa = INVALID_GPA; in svm_leave_nested()
977 svm_switch_vmcb(svm, &svm->vmcb01); in svm_leave_nested()
980 vmcb_mark_all_dirty(svm->vmcb); in svm_leave_nested()
986 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) in nested_svm_exit_handled_msr() argument
991 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
994 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
996 write = svm->vmcb->control.exit_info_1 & 1; in nested_svm_exit_handled_msr()
1005 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4)) in nested_svm_exit_handled_msr()
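
Note: this MSR-intercept check reads one 32-bit word of L1's permission map from guest memory (line 1005) and tests a bit in it. A lookup model assuming AMD's layout of two bits per MSR, read then write, so each word covers 16 MSRs:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed layout: bit 2n intercepts reads of the n-th MSR in this
     * word, bit 2n+1 intercepts writes; 16 MSRs per 32-bit word. */
    static bool msr_intercepted(uint32_t word, uint32_t msr, bool write)
    {
        uint32_t mask = 1u << (2 * (msr & 0xf) + (write ? 1 : 0));
        return (word & mask) != 0;
    }
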
1011 static int nested_svm_intercept_ioio(struct vcpu_svm *svm) in nested_svm_intercept_ioio() argument
1018 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
1021 port = svm->vmcb->control.exit_info_1 >> 16; in nested_svm_intercept_ioio()
1022 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> in nested_svm_intercept_ioio()
1024 gpa = svm->nested.ctl.iopm_base_pa + (port / 8); in nested_svm_intercept_ioio()
1030 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
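
Note: the IOPM uses one bit per port, so an N-byte access starting at a given port tests N consecutive bits, which may straddle a byte boundary; that is why iopm_len is 1 or 2 bytes read at iopm_base_pa + port/8 (lines 1024-1030). A model under those assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    /* iopm must cover the byte after port/8 when the access straddles,
     * as the real bitmap does. */
    static bool io_intercepted(const uint8_t *iopm, uint16_t port, unsigned int size)
    {
        unsigned int start_bit = port & 7;
        uint16_t val = iopm[port / 8];
        uint16_t mask = (uint16_t)(((1u << size) - 1) << start_bit);

        if (start_bit + size > 8)
            val |= (uint16_t)iopm[port / 8 + 1] << 8;

        return (val & mask) != 0;
    }
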
1036 static int nested_svm_intercept(struct vcpu_svm *svm) in nested_svm_intercept() argument
1038 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_intercept()
1043 vmexit = nested_svm_exit_handled_msr(svm); in nested_svm_intercept()
1046 vmexit = nested_svm_intercept_ioio(svm); in nested_svm_intercept()
1049 if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1054 if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1072 if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1080 int nested_svm_exit_handled(struct vcpu_svm *svm) in nested_svm_exit_handled() argument
1084 vmexit = nested_svm_intercept(svm); in nested_svm_exit_handled()
1087 nested_svm_vmexit(svm); in nested_svm_exit_handled()
1107 static bool nested_exit_on_exception(struct vcpu_svm *svm) in nested_exit_on_exception() argument
1109 unsigned int nr = svm->vcpu.arch.exception.nr; in nested_exit_on_exception()
1111 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr)); in nested_exit_on_exception()
1114 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm) in nested_svm_inject_exception_vmexit() argument
1116 unsigned int nr = svm->vcpu.arch.exception.nr; in nested_svm_inject_exception_vmexit()
1118 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; in nested_svm_inject_exception_vmexit()
1119 svm->vmcb->control.exit_code_hi = 0; in nested_svm_inject_exception_vmexit()
1121 if (svm->vcpu.arch.exception.has_error_code) in nested_svm_inject_exception_vmexit()
1122 svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code; in nested_svm_inject_exception_vmexit()
1129 if (svm->vcpu.arch.exception.nested_apf) in nested_svm_inject_exception_vmexit()
1130 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token; in nested_svm_inject_exception_vmexit()
1131 else if (svm->vcpu.arch.exception.has_payload) in nested_svm_inject_exception_vmexit()
1132 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload; in nested_svm_inject_exception_vmexit()
1134 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; in nested_svm_inject_exception_vmexit()
1137 kvm_deliver_exception_payload(&svm->vcpu); in nested_svm_inject_exception_vmexit()
1138 if (svm->vcpu.arch.dr7 & DR7_GD) { in nested_svm_inject_exception_vmexit()
1139 svm->vcpu.arch.dr7 &= ~DR7_GD; in nested_svm_inject_exception_vmexit()
1140 kvm_update_dr7(&svm->vcpu); in nested_svm_inject_exception_vmexit()
1143 WARN_ON(svm->vcpu.arch.exception.has_payload); in nested_svm_inject_exception_vmexit()
1145 nested_svm_vmexit(svm); in nested_svm_inject_exception_vmexit()
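
Note: a reflected exception becomes SVM_EXIT_EXCP_BASE + nr with the error code in exit_info_1; for #PF, exit_info_2 is chosen exactly as lines 1129-1134 show. In isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /* Same selection as lines 1129-1134: async-PF token first, then an
     * explicit payload (the faulting address), else fall back to CR2. */
    static uint64_t pf_exit_info_2(bool nested_apf, uint64_t apf_token,
                                   bool has_payload, uint64_t payload,
                                   uint64_t cr2)
    {
        if (nested_apf)
            return apf_token;
        if (has_payload)
            return payload;
        return cr2;
    }
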
1148 static inline bool nested_exit_on_init(struct vcpu_svm *svm) in nested_exit_on_init() argument
1150 return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); in nested_exit_on_init()
1155 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_nested_events() local
1157 kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending; in svm_check_nested_events()
1164 if (!nested_exit_on_init(svm)) in svm_check_nested_events()
1166 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT); in svm_check_nested_events()
1177 if (svm->nested.nested_run_pending) in svm_check_nested_events()
1179 if (!nested_exit_on_exception(svm)) in svm_check_nested_events()
1181 nested_svm_inject_exception_vmexit(svm); in svm_check_nested_events()
1188 if (!nested_exit_on_smi(svm)) in svm_check_nested_events()
1190 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI); in svm_check_nested_events()
1197 if (!nested_exit_on_nmi(svm)) in svm_check_nested_events()
1199 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI); in svm_check_nested_events()
1206 if (!nested_exit_on_intr(svm)) in svm_check_nested_events()
1208 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); in svm_check_nested_events()
1209 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR); in svm_check_nested_events()
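
Note: svm_check_nested_events walks event classes in a fixed priority order, INIT, then pending exceptions, SMI, NMI, external interrupts, reflecting each to L1 only if the corresponding nested_exit_on_* predicate says L1 intercepts it. A condensed model of just the dispatch order (the predicates are stand-ins; the real checks also consult GIF, blocking state, and nested_run_pending):

    #include <stdbool.h>

    enum nested_event { EV_NONE, EV_INIT, EV_EXCEPTION, EV_SMI, EV_NMI, EV_INTR };

    /* Priority order as in the listing above. */
    static enum nested_event next_nested_event(bool init, bool exception,
                                               bool smi, bool nmi, bool intr)
    {
        if (init)      return EV_INIT;
        if (exception) return EV_EXCEPTION;
        if (smi)       return EV_SMI;
        if (nmi)       return EV_NMI;
        if (intr)      return EV_INTR;
        return EV_NONE;
    }
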
1216 int nested_svm_exit_special(struct vcpu_svm *svm) in nested_svm_exit_special() argument
1218 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_exit_special()
1228 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] & in nested_svm_exit_special()
1232 svm->vcpu.arch.apf.host_apf_flags) in nested_svm_exit_special()
1246 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_update_tsc_ratio_msr() local
1250 svm->tsc_ratio_msr); in nested_svm_update_tsc_ratio_msr()
1258 struct vcpu_svm *svm; in svm_get_nested_state() local
1265 &user_kvm_nested_state->data.svm[0]; in svm_get_nested_state()
1270 svm = to_svm(vcpu); in svm_get_nested_state()
1277 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa; in svm_get_nested_state()
1281 if (svm->nested.nested_run_pending) in svm_get_nested_state()
1285 if (gif_set(svm)) in svm_get_nested_state()
1300 if (copy_to_user(&user_vmcb->control, &svm->nested.ctl, in svm_get_nested_state()
1303 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save, in svm_get_nested_state()
1314 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nested_state() local
1316 &user_kvm_nested_state->data.svm[0]; in svm_set_nested_state()
1348 svm_leave_nested(svm); in svm_set_nested_state()
1349 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); in svm_set_nested_state()
1353 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa)) in svm_set_nested_state()
1399 ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3, in svm_set_nested_state()
1400 nested_npt_enabled(svm), false); in svm_set_nested_state()
1413 svm_leave_nested(svm); in svm_set_nested_state()
1415 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save; in svm_set_nested_state()
1417 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); in svm_set_nested_state()
1419 svm->nested.nested_run_pending = in svm_set_nested_state()
1422 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; in svm_set_nested_state()
1424 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save); in svm_set_nested_state()
1425 nested_load_control_from_vmcb12(svm, ctl); in svm_set_nested_state()
1427 svm_switch_vmcb(svm, &svm->nested.vmcb02); in svm_set_nested_state()
1428 nested_vmcb02_prepare_control(svm); in svm_set_nested_state()
1440 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nested_state_pages() local
1446 !nested_npt_enabled(svm) && is_pae_paging(vcpu)) in svm_get_nested_state_pages()
1455 if (!nested_svm_vmrun_msrpm(svm)) { in svm_get_nested_state_pages()