Lines matching refs: svm
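
Context for the matches below: they trace the references to the local variable svm through what appears to be KVM's nested SVM code (arch/x86/kvm/svm/nested.c in the mainline tree). Nearly every function opens with "struct vcpu_svm *svm = to_svm(vcpu);", which recovers the SVM-specific vCPU container from the architecture-neutral struct kvm_vcpu. A minimal sketch of that accessor, assuming the usual container_of() embedding declared in the mainline svm.h (orientation only, not a verbatim excerpt):

	/*
	 * struct vcpu_svm embeds struct kvm_vcpu, so the SVM-specific
	 * container is recovered with container_of().
	 */
	static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
	{
		return container_of(vcpu, struct vcpu_svm, vcpu);
	}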

39 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit() local
40 struct vmcb *vmcb = svm->vmcb; in nested_svm_inject_npf_exit()
56 nested_svm_vmexit(svm); in nested_svm_inject_npf_exit()
61 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr() local
62 u64 cr3 = svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_pdptr()
75 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3() local
77 return svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_cr3()
82 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_init_mmu_context() local
93 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4, in nested_svm_init_mmu_context()
94 svm->vmcb01.ptr->save.efer, in nested_svm_init_mmu_context()
95 svm->nested.ctl.nested_cr3); in nested_svm_init_mmu_context()
108 static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm) in nested_vmcb_needs_vls_intercept() argument
110 if (!svm->v_vmload_vmsave_enabled) in nested_vmcb_needs_vls_intercept()
113 if (!nested_npt_enabled(svm)) in nested_vmcb_needs_vls_intercept()
116 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK)) in nested_vmcb_needs_vls_intercept()
122 void recalc_intercepts(struct vcpu_svm *svm) in recalc_intercepts() argument
128 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in recalc_intercepts()
130 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
133 c = &svm->vmcb->control; in recalc_intercepts()
134 h = &svm->vmcb01.ptr->control; in recalc_intercepts()
135 g = &svm->nested.ctl; in recalc_intercepts()
155 if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu)) in recalc_intercepts()
165 if (nested_vmcb_needs_vls_intercept(svm)) { in recalc_intercepts()
183 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) in nested_svm_vmrun_msrpm() argument
185 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments; in nested_svm_vmrun_msrpm()
196 if (!svm->nested.force_msr_bitmap_recalc && in nested_svm_vmrun_msrpm()
197 kvm_hv_hypercall_enabled(&svm->vcpu) && in nested_svm_vmrun_msrpm()
199 (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS))) in nested_svm_vmrun_msrpm()
202 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
218 offset = svm->nested.ctl.msrpm_base_pa + (p * 4); in nested_svm_vmrun_msrpm()
220 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) in nested_svm_vmrun_msrpm()
223 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
226 svm->nested.force_msr_bitmap_recalc = false; in nested_svm_vmrun_msrpm()
229 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm)); in nested_svm_vmrun_msrpm()
320 struct vcpu_svm *svm = to_svm(vcpu); in nested_vmcb_check_save() local
321 struct vmcb_save_area_cached *save = &svm->nested.save; in nested_vmcb_check_save()
328 struct vcpu_svm *svm = to_svm(vcpu); in nested_vmcb_check_controls() local
329 struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl; in nested_vmcb_check_controls()
379 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, in nested_copy_vmcb_control_to_cache() argument
382 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control); in nested_copy_vmcb_control_to_cache()
401 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, in nested_copy_vmcb_save_to_cache() argument
404 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save); in nested_copy_vmcb_save_to_cache()
411 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm) in nested_sync_control_from_vmcb02() argument
414 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj; in nested_sync_control_from_vmcb02()
415 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err; in nested_sync_control_from_vmcb02()
419 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) && in nested_sync_control_from_vmcb02()
420 svm_is_intercept(svm, INTERCEPT_VINTR)) { in nested_sync_control_from_vmcb02()
432 if (nested_vgif_enabled(svm)) in nested_sync_control_from_vmcb02()
435 svm->nested.ctl.int_ctl &= ~mask; in nested_sync_control_from_vmcb02()
436 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask; in nested_sync_control_from_vmcb02()
443 static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm, in nested_save_pending_event_to_vmcb12() argument
446 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_save_pending_event_to_vmcb12()
529 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm) in nested_vmcb02_compute_g_pat() argument
531 if (!svm->nested.vmcb02.ptr) in nested_vmcb02_compute_g_pat()
535 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat; in nested_vmcb02_compute_g_pat()
538 static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12) in nested_vmcb02_prepare_save() argument
541 struct vmcb *vmcb01 = svm->vmcb01.ptr; in nested_vmcb02_prepare_save()
542 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; in nested_vmcb02_prepare_save()
544 nested_vmcb02_compute_g_pat(svm); in nested_vmcb02_prepare_save()
547 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) { in nested_vmcb02_prepare_save()
549 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa; in nested_vmcb02_prepare_save()
550 svm->nested.force_msr_bitmap_recalc = true; in nested_vmcb02_prepare_save()
568 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED); in nested_vmcb02_prepare_save()
570 svm_set_efer(&svm->vcpu, svm->nested.save.efer); in nested_vmcb02_prepare_save()
572 svm_set_cr0(&svm->vcpu, svm->nested.save.cr0); in nested_vmcb02_prepare_save()
573 svm_set_cr4(&svm->vcpu, svm->nested.save.cr4); in nested_vmcb02_prepare_save()
575 svm->vcpu.arch.cr2 = vmcb12->save.cr2; in nested_vmcb02_prepare_save()
577 kvm_rax_write(&svm->vcpu, vmcb12->save.rax); in nested_vmcb02_prepare_save()
578 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp); in nested_vmcb02_prepare_save()
579 kvm_rip_write(&svm->vcpu, vmcb12->save.rip); in nested_vmcb02_prepare_save()
588 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1; in nested_vmcb02_prepare_save()
589 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW; in nested_vmcb02_prepare_save()
593 if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { in nested_vmcb02_prepare_save()
600 svm_update_lbrv(&svm->vcpu); in nested_vmcb02_prepare_save()
631 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, in nested_vmcb02_prepare_control() argument
638 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_vmcb02_prepare_control()
639 struct vmcb *vmcb01 = svm->vmcb01.ptr; in nested_vmcb02_prepare_control()
640 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; in nested_vmcb02_prepare_control()
649 if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK)) in nested_vmcb02_prepare_control()
665 if (nested_npt_enabled(svm)) in nested_vmcb02_prepare_control()
670 svm->nested.ctl.tsc_offset, in nested_vmcb02_prepare_control()
671 svm->tsc_ratio_msr); in nested_vmcb02_prepare_control()
675 if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) { in nested_vmcb02_prepare_control()
676 WARN_ON(!svm->tsc_scaling_enabled); in nested_vmcb02_prepare_control()
681 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | in nested_vmcb02_prepare_control()
684 vmcb02->control.int_vector = svm->nested.ctl.int_vector; in nested_vmcb02_prepare_control()
685 vmcb02->control.int_state = svm->nested.ctl.int_state; in nested_vmcb02_prepare_control()
686 vmcb02->control.event_inj = svm->nested.ctl.event_inj; in nested_vmcb02_prepare_control()
687 vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_vmcb02_prepare_control()
697 if (svm->nrips_enabled) in nested_vmcb02_prepare_control()
698 vmcb02->control.next_rip = svm->nested.ctl.next_rip; in nested_vmcb02_prepare_control()
702 svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj); in nested_vmcb02_prepare_control()
704 svm->soft_int_injected = true; in nested_vmcb02_prepare_control()
705 svm->soft_int_csbase = vmcb12_csbase; in nested_vmcb02_prepare_control()
706 svm->soft_int_old_rip = vmcb12_rip; in nested_vmcb02_prepare_control()
707 if (svm->nrips_enabled) in nested_vmcb02_prepare_control()
708 svm->soft_int_next_rip = svm->nested.ctl.next_rip; in nested_vmcb02_prepare_control()
710 svm->soft_int_next_rip = vmcb12_rip; in nested_vmcb02_prepare_control()
715 if (svm->lbrv_enabled) in nested_vmcb02_prepare_control()
717 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK); in nested_vmcb02_prepare_control()
719 if (!nested_vmcb_needs_vls_intercept(svm)) in nested_vmcb02_prepare_control()
722 pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0; in nested_vmcb02_prepare_control()
723 pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0; in nested_vmcb02_prepare_control()
724 if (kvm_pause_in_guest(svm->vcpu.kvm)) { in nested_vmcb02_prepare_control()
735 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) { in nested_vmcb02_prepare_control()
752 recalc_intercepts(svm); in nested_vmcb02_prepare_control()
770 struct vcpu_svm *svm = to_svm(vcpu); in enter_svm_guest_mode() local
773 trace_kvm_nested_vmenter(svm->vmcb->save.rip, in enter_svm_guest_mode()
791 svm->nested.vmcb12_gpa = vmcb12_gpa; in enter_svm_guest_mode()
793 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr); in enter_svm_guest_mode()
795 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr); in enter_svm_guest_mode()
797 svm_switch_vmcb(svm, &svm->nested.vmcb02); in enter_svm_guest_mode()
798 nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base); in enter_svm_guest_mode()
799 nested_vmcb02_prepare_save(svm, vmcb12); in enter_svm_guest_mode()
801 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3, in enter_svm_guest_mode()
802 nested_npt_enabled(svm), from_vmrun); in enter_svm_guest_mode()
809 svm_set_gif(svm, true); in enter_svm_guest_mode()
821 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_vmrun() local
826 struct vmcb *vmcb01 = svm->vmcb01.ptr; in nested_svm_vmrun()
828 if (!svm->nested.hsave_msr) { in nested_svm_vmrun()
845 vmcb12_gpa = svm->vmcb->save.rax; in nested_svm_vmrun()
858 if (WARN_ON_ONCE(!svm->nested.initialized)) in nested_svm_vmrun()
861 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); in nested_svm_vmrun()
862 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); in nested_svm_vmrun()
886 svm->nested.nested_run_pending = 1; in nested_svm_vmrun()
891 if (nested_svm_vmrun_msrpm(svm)) in nested_svm_vmrun()
895 svm->nested.nested_run_pending = 0; in nested_svm_vmrun()
896 svm->nmi_l1_to_l2 = false; in nested_svm_vmrun()
897 svm->soft_int_injected = false; in nested_svm_vmrun()
899 svm->vmcb->control.exit_code = SVM_EXIT_ERR; in nested_svm_vmrun()
900 svm->vmcb->control.exit_code_hi = 0; in nested_svm_vmrun()
901 svm->vmcb->control.exit_info_1 = 0; in nested_svm_vmrun()
902 svm->vmcb->control.exit_info_2 = 0; in nested_svm_vmrun()
904 nested_svm_vmexit(svm); in nested_svm_vmrun()
949 int nested_svm_vmexit(struct vcpu_svm *svm) in nested_svm_vmexit() argument
951 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_svm_vmexit()
952 struct vmcb *vmcb01 = svm->vmcb01.ptr; in nested_svm_vmexit()
953 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; in nested_svm_vmexit()
958 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); in nested_svm_vmexit()
969 svm->nested.vmcb12_gpa = 0; in nested_svm_vmexit()
970 WARN_ON_ONCE(svm->nested.nested_run_pending); in nested_svm_vmexit()
975 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE; in nested_svm_vmexit()
985 vmcb12->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
989 vmcb12->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
995 vmcb12->save.dr6 = svm->vcpu.arch.dr6; in nested_svm_vmexit()
1005 nested_save_pending_event_to_vmcb12(svm, vmcb12); in nested_svm_vmexit()
1007 if (svm->nrips_enabled) in nested_svm_vmexit()
1010 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl; in nested_svm_vmexit()
1011 vmcb12->control.event_inj = svm->nested.ctl.event_inj; in nested_svm_vmexit()
1012 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_svm_vmexit()
1020 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr); in nested_svm_vmexit()
1022 svm_switch_vmcb(svm, &svm->vmcb01); in nested_svm_vmexit()
1024 if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { in nested_svm_vmexit()
1036 svm_set_gif(svm, false); in nested_svm_vmexit()
1039 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset; in nested_svm_vmexit()
1040 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) { in nested_svm_vmexit()
1041 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset; in nested_svm_vmexit()
1045 if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) { in nested_svm_vmexit()
1046 WARN_ON(!svm->tsc_scaling_enabled); in nested_svm_vmexit()
1051 svm->nested.ctl.nested_cr3 = 0; in nested_svm_vmexit()
1064 svm->vcpu.arch.dr7 = DR7_FIXED_1; in nested_svm_vmexit()
1065 kvm_update_dr7(&svm->vcpu); in nested_svm_vmexit()
1088 svm->vcpu.arch.nmi_injected = false; in nested_svm_vmexit()
1099 kvm_queue_exception(&(svm->vcpu), DB_VECTOR); in nested_svm_vmexit()
1113 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_triple_fault() local
1115 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN)) in nested_svm_triple_fault()
1122 int svm_allocate_nested(struct vcpu_svm *svm) in svm_allocate_nested() argument
1126 if (svm->nested.initialized) in svm_allocate_nested()
1132 svm->nested.vmcb02.ptr = page_address(vmcb02_page); in svm_allocate_nested()
1133 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT); in svm_allocate_nested()
1135 svm->nested.msrpm = svm_vcpu_alloc_msrpm(); in svm_allocate_nested()
1136 if (!svm->nested.msrpm) in svm_allocate_nested()
1138 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm); in svm_allocate_nested()
1140 svm->nested.initialized = true; in svm_allocate_nested()
1148 void svm_free_nested(struct vcpu_svm *svm) in svm_free_nested() argument
1150 if (!svm->nested.initialized) in svm_free_nested()
1153 if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr)) in svm_free_nested()
1154 svm_switch_vmcb(svm, &svm->vmcb01); in svm_free_nested()
1156 svm_vcpu_free_msrpm(svm->nested.msrpm); in svm_free_nested()
1157 svm->nested.msrpm = NULL; in svm_free_nested()
1159 __free_page(virt_to_page(svm->nested.vmcb02.ptr)); in svm_free_nested()
1160 svm->nested.vmcb02.ptr = NULL; in svm_free_nested()
1169 svm->nested.last_vmcb12_gpa = INVALID_GPA; in svm_free_nested()
1171 svm->nested.initialized = false; in svm_free_nested()
1176 struct vcpu_svm *svm = to_svm(vcpu); in svm_leave_nested() local
1179 svm->nested.nested_run_pending = 0; in svm_leave_nested()
1180 svm->nested.vmcb12_gpa = INVALID_GPA; in svm_leave_nested()
1184 svm_switch_vmcb(svm, &svm->vmcb01); in svm_leave_nested()
1187 vmcb_mark_all_dirty(svm->vmcb); in svm_leave_nested()
1193 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) in nested_svm_exit_handled_msr() argument
1198 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
1201 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
1203 write = svm->vmcb->control.exit_info_1 & 1; in nested_svm_exit_handled_msr()
1212 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4)) in nested_svm_exit_handled_msr()
1218 static int nested_svm_intercept_ioio(struct vcpu_svm *svm) in nested_svm_intercept_ioio() argument
1225 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
1228 port = svm->vmcb->control.exit_info_1 >> 16; in nested_svm_intercept_ioio()
1229 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> in nested_svm_intercept_ioio()
1231 gpa = svm->nested.ctl.iopm_base_pa + (port / 8); in nested_svm_intercept_ioio()
1237 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
1243 static int nested_svm_intercept(struct vcpu_svm *svm) in nested_svm_intercept() argument
1245 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_intercept()
1250 vmexit = nested_svm_exit_handled_msr(svm); in nested_svm_intercept()
1253 vmexit = nested_svm_intercept_ioio(svm); in nested_svm_intercept()
1256 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1261 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1279 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1287 int nested_svm_exit_handled(struct vcpu_svm *svm) in nested_svm_exit_handled() argument
1291 vmexit = nested_svm_intercept(svm); in nested_svm_exit_handled()
1294 nested_svm_vmexit(svm); in nested_svm_exit_handled()
1317 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_is_exception_vmexit() local
1319 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector)); in nested_svm_is_exception_vmexit()
1325 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_exception_vmexit() local
1326 struct vmcb *vmcb = svm->vmcb; in nested_svm_inject_exception_vmexit()
1355 nested_svm_vmexit(svm); in nested_svm_inject_exception_vmexit()
1358 static inline bool nested_exit_on_init(struct vcpu_svm *svm) in nested_exit_on_init() argument
1360 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); in nested_exit_on_init()
1366 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_nested_events() local
1372 bool block_nested_exceptions = svm->nested.nested_run_pending; in svm_check_nested_events()
1386 if (!nested_exit_on_init(svm)) in svm_check_nested_events()
1388 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT); in svm_check_nested_events()
1409 if (!nested_exit_on_smi(svm)) in svm_check_nested_events()
1411 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI); in svm_check_nested_events()
1419 if (!nested_exit_on_nmi(svm)) in svm_check_nested_events()
1421 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI); in svm_check_nested_events()
1428 if (!nested_exit_on_intr(svm)) in svm_check_nested_events()
1430 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); in svm_check_nested_events()
1431 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR); in svm_check_nested_events()
1438 int nested_svm_exit_special(struct vcpu_svm *svm) in nested_svm_exit_special() argument
1440 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_exit_special()
1441 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_svm_exit_special()
1451 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] & in nested_svm_exit_special()
1455 svm->vcpu.arch.apf.host_apf_flags) in nested_svm_exit_special()
1476 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_update_tsc_ratio_msr() local
1480 svm->tsc_ratio_msr); in nested_svm_update_tsc_ratio_msr()
1524 struct vcpu_svm *svm; in svm_get_nested_state() local
1533 &user_kvm_nested_state->data.svm[0]; in svm_get_nested_state()
1538 svm = to_svm(vcpu); in svm_get_nested_state()
1545 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa; in svm_get_nested_state()
1549 if (svm->nested.nested_run_pending) in svm_get_nested_state()
1553 if (gif_set(svm)) in svm_get_nested_state()
1573 nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl); in svm_get_nested_state()
1580 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save, in svm_get_nested_state()
1591 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nested_state() local
1593 &user_kvm_nested_state->data.svm[0]; in svm_set_nested_state()
1628 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); in svm_set_nested_state()
1632 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa)) in svm_set_nested_state()
1684 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save; in svm_set_nested_state()
1686 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); in svm_set_nested_state()
1688 svm->nested.nested_run_pending = in svm_set_nested_state()
1691 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; in svm_set_nested_state()
1693 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save); in svm_set_nested_state()
1694 nested_copy_vmcb_control_to_cache(svm, ctl); in svm_set_nested_state()
1696 svm_switch_vmcb(svm, &svm->nested.vmcb02); in svm_set_nested_state()
1697 nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base); in svm_set_nested_state()
1706 ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3, in svm_set_nested_state()
1707 nested_npt_enabled(svm), false); in svm_set_nested_state()
1711 svm->nested.force_msr_bitmap_recalc = true; in svm_set_nested_state()
1724 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nested_state_pages() local
1730 !nested_npt_enabled(svm) && is_pae_paging(vcpu)) in svm_get_nested_state_pages()
1739 if (!nested_svm_vmrun_msrpm(svm)) { in svm_get_nested_state_pages()
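
Taken together, the matches above trace the nested VMRUN / #VMEXIT round trip: VMRUN emulation caches vmcb12 (L1's guest VMCB) into svm->nested.ctl and svm->nested.save, switches to svm->nested.vmcb02 so that L2 runs on vmcb02, and nested_svm_vmexit() later reflects state back into vmcb12 and switches to svm->vmcb01 to resume L1. A condensed, non-literal sketch of that flow, using only calls that appear in the listing (the wrapper name nested_round_trip_sketch is hypothetical):

	/* Condensed orientation sketch; not a verbatim excerpt of nested.c. */
	static void nested_round_trip_sketch(struct vcpu_svm *svm, struct vmcb *vmcb12)
	{
		/* VMRUN emulation: cache vmcb12, then run L2 on vmcb02 */
		nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
		nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
		svm_switch_vmcb(svm, &svm->nested.vmcb02);

		/* ... L2 runs until an intercepted event ... */

		/* #VMEXIT: copy state back to vmcb12 and resume L1 on vmcb01 */
		nested_svm_vmexit(svm);
	}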