Searched refs:walk_mmu (Results 1 – 9 of 9) sorted by relevance
143 return vcpu->arch.walk_mmu->pdptrs[index]; in kvm_pdptr_read()
148 vcpu->arch.walk_mmu->pdptrs[index] = value; in kvm_pdptr_write()
171 return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; in mmu_is_nested()
797 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
862 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in load_pdptrs()
7206 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_read()
7216 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_write()
7228 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_system()
7237 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_read_guest_virt_helper()
7270 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fetch_guest_virt()
7329 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_write_guest_virt_helper()
7433 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vcpu_mmio_gva_to_gpa()
7443 !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
[all …]
506 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
959 WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu); in FNAME()
5750 kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE); in kvm_mmu_invlpg()
5942 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in kvm_mmu_create()
99 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_svm_init_mmu_context()
105 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in nested_svm_uninit_mmu_context()
787 struct kvm_mmu *walk_mmu; member
429 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_ept_init_mmu_context()
435 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in nested_ept_uninit_mmu_context()
3228 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vmx_ept_load_pdptrs()
3243 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs()
Completed in 74 milliseconds
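
Note on the results: they show one consistent pattern. walk_mmu starts out pointing at root_mmu (kvm_mmu_create), is switched to nested_mmu when a nested guest's MMU context is installed (nested_svm_init_mmu_context, nested_ept_init_mmu_context) and back again on uninit, and every guest-virtual-address walk (kvm_mmu_gva_to_gpa_*, kvm_read/write_guest_virt_helper, vcpu_mmio_gva_to_gpa) dereferences whichever MMU the pointer currently selects, with mmu_is_nested() simply comparing the pointer against nested_mmu. The sketch below is a minimal userspace model of that pointer swap, under the assumption that a simplified struct layout and stand-in helper bodies are acceptable; it is not the real kernel code.

/*
 * Illustrative model of the walk_mmu pointer swap seen in the results
 * above. Struct layout and helpers are simplified stand-ins, not the
 * actual KVM definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct kvm_mmu {
	const char *name;		/* which translation context this MMU represents */
	uint64_t pdptrs[4];		/* PAE PDPTEs, as accessed by kvm_pdptr_read/write() */
};

struct kvm_vcpu_arch {
	struct kvm_mmu root_mmu;	/* L1's own page tables */
	struct kvm_mmu nested_mmu;	/* L2's page tables while running a nested guest */
	struct kvm_mmu *walk_mmu;	/* MMU used to walk guest virtual addresses */
};

/* Mirrors mmu_is_nested(): nested iff walk_mmu points at nested_mmu. */
static bool mmu_is_nested(struct kvm_vcpu_arch *arch)
{
	return arch->walk_mmu == &arch->nested_mmu;
}

/* Like kvm_mmu_create(): guest walks initially use the root MMU. */
static void mmu_create(struct kvm_vcpu_arch *arch)
{
	arch->root_mmu = (struct kvm_mmu){ .name = "root_mmu" };
	arch->nested_mmu = (struct kvm_mmu){ .name = "nested_mmu" };
	arch->walk_mmu = &arch->root_mmu;
}

/* Like nested_{svm,ept}_init_mmu_context(): walks now use the nested MMU. */
static void nested_init_mmu_context(struct kvm_vcpu_arch *arch)
{
	arch->walk_mmu = &arch->nested_mmu;
}

/* Like nested_{svm,ept}_uninit_mmu_context(): fall back to the root MMU. */
static void nested_uninit_mmu_context(struct kvm_vcpu_arch *arch)
{
	arch->walk_mmu = &arch->root_mmu;
}

int main(void)
{
	struct kvm_vcpu_arch arch;

	mmu_create(&arch);
	printf("after create:        walk_mmu=%s nested=%d\n",
	       arch.walk_mmu->name, mmu_is_nested(&arch));

	nested_init_mmu_context(&arch);
	printf("after nested init:   walk_mmu=%s nested=%d\n",
	       arch.walk_mmu->name, mmu_is_nested(&arch));

	nested_uninit_mmu_context(&arch);
	printf("after nested uninit: walk_mmu=%s nested=%d\n",
	       arch.walk_mmu->name, mmu_is_nested(&arch));
	return 0;
}

The design point this illustrates: callers that translate guest virtual addresses never care whether the vCPU is in a nested guest; they always go through walk_mmu, and only the nested-virtualization entry/exit paths retarget the pointer.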