Searched refs: to_shadow_page (Results 1 – 3 of 3) sorted by relevance
 222  static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)   in to_shadow_page() function
 231      return to_shadow_page(spte & SPTE_BASE_ADDR_MASK);                 in spte_to_child_sp()
 236      return to_shadow_page(__pa(sptep));                                in sptep_to_sp()
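The first result is the definition itself plus two one-line wrappers. Below is a minimal sketch of how the three fit together; the wrapper bodies and all three signatures are confirmed by the hits above, but the body of to_shadow_page() and the file it likely lives in (arch/x86/kvm/mmu/spte.h) are assumptions based on how KVM stashes the owning struct kvm_mmu_page in the backing struct page when the shadow page is allocated:

	/* Sketch; the to_shadow_page() body is an assumption. KVM stores a
	 * pointer to the owning struct kvm_mmu_page in the private field of
	 * the struct page that backs the shadow page table, so an HPA can be
	 * mapped back to its kvm_mmu_page in O(1), with no lookup structure. */
	static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		return (struct kvm_mmu_page *)page_private(page);
	}

	/* Child shadow page of a present non-leaf SPTE: mask off the flag
	 * bits to recover the HPA of the next-level page table. */
	static inline struct kvm_mmu_page *spte_to_child_sp(u64 spte)
	{
		return to_shadow_page(spte & SPTE_BASE_ADDR_MASK);
	}

	/* Shadow page containing a given SPTE: __pa() turns the kernel
	 * virtual address of the entry into the physical address of the
	 * page table it sits in. */
	static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
	{
		return to_shadow_page(__pa(sptep));
	}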
3540  if (to_shadow_page(mmu->root.hpa)) {                                   in kvm_mmu_free_roots()
3578  if (!to_shadow_page(root_hpa) ||                                       in kvm_mmu_free_guest_mode_roots()
3579      to_shadow_page(root_hpa)->role.guest_mode)                         in kvm_mmu_free_guest_mode_roots()
3934  sp = to_shadow_page(root);                                             in is_unsync_root()
3964  sp = to_shadow_page(root);                                             in kvm_mmu_sync_roots()
4298  struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);    in is_page_fault_stale()
4481  role.word == to_shadow_page(root->hpa)->role.word;                     in is_root_usable()
4555  if (VALID_PAGE(mmu->root.hpa) && !to_shadow_page(mmu->root.hpa))       in fast_pgd_switch()
4603  to_shadow_page(vcpu->arch.mmu->root.hpa));                             in kvm_mmu_new_pgd()
5468  sp = to_shadow_page(root_hpa);                                         in is_obsolete_root()
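The second result's hits, most likely in arch/x86/kvm/mmu/mmu.c given the function names, share one pattern: a root HPA is only treated as a shadow page after to_shadow_page() has been checked for NULL, because some valid roots (e.g. the PAE page directory used by shadow paging) are not backed by a struct kvm_mmu_page at all. A hedged reconstruction of is_root_usable(), the function around hit 4481; only the role.word comparison line is confirmed by the output above, the rest is an assumption from trees of this vintage:

	/* Sketch of the cached-root check around hit 4481. A previously
	 * cached root is reusable only if it matches the new pgd and role
	 * AND is actually backed by a shadow page; the to_shadow_page()
	 * NULL check guards the ->role.word dereference on the next line. */
	static bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
				   union kvm_mmu_page_role role)
	{
		return (role.direct || pgd == root->pgd) &&
		       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
		       role.word == to_shadow_page(root->hpa)->role.word;
	}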
791 for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
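The third result is a macro body, which is why the search shows no enclosing function. It reads like the wrapper that seeds the TDP MMU iterator from a vCPU's current MMU: the struct kvm_mmu's root HPA is converted to its root kvm_mmu_page once, up front, and the page-table walk starts from there. A sketch under that assumption; the outer macro name (tdp_mmu_for_each_pte) and the expansion of for_each_tdp_pte() are taken from memory of tdp_mmu.c in trees of this era, not from the output above:

	/* Assumed wrapper around hit 791: resolve the MMU's root HPA to the
	 * root's struct kvm_mmu_page and hand it to the generic iterator. */
	#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
		for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)

	/* Assumed expansion: walk every SPTE covering [_start, _end) down to
	 * 4 KiB granularity, starting from the given root shadow page. */
	#define for_each_tdp_pte(_iter, _root, _start, _end)		\
		for_each_tdp_pte_min_level(_iter, _root, PG_LEVEL_4K, _start, _end)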