Lines Matching refs:mmu
233 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
235 return !!(mmu->mmu_role. base_or_ext . reg##_##name); \
701 if (is_tdp_mmu(vcpu->arch.mmu)) { in walk_shadow_page_lockless_begin()
720 if (is_tdp_mmu(vcpu->arch.mmu)) { in walk_shadow_page_lockless_end()
1906 int ret = vcpu->arch.mmu->sync_page(vcpu, sp); in kvm_sync_page()
2078 bool direct_mmu = vcpu->arch.mmu->direct_map; in kvm_mmu_get_page()
2086 role = vcpu->arch.mmu->mmu_role.base; in kvm_mmu_get_page()
2092 if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page()
2178 iterator->level = vcpu->arch.mmu->shadow_root_level; in shadow_walk_init_using_root()
2181 vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && in shadow_walk_init_using_root()
2182 !vcpu->arch.mmu->direct_map) in shadow_walk_init_using_root()
2190 BUG_ON(root != vcpu->arch.mmu->root_hpa); in shadow_walk_init_using_root()
2193 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; in shadow_walk_init_using_root()
2204 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa, in shadow_walk_init()
2558 if (vcpu->arch.mmu->direct_map) in kvm_mmu_unprotect_page_virt()
3156 if (is_tdp_mmu(vcpu->arch.mmu)) in fast_page_fault()
3260 void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in kvm_mmu_free_roots() argument
3271 if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) { in kvm_mmu_free_roots()
3274 VALID_PAGE(mmu->prev_roots[i].hpa)) in kvm_mmu_free_roots()
3285 mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa, in kvm_mmu_free_roots()
3289 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && in kvm_mmu_free_roots()
3290 (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { in kvm_mmu_free_roots()
3291 mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list); in kvm_mmu_free_roots()
3292 } else if (mmu->pae_root) { in kvm_mmu_free_roots()
3294 if (!IS_VALID_PAE_ROOT(mmu->pae_root[i])) in kvm_mmu_free_roots()
3297 mmu_free_root_page(kvm, &mmu->pae_root[i], in kvm_mmu_free_roots()
3299 mmu->pae_root[i] = INVALID_PAE_ROOT; in kvm_mmu_free_roots()
3302 mmu->root_hpa = INVALID_PAGE; in kvm_mmu_free_roots()
3303 mmu->root_pgd = 0; in kvm_mmu_free_roots()
3311 void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) in kvm_mmu_free_guest_mode_roots() argument
3321 WARN_ON_ONCE(mmu->mmu_role.base.guest_mode); in kvm_mmu_free_guest_mode_roots()
3324 root_hpa = mmu->prev_roots[i].hpa; in kvm_mmu_free_guest_mode_roots()
3333 kvm_mmu_free_roots(vcpu, mmu, roots_to_free); in kvm_mmu_free_guest_mode_roots()
3363 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_direct_roots() local
3364 u8 shadow_root_level = mmu->shadow_root_level; in mmu_alloc_direct_roots()
3376 mmu->root_hpa = root; in mmu_alloc_direct_roots()
3379 mmu->root_hpa = root; in mmu_alloc_direct_roots()
3381 if (WARN_ON_ONCE(!mmu->pae_root)) { in mmu_alloc_direct_roots()
3387 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); in mmu_alloc_direct_roots()
3391 mmu->pae_root[i] = root | PT_PRESENT_MASK | in mmu_alloc_direct_roots()
3394 mmu->root_hpa = __pa(mmu->pae_root); in mmu_alloc_direct_roots()
3402 mmu->root_pgd = 0; in mmu_alloc_direct_roots()
3471 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_shadow_roots() local
3478 root_pgd = mmu->get_guest_pgd(vcpu); in mmu_alloc_shadow_roots()
3488 if (mmu->root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3490 pdptrs[i] = mmu->get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3512 if (mmu->root_level >= PT64_ROOT_4LEVEL) { in mmu_alloc_shadow_roots()
3514 mmu->shadow_root_level, false); in mmu_alloc_shadow_roots()
3515 mmu->root_hpa = root; in mmu_alloc_shadow_roots()
3519 if (WARN_ON_ONCE(!mmu->pae_root)) { in mmu_alloc_shadow_roots()
3530 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { in mmu_alloc_shadow_roots()
3533 if (WARN_ON_ONCE(!mmu->pml4_root)) { in mmu_alloc_shadow_roots()
3537 mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask; in mmu_alloc_shadow_roots()
3539 if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) { in mmu_alloc_shadow_roots()
3540 if (WARN_ON_ONCE(!mmu->pml5_root)) { in mmu_alloc_shadow_roots()
3544 mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask; in mmu_alloc_shadow_roots()
3549 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); in mmu_alloc_shadow_roots()
3551 if (mmu->root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3553 mmu->pae_root[i] = INVALID_PAE_ROOT; in mmu_alloc_shadow_roots()
3561 mmu->pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
3564 if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) in mmu_alloc_shadow_roots()
3565 mmu->root_hpa = __pa(mmu->pml5_root); in mmu_alloc_shadow_roots()
3566 else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) in mmu_alloc_shadow_roots()
3567 mmu->root_hpa = __pa(mmu->pml4_root); in mmu_alloc_shadow_roots()
3569 mmu->root_hpa = __pa(mmu->pae_root); in mmu_alloc_shadow_roots()
3572 mmu->root_pgd = root_pgd; in mmu_alloc_shadow_roots()
3581 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_special_roots() local
3582 bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL; in mmu_alloc_special_roots()
3593 if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL || in mmu_alloc_special_roots()
3594 mmu->shadow_root_level < PT64_ROOT_4LEVEL) in mmu_alloc_special_roots()
3604 if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root)) in mmu_alloc_special_roots()
3611 if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root || in mmu_alloc_special_roots()
3612 (need_pml5 && mmu->pml5_root))) in mmu_alloc_special_roots()
3635 mmu->pae_root = pae_root; in mmu_alloc_special_roots()
3636 mmu->pml4_root = pml4_root; in mmu_alloc_special_roots()
3637 mmu->pml5_root = pml5_root; in mmu_alloc_special_roots()
3682 if (vcpu->arch.mmu->direct_map) in kvm_mmu_sync_roots()
3685 if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) in kvm_mmu_sync_roots()
3690 if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { in kvm_mmu_sync_roots()
3691 hpa_t root = vcpu->arch.mmu->root_hpa; in kvm_mmu_sync_roots()
3711 hpa_t root = vcpu->arch.mmu->pae_root[i]; in kvm_mmu_sync_roots()
3730 if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa)) in kvm_mmu_sync_prev_roots()
3734 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); in kvm_mmu_sync_prev_roots()
3804 if (is_tdp_mmu(vcpu->arch.mmu)) in get_mmio_spte()
3827 rsvd_check = &vcpu->arch.mmu->shadow_zero_check; in get_mmio_spte()
3915 arch.direct_map = vcpu->arch.mmu->direct_map; in kvm_arch_setup_async_pf()
3916 arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); in kvm_arch_setup_async_pf()
3990 struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root_hpa); in is_page_fault_stale()
4013 bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); in direct_page_fault()
4157 struct kvm_mmu *mmu = vcpu->arch.mmu; in cached_root_available() local
4159 root.pgd = mmu->root_pgd; in cached_root_available()
4160 root.hpa = mmu->root_hpa; in cached_root_available()
4166 swap(root, mmu->prev_roots[i]); in cached_root_available()
4172 mmu->root_hpa = root.hpa; in cached_root_available()
4173 mmu->root_pgd = root.pgd; in cached_root_available()
4181 struct kvm_mmu *mmu = vcpu->arch.mmu; in fast_pgd_switch() local
4188 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && in fast_pgd_switch()
4189 mmu->root_level >= PT64_ROOT_4LEVEL) in fast_pgd_switch()
4199 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT); in __kvm_mmu_new_pgd()
4230 to_shadow_page(vcpu->arch.mmu->root_hpa)); in __kvm_mmu_new_pgd()
4534 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept) in update_permission_bitmask() argument
4542 bool cr4_smep = is_cr4_smep(mmu); in update_permission_bitmask()
4543 bool cr4_smap = is_cr4_smap(mmu); in update_permission_bitmask()
4544 bool cr0_wp = is_cr0_wp(mmu); in update_permission_bitmask()
4545 bool efer_nx = is_efer_nx(mmu); in update_permission_bitmask()
4547 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { in update_permission_bitmask()
4602 mmu->permissions[byte] = ff | uf | wf | smepf | smapf; in update_permission_bitmask()
4630 static void update_pkru_bitmask(struct kvm_mmu *mmu) in update_pkru_bitmask() argument
4635 mmu->pkru_mask = 0; in update_pkru_bitmask()
4637 if (!is_cr4_pke(mmu)) in update_pkru_bitmask()
4640 wp = is_cr0_wp(mmu); in update_pkru_bitmask()
4642 for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { in update_pkru_bitmask()
4670 mmu->pkru_mask |= (pkey_bits & 3) << pfec; in update_pkru_bitmask()
4675 struct kvm_mmu *mmu) in reset_guest_paging_metadata() argument
4677 if (!is_cr0_pg(mmu)) in reset_guest_paging_metadata()
4680 reset_rsvds_bits_mask(vcpu, mmu); in reset_guest_paging_metadata()
4681 update_permission_bitmask(mmu, false); in reset_guest_paging_metadata()
4682 update_pkru_bitmask(mmu); in reset_guest_paging_metadata()
5093 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map); in kvm_mmu_load()
5099 if (vcpu->arch.mmu->direct_map) in kvm_mmu_load()
5302 bool direct = vcpu->arch.mmu->direct_map; in kvm_mmu_page_fault()
5304 if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa))) in kvm_mmu_page_fault()
5333 if (vcpu->arch.mmu->direct_map && in kvm_mmu_page_fault()
5358 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in kvm_mmu_invalidate_gva() argument
5364 if (mmu != &vcpu->arch.guest_mmu) { in kvm_mmu_invalidate_gva()
5372 if (!mmu->invlpg) in kvm_mmu_invalidate_gva()
5376 mmu->invlpg(vcpu, gva, mmu->root_hpa); in kvm_mmu_invalidate_gva()
5390 if (VALID_PAGE(mmu->prev_roots[i].hpa)) in kvm_mmu_invalidate_gva()
5391 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); in kvm_mmu_invalidate_gva()
5393 mmu->invlpg(vcpu, gva, root_hpa); in kvm_mmu_invalidate_gva()
5407 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_mmu_invpcid_gva() local
5412 mmu->invlpg(vcpu, gva, mmu->root_hpa); in kvm_mmu_invpcid_gva()
5417 if (VALID_PAGE(mmu->prev_roots[i].hpa) && in kvm_mmu_invpcid_gva()
5418 pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) { in kvm_mmu_invpcid_gva()
5419 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); in kvm_mmu_invpcid_gva()
5511 static void free_mmu_pages(struct kvm_mmu *mmu) in free_mmu_pages() argument
5513 if (!tdp_enabled && mmu->pae_root) in free_mmu_pages()
5514 set_memory_encrypted((unsigned long)mmu->pae_root, 1); in free_mmu_pages()
5515 free_page((unsigned long)mmu->pae_root); in free_mmu_pages()
5516 free_page((unsigned long)mmu->pml4_root); in free_mmu_pages()
5517 free_page((unsigned long)mmu->pml5_root); in free_mmu_pages()
5520 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) in __kvm_mmu_create() argument
5525 mmu->root_hpa = INVALID_PAGE; in __kvm_mmu_create()
5526 mmu->root_pgd = 0; in __kvm_mmu_create()
5527 mmu->translate_gpa = translate_gpa; in __kvm_mmu_create()
5529 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; in __kvm_mmu_create()
5549 mmu->pae_root = page_address(page); in __kvm_mmu_create()
5560 set_memory_decrypted((unsigned long)mmu->pae_root, 1); in __kvm_mmu_create()
5565 mmu->pae_root[i] = INVALID_PAE_ROOT; in __kvm_mmu_create()
5582 vcpu->arch.mmu = &vcpu->arch.root_mmu; in kvm_mmu_create()
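
A note on the accessor hit at lines 233/235 above: that body belongs to a token-pasting macro (BUILD_MMU_ROLE_ACCESSOR in this version of mmu.c) that stamps out one is_<reg>_<name>() helper per cached role bit; the helpers consumed at lines 4542-4545 and 4637-4640 (is_cr0_wp(), is_cr4_smep(), is_efer_nx(), is_cr4_pke()) are all generated this way. The sketch below is a minimal, self-contained illustration of the same pattern using toy stand-in types, not the kernel's struct kvm_mmu.

/*
 * Illustrative sketch only: toy stand-ins for struct kvm_mmu and its cached
 * role bits, showing how a BUILD_MMU_ROLE_ACCESSOR-style macro expands into
 * the is_cr0_wp()/is_cr4_smep()/is_efer_nx() helpers seen in the listing.
 * The real kernel structures carry many more fields than these.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_role_base {			/* stands in for the base page role  */
	unsigned int cr0_wp  : 1;
	unsigned int efer_nx : 1;
};

struct toy_role_ext {			/* stands in for the extended role   */
	unsigned int cr4_smep : 1;
	unsigned int cr4_smap : 1;
};

struct toy_mmu_role {
	struct toy_role_base base;
	struct toy_role_ext  ext;
};

struct toy_mmu {
	struct toy_mmu_role mmu_role;
};

/*
 * Same shape as the macro body at line 235: paste the register and bit names
 * into a helper name and read the bit from either the "base" or "ext" half
 * of the cached role.
 */
#define BUILD_TOY_ROLE_ACCESSOR(base_or_ext, reg, name)			\
static inline bool is_##reg##_##name(struct toy_mmu *mmu)		\
{									\
	return !!(mmu->mmu_role. base_or_ext . reg##_##name);		\
}

BUILD_TOY_ROLE_ACCESSOR(base, cr0, wp);
BUILD_TOY_ROLE_ACCESSOR(base, efer, nx);
BUILD_TOY_ROLE_ACCESSOR(ext, cr4, smep);
BUILD_TOY_ROLE_ACCESSOR(ext, cr4, smap);

int main(void)
{
	struct toy_mmu mmu = { .mmu_role = { .base = { .cr0_wp = 1 } } };

	/* Consumers such as update_permission_bitmask() only call helpers. */
	printf("cr0.wp=%d cr4.smep=%d\n", is_cr0_wp(&mmu), is_cr4_smep(&mmu));
	return 0;
}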
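
Similarly, the root-cache hits around lines 4157-4173 (cached_root_available()) and line 4199 show the shape of KVM's fast PGD switch: the current root is pushed through mmu->prev_roots[] with swap() while searching for a previously used root, so the array stays ordered most-recently-used first. The sketch below reproduces only that swap-based search, with simplified stand-in types and a hypothetical matching rule (the real code also checks the cached page role); it is an illustration, not the kernel implementation.

/*
 * Illustrative sketch only: a simplified version of the prev_roots walk
 * visible at lines 4157-4173.  Types, names, and the match rule here are
 * stand-ins, not the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_NUM_PREV_ROOTS 3
#define TOY_INVALID_PAGE   (~0ull)

struct toy_root_info {
	unsigned long long pgd;	/* guest page-table base this root shadows */
	unsigned long long hpa;	/* host physical address of the shadow root */
};

struct toy_mmu {
	unsigned long long   root_pgd;
	unsigned long long   root_hpa;
	struct toy_root_info prev_roots[TOY_NUM_PREV_ROOTS];
};

static void toy_swap(struct toy_root_info *a, struct toy_root_info *b)
{
	struct toy_root_info tmp = *a;
	*a = *b;
	*b = tmp;
}

/*
 * Search the cache for a root previously built for @new_pgd.  The current
 * root is pushed through prev_roots[] as the walk advances, which keeps the
 * array most-recently-used first; on a hit the cached entry becomes the new
 * current root.  On a miss the oldest entry ends up as the current root, to
 * be released by the caller (compare the KVM_MMU_ROOT_CURRENT free at 4199).
 */
static bool toy_cached_root_available(struct toy_mmu *mmu,
				      unsigned long long new_pgd)
{
	struct toy_root_info root = { mmu->root_pgd, mmu->root_hpa };
	unsigned int i;

	for (i = 0; i < TOY_NUM_PREV_ROOTS; i++) {
		toy_swap(&root, &mmu->prev_roots[i]);
		/* Hypothetical match rule: pgd equality plus a valid hpa. */
		if (root.pgd == new_pgd && root.hpa != TOY_INVALID_PAGE)
			break;
	}

	mmu->root_hpa = root.hpa;
	mmu->root_pgd = root.pgd;
	return i < TOY_NUM_PREV_ROOTS;
}

int main(void)
{
	struct toy_mmu mmu = {
		.root_pgd = 0x1000, .root_hpa = 0xaa000,
		.prev_roots = { { 0x2000, 0xbb000 },
				{ 0x3000, 0xcc000 },
				{ 0, TOY_INVALID_PAGE } },
	};

	printf("hit=%d  current pgd=%#llx\n",
	       toy_cached_root_available(&mmu, 0x3000), mmu.root_pgd);
	return 0;
}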