/arch/x86/kernel/
  paravirt.c
      206  .mmu.pmd_val = PTE_IDENT,
      207  .mmu.make_pmd = PTE_IDENT,
      209  .mmu.pud_val = PTE_IDENT,
      210  .mmu.make_pud = PTE_IDENT,
      214  .mmu.p4d_val = PTE_IDENT,
      215  .mmu.make_p4d = PTE_IDENT,
      219  .mmu.pte_val = PTE_IDENT,
      220  .mmu.pgd_val = PTE_IDENT,
      222  .mmu.make_pte = PTE_IDENT,
      223  .mmu.make_pgd = PTE_IDENT,
      [all …]
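Every paravirt.c hit above plugs the same PTE_IDENT callback into the mmu ops table: on bare metal the pte/pmd/pud/p4d/pgd value conversions are identity transforms, so one helper fills all the slots. A minimal standalone sketch of that pattern follows; the names (pv_mmu_ops_sketch, pte_ident, native_mmu_ops_sketch) are hypothetical and are not the kernel's actual PTE_IDENT machinery.

```c
#include <stdint.h>

typedef uint64_t pteval_t;

struct pv_mmu_ops_sketch {
	pteval_t (*pte_val)(pteval_t pte);   /* native pte -> raw value */
	pteval_t (*make_pte)(pteval_t val);  /* raw value -> native pte */
	pteval_t (*pgd_val)(pteval_t pgd);
	pteval_t (*make_pgd)(pteval_t val);
};

/* On bare metal no hypervisor rewrites page-table entries, so every
 * conversion hook is the same identity function. */
static pteval_t pte_ident(pteval_t v)
{
	return v;
}

static const struct pv_mmu_ops_sketch native_mmu_ops_sketch = {
	.pte_val  = pte_ident,
	.make_pte = pte_ident,
	.pgd_val  = pte_ident,
	.make_pgd = pte_ident,
};
```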
|
/arch/arm64/kvm/hyp/nvhe/
  tlb.c
       14  struct kvm_s2_mmu *mmu;    member
       29  cxt->mmu = NULL;    in enter_vmid_context()
       62  if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))    in enter_vmid_context()
       65  cxt->mmu = vcpu->arch.hw_mmu;    in enter_vmid_context()
       68  if (mmu == host_s2_mmu)    in enter_vmid_context()
       71  cxt->mmu = host_s2_mmu;    in enter_vmid_context()
      113  __load_stage2(mmu, kern_hyp_va(mmu->arch));    in enter_vmid_context()
      120  struct kvm_s2_mmu *mmu = cxt->mmu;    in exit_vmid_context() local
      127  if (!mmu)    in exit_vmid_context()
      131  __load_stage2(mmu, kern_hyp_va(mmu->arch));    in exit_vmid_context()
      [all …]

  mem_protect.c
      145  struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;    in kvm_host_prepare_stage2() local
      150  mmu->arch = &host_mmu.arch;    in kvm_host_prepare_stage2()
      163  mmu->pgt = &host_mmu.pgt;    in kvm_host_prepare_stage2()
      164  atomic64_set(&mmu->vmid.id, 0);    in kvm_host_prepare_stage2()
      259  struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;    in kvm_guest_prepare_stage2() local
      283  ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL);    in kvm_guest_prepare_stage2()
      301  vm->kvm.arch.mmu.pgd_phys = 0ULL;    in reclaim_pgtable_pages()
      318  struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;    in __pkvm_prot_finalize() local
      325  params->vtcr = mmu->vtcr;    in __pkvm_prot_finalize()
     1180  .mmu = {
      [all …]
|
/arch/arm64/kvm/hyp/vhe/
  tlb.c
       14  struct kvm_s2_mmu *mmu;    member
       29  cxt->mmu = vcpu->arch.hw_mmu;    in enter_vmid_context()
       31  cxt->mmu = NULL;    in enter_vmid_context()
       63  __load_stage2(mmu, mmu->arch);    in enter_vmid_context()
       80  if (cxt->mmu)    in exit_vmid_context()
       81  __load_stage2(cxt->mmu, cxt->mmu->arch);    in exit_vmid_context()
      100  enter_vmid_context(mmu, &cxt);    in __kvm_tlb_flush_vmid_ipa()
      132  enter_vmid_context(mmu, &cxt);    in __kvm_tlb_flush_vmid_ipa_nsh()
      172  enter_vmid_context(mmu, &cxt);    in __kvm_tlb_flush_vmid_range()
      244  if (mmu)    in __kvm_tlbi_s1e2()
      [all …]
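The nVHE and VHE tlb.c hits share one shape: enter_vmid_context() records in cxt->mmu which stage-2 MMU has to be restored afterwards (NULL when no switch was needed), the TLB operation runs, and exit_vmid_context() reloads that MMU only if the field is set. A rough standalone sketch of that bracketing, assuming hypothetical helpers (tlb_ctx, switch_stage2) rather than the kernel's __load_stage2():

```c
#include <stddef.h>

struct s2_mmu;                           /* opaque stand-in for struct kvm_s2_mmu */

struct tlb_ctx {
	struct s2_mmu *mmu;              /* MMU to restore on exit, NULL if none */
};

/* Hypothetical hardware switch; the real code programs VTTBR_EL2/VTCR_EL2. */
static void switch_stage2(struct s2_mmu *mmu)
{
	(void)mmu;
}

static void enter_vmid_ctx(struct s2_mmu *target, struct s2_mmu *loaded,
			   struct tlb_ctx *cxt)
{
	if (target == loaded) {
		cxt->mmu = NULL;         /* already on the right MMU, nothing to undo */
		return;
	}
	cxt->mmu = loaded;               /* remember what was loaded before */
	switch_stage2(target);
}

static void exit_vmid_ctx(struct tlb_ctx *cxt)
{
	if (cxt->mmu)                    /* undo the switch only if we made one */
		switch_stage2(cxt->mmu);
}
```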
|
/arch/arm64/include/asm/
  kvm_mmu.h
      150  #define kvm_phys_shift(mmu)  VTCR_EL2_IPA((mmu)->vtcr)
      151  #define kvm_phys_size(mmu)   (_AC(1, ULL) << kvm_phys_shift(mmu))
      152  #define kvm_phys_mask(mmu)   (kvm_phys_size(mmu) - _AC(1, ULL))
      179  void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
      305  struct kvm_vmid *vmid = &mmu->vmid;
      309  baddr = mmu->pgd_phys;
      322  write_sysreg(mmu->vtcr, vtcr_el2);
      323  write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
      335  return container_of(mmu->arch, struct kvm, arch);
      346  return !(mmu->tlb_vttbr & VTTBR_CNP_BIT);
      [all …]
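Lines 150-152 of kvm_mmu.h derive the guest IPA size and mask from a single shift value taken out of VTCR. The arithmetic is simple enough to show standalone; the 40-bit shift below is an arbitrary example value, not one taken from the source.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int ipa_shift = 40;               /* example IPA width in bits */
	uint64_t size = 1ULL << ipa_shift;         /* mirrors kvm_phys_size() */
	uint64_t mask = size - 1;                  /* mirrors kvm_phys_mask() */
	uint64_t gpa  = 0x1234567890ABULL;

	printf("size=%#llx mask=%#llx gpa&mask=%#llx\n",
	       (unsigned long long)size,
	       (unsigned long long)mask,
	       (unsigned long long)(gpa & mask));
	return 0;
}
```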
|
  stage2_pgtable.h
       24  #define kvm_stage2_levels(mmu)  VTCR_EL2_LVLS((mmu)->vtcr)    argument
       31  #define kvm_mmu_cache_min_pages(mmu)  (kvm_stage2_levels(mmu) - 1)    argument
|
/arch/arc/mm/
  tlb.c
      139  int num_tlb = mmu->sets * mmu->ways;    in local_flush_tlb_all()
      573  mmu->ver = (bcr >> 24);    in arc_mmu_mumbojumbo()
      578  mmu->sets = 1 << mmu3->sets;    in arc_mmu_mumbojumbo()
      592  mmu->pae = mmu4->pae;    in arc_mmu_mumbojumbo()
      595  if (mmu->s_pg_sz_m)    in arc_mmu_mumbojumbo()
      597  mmu->s_pg_sz_m,    in arc_mmu_mumbojumbo()
      602  mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,    in arc_mmu_mumbojumbo()
      603  mmu->sets, mmu->ways,    in arc_mmu_mumbojumbo()
      682  #define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))    argument
      699  int set, n_ways = mmu->ways;    in do_tlb_overlap_fault()
      [all …]
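The ARC hits treat the TLB as a sets-by-ways grid: line 139 computes the entry count as sets * ways, and the SET_WAY_TO_IDX() macro at line 682 flattens a (set, way) pair into a linear index. A small standalone illustration of that indexing, with made-up geometry values rather than real ARC parameters:

```c
#include <stdio.h>

/* Flatten a (set, way) coordinate into a linear entry index,
 * the same arithmetic as SET_WAY_TO_IDX() above. */
static unsigned int set_way_to_idx(unsigned int set, unsigned int way,
				   unsigned int n_ways)
{
	return set * n_ways + way;
}

int main(void)
{
	unsigned int sets = 128, ways = 4;         /* assumed geometry, not ARC's */
	unsigned int num_tlb = sets * ways;        /* as in local_flush_tlb_all() */

	printf("entries=%u, (set=5, way=2) -> index %u\n",
	       num_tlb, set_way_to_idx(5, 2, ways));
	return 0;
}
```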
|
/arch/x86/include/asm/
  paravirt.h
       75  PVOP_VCALL0(mmu.flush_tlb_user);    in __flush_tlb_local()
       80  PVOP_VCALL0(mmu.flush_tlb_kernel);    in __flush_tlb_global()
       96  PVOP_VCALL1(mmu.exit_mmap, mm);    in paravirt_arch_exit_mmap()
      159  PVOP_VCALL1(mmu.write_cr2, x);    in write_cr2()
      318  PVOP_VCALL1(mmu.enter_mmap, next);    in paravirt_enter_mmap()
      337  PVOP_VCALL1(mmu.release_pte, pfn);    in paravirt_release_pte()
      347  PVOP_VCALL1(mmu.release_pmd, pfn);    in paravirt_release_pmd()
      356  PVOP_VCALL1(mmu.release_pud, pfn);    in paravirt_release_pud()
      532  PVOP_VCALL0(mmu.lazy_mode.enter);    in arch_enter_lazy_mmu_mode()
      537  PVOP_VCALL0(mmu.lazy_mode.leave);    in arch_leave_lazy_mmu_mode()
      [all …]
|
/arch/um/kernel/skas/
  mmu.c
       60  struct mm_context *mmu = &mm->context;    in destroy_context() local
       70  if (mmu->id.pid >= 0 && mmu->id.pid < 2) {    in destroy_context()
       72  mmu->id.pid);    in destroy_context()
       76  if (mmu->id.pid > 0) {    in destroy_context()
       77  os_kill_ptraced_process(mmu->id.pid, 1);    in destroy_context()
       78  mmu->id.pid = -1;    in destroy_context()
       81  if (using_seccomp && mmu->id.sock)    in destroy_context()
       82  os_close_file(mmu->id.sock);    in destroy_context()
       84  free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES));    in destroy_context()
|
/arch/x86/kvm/mmu/
  paging_tmpl.h
       31  #define PT_HAVE_ACCESSED_DIRTY(mmu)  true    argument
       45  #define PT_HAVE_ACCESSED_DIRTY(mmu)  true    argument
       58  #define PT_HAVE_ACCESSED_DIRTY(mmu)  (!(mmu)->cpu_role.base.ad_disabled)    argument
      115  if (!PT_HAVE_ACCESSED_DIRTY(mmu))    in FNAME()
      159  if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&    in FNAME()
      198  struct kvm_mmu *mmu,    in FNAME()
      209  if (!PT_HAVE_ACCESSED_DIRTY(mmu))    in FNAME()
      326  walker->level = mmu->cpu_role.base.level;    in FNAME()
      328  have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);    in FNAME()
      478  if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))    in FNAME()
      [all …]
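paging_tmpl.h is included once per guest paging mode, and PT_HAVE_ACCESSED_DIRTY() expands differently each time: constant true for the modes that always have accessed/dirty bits (lines 31 and 45) and a runtime check of cpu_role for the mode where the guest can disable them (line 58); the walker then skips A/D updates whenever the macro is false. Below is a simplified sketch of only the runtime-check variant, with hypothetical names (mmu_sketch, mark_accessed) and an assumed bit position:

```c
#include <stdbool.h>
#include <stdint.h>

struct mmu_sketch {
	bool ad_disabled;                  /* stand-in for cpu_role.base.ad_disabled */
};

/* Models only the runtime-check flavour of PT_HAVE_ACCESSED_DIRTY(). */
#define HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->ad_disabled)

static void mark_accessed(struct mmu_sketch *mmu, uint64_t *pte)
{
	if (!HAVE_ACCESSED_DIRTY(mmu))
		return;                    /* this mode never updates A/D bits */
	*pte |= 1ULL << 5;                 /* assumed "accessed" bit position */
}
```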
|
  mmu.c
     3862  struct kvm_mmu *mmu = vcpu->arch.mmu;    in mmu_alloc_direct_roots() local
     3898  mmu->root.hpa = __pa(mmu->pae_root);    in mmu_alloc_direct_roots()
     4001  struct kvm_mmu *mmu = vcpu->arch.mmu;    in mmu_alloc_shadow_roots() local
     4103  mmu->root.hpa = __pa(mmu->pml5_root);    in mmu_alloc_shadow_roots()
     4107  mmu->root.hpa = __pa(mmu->pae_root);    in mmu_alloc_shadow_roots()
     4119  struct kvm_mmu *mmu = vcpu->arch.mmu;    in mmu_alloc_special_roots() local
     4143  if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))    in mmu_alloc_special_roots()
     5044  swap(mmu->root, mmu->prev_roots[i]);    in cached_root_find_and_keep_current()
     5073  swap(mmu->root, mmu->prev_roots[i]);    in cached_root_find_without_current()
     5099  struct kvm_mmu *mmu = vcpu->arch.mmu;    in kvm_mmu_new_pgd() local
      [all …]
|
  tdp_mmu.h
       52  return root_to_sp(vcpu->arch.mmu->mirror_root_hpa);    in tdp_mmu_get_root_for_fault()
       54  return root_to_sp(vcpu->arch.mmu->root.hpa);    in tdp_mmu_get_root_for_fault()
       61  return root_to_sp(vcpu->arch.mmu->mirror_root_hpa);    in tdp_mmu_get_root()
       63  return root_to_sp(vcpu->arch.mmu->root.hpa);    in tdp_mmu_get_root()
|
/arch/x86/kvm/
  mmu.h
       96  struct kvm_mmu *mmu);
      150  u64 root_hpa = vcpu->arch.mmu->root.hpa;    in kvm_mmu_load_pgd()
      156  vcpu->arch.mmu->root_role.level);    in kvm_mmu_load_pgd()
      160  struct kvm_mmu *mmu)    in kvm_mmu_refresh_passthrough_bits() argument
      171  if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)    in kvm_mmu_refresh_passthrough_bits()
      174  __kvm_mmu_refresh_passthrough_bits(vcpu, mmu);    in kvm_mmu_refresh_passthrough_bits()
      211  kvm_mmu_refresh_passthrough_bits(vcpu, mmu);    in permission_fault()
      216  if (unlikely(mmu->pkru_mask)) {    in permission_fault()
      230  pkru_bits &= mmu->pkru_mask >> offset;    in permission_fault()
      298  struct kvm_mmu *mmu,    in kvm_translate_gpa() argument
      [all …]
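The permission_fault() hits (lines 216 and 230) use pkru_mask as a packed permission table: a computed offset right-shifts the mask so the relevant few bits land at the bottom, where they are ANDed out. The same shift-and-mask lookup is shown in isolation below, assuming a generic table of two-bit entries rather than the kernel's exact PKRU encoding:

```c
#include <stdint.h>
#include <stdio.h>

/* Read the 2-bit entry stored at index 'key' in a packed 32-bit table,
 * the same move as `pkru_bits &= mmu->pkru_mask >> offset`. */
static unsigned int lookup2(uint32_t packed, unsigned int key)
{
	unsigned int offset = key * 2;     /* each entry is two bits wide */

	return (packed >> offset) & 0x3;
}

int main(void)
{
	uint32_t table = 0;

	table |= 0x2u << (5 * 2);          /* store value 2 at index 5 */
	printf("entry 5 = %u\n", lookup2(table, 5));
	return 0;
}
```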
|
  Makefile
        9  debugfs.o mmu/mmu.o mmu/page_track.o mmu/spte.o
       11  kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
|
/arch/arm64/kvm/
  mmu.c
      150  pgt = kvm->arch.mmu.pgt;    in kvm_mmu_split_huge_pages()
      936  if (mmu->pgt != NULL) {    in kvm_init_stage2_mmu()
      952  mmu->arch = &kvm->arch;    in kvm_init_stage2_mmu()
      957  mmu->pgt = pgt;    in kvm_init_stage2_mmu()
      961  mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));    in kvm_init_stage2_mmu()
     1070  pgt = mmu->pgt;    in kvm_free_stage2_pgd()
     1072  mmu->pgd_phys = 0;    in kvm_free_stage2_pgd()
     1073  mmu->pgt = NULL;    in kvm_free_stage2_pgd()
     1145  struct kvm_s2_mmu *mmu = &kvm->arch.mmu;    in kvm_phys_addr_ioremap() local
     1804  struct kvm_s2_mmu *mmu;    in handle_access_fault() local
      [all …]
|
  nested.c
      606  tlbi_callback(mmu, info);    in kvm_s2_mmu_iterate_by_vmid()
      650  vtcr == mmu->tlb_vtcr)    in lookup_s2_mmu()
      651  return mmu;    in lookup_s2_mmu()
      656  return mmu;    in lookup_s2_mmu()
     1063  if (kvm_s2_mmu_valid(mmu))    in kvm_nested_s2_wp()
     1064  kvm_stage2_wp_range(mmu, 0, kvm_phys_size(mmu));    in kvm_nested_s2_wp()
     1079  if (kvm_s2_mmu_valid(mmu))    in kvm_nested_s2_unmap()
     1080  kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);    in kvm_nested_s2_unmap()
     1096  kvm_stage2_flush_range(mmu, 0, kvm_phys_size(mmu));    in kvm_nested_s2_flush()
     1742  if (mmu->pending_unmap) {    in check_nested_vcpu_requests()
      [all …]
|
  ptdump.c
       99  struct kvm_s2_mmu *mmu = &kvm->arch.mmu;    in kvm_ptdump_parser_create() local
      100  struct kvm_pgtable *pgtable = mmu->pgt;    in kvm_ptdump_parser_create()
      134  struct kvm_s2_mmu *mmu = &kvm->arch.mmu;    in kvm_ptdump_guest_show() local
      145  ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker);    in kvm_ptdump_guest_show()
      220  pgtable = kvm->arch.mmu.pgt;    in kvm_pgtable_debugfs_open()
|
  pkvm.c
      141  pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.mmu.vtcr);    in __pkvm_create_hyp_vm()
      288  int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,    in pkvm_pgtable_stage2_init() argument
      292  pgt->mmu = mmu;    in pkvm_pgtable_stage2_init()
      299  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);    in __pkvm_pgtable_stage2_unmap()
      328  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);    in pkvm_pgtable_stage2_map()
      372  lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(pgt->mmu)->mmu_lock);    in pkvm_pgtable_stage2_unmap()
      379  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);    in pkvm_pgtable_stage2_wrprotect()
      397  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);    in pkvm_pgtable_stage2_flush()
      410  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);    in pkvm_pgtable_stage2_test_clear_young()
|
/arch/powerpc/kvm/
  book3s_32_mmu.c
      399  struct kvmppc_mmu *mmu = &vcpu->arch.mmu;    in kvmppc_mmu_book3s_32_init() local
      401  mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;    in kvmppc_mmu_book3s_32_init()
      402  mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;    in kvmppc_mmu_book3s_32_init()
      403  mmu->xlate = kvmppc_mmu_book3s_32_xlate;    in kvmppc_mmu_book3s_32_init()
      404  mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;    in kvmppc_mmu_book3s_32_init()
      409  mmu->slbmte = NULL;    in kvmppc_mmu_book3s_32_init()
      410  mmu->slbmfee = NULL;    in kvmppc_mmu_book3s_32_init()
      411  mmu->slbmfev = NULL;    in kvmppc_mmu_book3s_32_init()
      412  mmu->slbfee = NULL;    in kvmppc_mmu_book3s_32_init()
      413  mmu->slbie = NULL;    in kvmppc_mmu_book3s_32_init()
      [all …]
|
  book3s_64_mmu.c
      653  struct kvmppc_mmu *mmu = &vcpu->arch.mmu;    in kvmppc_mmu_book3s_64_init() local
      655  mmu->mfsrin = NULL;    in kvmppc_mmu_book3s_64_init()
      656  mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;    in kvmppc_mmu_book3s_64_init()
      657  mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;    in kvmppc_mmu_book3s_64_init()
      658  mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;    in kvmppc_mmu_book3s_64_init()
      659  mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;    in kvmppc_mmu_book3s_64_init()
      660  mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;    in kvmppc_mmu_book3s_64_init()
      661  mmu->slbie = kvmppc_mmu_book3s_64_slbie;    in kvmppc_mmu_book3s_64_init()
      662  mmu->slbia = kvmppc_mmu_book3s_64_slbia;    in kvmppc_mmu_book3s_64_init()
      663  mmu->xlate = kvmppc_mmu_book3s_64_xlate;    in kvmppc_mmu_book3s_64_init()
      [all …]
|
  book3s_emulate.c
      317  if (vcpu->arch.mmu.mfsrin) {    in kvmppc_core_emulate_op_pr()
      329  if (vcpu->arch.mmu.mfsrin) {    in kvmppc_core_emulate_op_pr()
      337  vcpu->arch.mmu.mtsrin(vcpu,    in kvmppc_core_emulate_op_pr()
      342  vcpu->arch.mmu.mtsrin(vcpu,    in kvmppc_core_emulate_op_pr()
      385  if (!vcpu->arch.mmu.slbmte)    in kvmppc_core_emulate_op_pr()
      388  vcpu->arch.mmu.slbmte(vcpu,    in kvmppc_core_emulate_op_pr()
      393  if (!vcpu->arch.mmu.slbie)    in kvmppc_core_emulate_op_pr()
      396  vcpu->arch.mmu.slbie(vcpu,    in kvmppc_core_emulate_op_pr()
      400  if (!vcpu->arch.mmu.slbia)    in kvmppc_core_emulate_op_pr()
      403  vcpu->arch.mmu.slbia(vcpu);    in kvmppc_core_emulate_op_pr()
      [all …]
|
/arch/m68k/kernel/
  setup_mm.c
      378  const char *cpu, *mmu, *fpu;    in show_cpuinfo() local
      427  mmu = "68851";    in show_cpuinfo()
      429  mmu = "68030";    in show_cpuinfo()
      431  mmu = "68040";    in show_cpuinfo()
      433  mmu = "68060";    in show_cpuinfo()
      435  mmu = "Sun-3";    in show_cpuinfo()
      437  mmu = "Apollo";    in show_cpuinfo()
      439  mmu = "ColdFire";    in show_cpuinfo()
      441  mmu = "unknown";    in show_cpuinfo()
      451  cpu, mmu, fpu,    in show_cpuinfo()
|
/arch/arm64/kvm/hyp/
  pgtable.c
      529  pgt->mmu = NULL;    in kvm_pgtable_hyp_init()
      571  struct kvm_s2_mmu *mmu;    member
      772  struct kvm_s2_mmu *mmu)    in stage2_try_break_pte() argument
      836  struct kvm_s2_mmu *mmu,    in stage2_unmap_put_pte() argument
     1040  .mmu = pgt->mmu,    in kvm_pgtable_stage2_map()
     1069  .mmu = pgt->mmu,    in kvm_pgtable_stage2_set_owner()
     1093  struct kvm_s2_mmu *mmu = pgt->mmu;    in stage2_unmap_walker() local
     1351  .mmu = pgt->mmu,    in kvm_pgtable_stage2_create_unlinked()
     1423  struct kvm_s2_mmu *mmu;    in stage2_split_walker() local
     1507  u64 vtcr = mmu->vtcr;    in __kvm_pgtable_stage2_init()
      [all …]
|
/arch/riscv/boot/dts/sophgo/
  sg2042-cpus.dtsi
      274  mmu-type = "riscv,sv39";
      301  mmu-type = "riscv,sv39";
      328  mmu-type = "riscv,sv39";
      355  mmu-type = "riscv,sv39";
      382  mmu-type = "riscv,sv39";
      409  mmu-type = "riscv,sv39";
      436  mmu-type = "riscv,sv39";
      463  mmu-type = "riscv,sv39";
      490  mmu-type = "riscv,sv39";
      517  mmu-type = "riscv,sv39";
      [all …]
|
/arch/sh/mm/
  Makefile
       18  mmu-y := nommu.o extable_32.o
       19  mmu-$(CONFIG_MMU) := extable_32.o fault.o ioremap.o kmap.o \
       22  obj-y += $(mmu-y)
|