Cross-reference matches for the identifier pgt, grouped by directory and file. Line numbers refer to the listed file; [all …] marks match lists truncated by the search tool.

/arch/arm64/kvm/hyp/

pgtable.c
   254  if (!pgt->pgd)                                                      in _kvm_pgtable_walk()
   523  if (!pgt->pgd)                                                      in kvm_pgtable_hyp_init()
   529  pgt->mmu = NULL;                                                    in kvm_pgtable_hyp_init()
   558  WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));      in kvm_pgtable_hyp_destroy()
   559  pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));   in kvm_pgtable_hyp_destroy()
   560  pgt->pgd = NULL;                                                    in kvm_pgtable_hyp_destroy()
   901  struct kvm_pgtable *pgt = data->mmu->pgt;                           in stage2_map_walker_try_leaf() (local)
  1137  .arg = pgt,                                                         in kvm_pgtable_stage2_unmap()
  1335  .arg = pgt,                                                         in kvm_pgtable_stage2_flush()
  1514  if (!pgt->pgd)                                                      in __kvm_pgtable_stage2_init()
  [all …]
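The teardown path above (lines 558-559) shows the canonical use of the generic walker: build a kvm_pgtable_walker and hand it the whole input-address range. Below is a minimal sketch of the same pattern, a walker that counts valid leaf entries; the visitor signature, the ctx->old and ctx->arg fields, and KVM_PGTABLE_WALK_LEAF are assumed from arch/arm64/include/asm/kvm_pgtable.h.

```c
/*
 * Sketch only: count mapped leaf entries in a page-table, following the
 * walker pattern visible in kvm_pgtable_hyp_destroy(). The visitor
 * signature and KVM_PGTABLE_WALK_LEAF are assumptions taken from
 * arch/arm64/include/asm/kvm_pgtable.h.
 */
#include <linux/bug.h>
#include <asm/kvm_pgtable.h>

static int count_leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
			     enum kvm_pgtable_walk_flags visit)
{
	u64 *count = ctx->arg;

	/* ctx->old holds the PTE observed at this level. */
	if (kvm_pte_valid(ctx->old))
		(*count)++;

	return 0;
}

static u64 count_mapped_leaves(struct kvm_pgtable *pgt)
{
	u64 count = 0;
	struct kvm_pgtable_walker walker = {
		.cb	= count_leaf_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &count,
	};

	/* Walk the whole input-address range, as the destroy path does. */
	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	return count;
}
```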
|
/arch/arm64/kvm/
pkvm.c
   291  pgt->pkvm_mappings = RB_ROOT_CACHED;                 in pkvm_pgtable_stage2_init()
   292  pgt->mmu = mmu;                                      in pkvm_pgtable_stage2_init()
   299  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);       in __pkvm_pgtable_stage2_unmap()
   312  pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);   in __pkvm_pgtable_stage2_unmap()
   319  void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)  in pkvm_pgtable_stage2_destroy() (argument)
   321  __pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));        in pkvm_pgtable_stage2_destroy()
   328  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);       in pkvm_pgtable_stage2_map()
   365  pkvm_mapping_insert(mapping, &pgt->pkvm_mappings);   in pkvm_pgtable_stage2_map()
   379  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);       in pkvm_pgtable_stage2_wrprotect()
   397  struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);       in pkvm_pgtable_stage2_flush()
  [all …]
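pkvm_pgtable_stage2_init() seeds pgt->pkvm_mappings as a cached rbtree, and the map/unmap paths insert and remove nodes from it. Here is a generic sketch of that bookkeeping with a hypothetical demo_mapping node; the real node type is struct pkvm_mapping and its helpers live in kvm_pkvm.h.

```c
/*
 * Illustrative only: the rb_root_cached bookkeeping pattern used for
 * pgt->pkvm_mappings. demo_mapping and its gfn key are hypothetical.
 */
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_mapping {
	struct rb_node	node;
	u64		gfn;	/* sort key */
};

static bool demo_mapping_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct demo_mapping, node)->gfn <
	       rb_entry(b, struct demo_mapping, node)->gfn;
}

static void demo_mapping_insert(struct demo_mapping *m,
				struct rb_root_cached *root)
{
	/* Keeps the leftmost (lowest gfn) node cached for cheap lookup. */
	rb_add_cached(&m->node, root, demo_mapping_less);
}

static void demo_mapping_remove(struct demo_mapping *m,
				struct rb_root_cached *root)
{
	rb_erase_cached(&m->node, root);
	kfree(m);
}
```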
|
mmu.c
    71  struct kvm_pgtable *pgt = mmu->pgt;                  in stage2_apply_range() (local)
    72  if (!pgt)                                            in stage2_apply_range()
   150  pgt = kvm->arch.mmu.pgt;                             in kvm_mmu_split_huge_pages()
   151  if (!pgt)                                            in kvm_mmu_split_huge_pages()
   948  pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);     in kvm_init_stage2_mmu()
   949  if (!pgt)                                            in kvm_init_stage2_mmu()
   957  mmu->pgt = pgt;                                      in kvm_init_stage2_mmu()
  1070  pgt = mmu->pgt;                                      in kvm_free_stage2_pgd()
  1071  if (pgt) {                                           in kvm_free_stage2_pgd()
  1146  struct kvm_pgtable *pgt = mmu->pgt;                  in kvm_phys_addr_ioremap() (local)
  [all …]
|
ptdump.c
   100  struct kvm_pgtable *pgtable = mmu->pgt;              in kvm_ptdump_parser_create()
   145  ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker);  in kvm_ptdump_guest_show()
   220  pgtable = kvm->arch.mmu.pgt;                         in kvm_pgtable_debugfs_open()
|
nested.c
   106  kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];  in kvm_vcpu_init_nested()
   516  if (kvm_pgtable_get_leaf(mmu->pgt, tmp, &pte, NULL))            in get_guest_mapping_ttl()
  1067  kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));  in kvm_nested_s2_wp()
  1083  kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));  in kvm_nested_s2_unmap()
|
/arch/arm64/include/asm/
kvm_pkvm.h
   180  int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
   182  void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
   183  int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
   186  int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
   187  int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
   188  int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
   189  bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
   190  int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
   192  void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
   194  int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
  [all …]
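These prototypes mirror the generic kvm_pgtable_stage2_* API one-for-one, which lets arch/arm64/kvm/mmu.c pick a backend at runtime. A sketch of that dispatch, assuming the token-pasting KVM_PGT_FN helper used in recent mmu.c and a recent kvm_pgtable_stage2_map() signature (memcache and walk-flags arguments):

```c
/*
 * Dispatch sketch: prefixing a kvm_pgtable_stage2_* symbol with "p"
 * yields the matching pkvm_pgtable_stage2_* variant. KVM_PGT_FN and
 * the trailing map arguments are assumptions based on recent mmu.c.
 */
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>

#define KVM_PGT_FN(fn)	(!is_protected_kvm_enabled() ? fn : p##fn)

static int demo_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *memcache, enum kvm_pgtable_walk_flags flags)
{
	/* Protected guests take the pkvm_pgtable_stage2_map() path. */
	return KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, addr, size, phys,
						  prot, memcache, flags);
}
```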
|
kvm_pgtable.h
   444  int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
   454  void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
   497  u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
   542  return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);  in kvm_pgtable_stage2_init()
   552  void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
   586  kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
   620  int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
   692  void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
   735  int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
   794  int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
  [all …]
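Line 542 shows kvm_pgtable_stage2_init() as a thin wrapper that passes no flags and no force-PTE callback to __kvm_pgtable_stage2_init(). A hedged lifecycle sketch built from the prototypes above; the mm_ops backend and the trailing arguments of kvm_pgtable_stage2_map() (memcache, walk flags) are assumptions:

```c
/*
 * Lifecycle sketch for a stage-2 table, pieced together from the
 * prototypes above. The mm_ops backend and the trailing arguments of
 * kvm_pgtable_stage2_map() are assumptions, not a verified signature.
 */
#include <asm/kvm_pgtable.h>

static int demo_stage2_lifecycle(struct kvm_s2_mmu *mmu,
				 struct kvm_pgtable_mm_ops *mm_ops,
				 void *memcache)
{
	struct kvm_pgtable pgt;
	int ret;

	ret = kvm_pgtable_stage2_init(&pgt, mmu, mm_ops);
	if (ret)
		return ret;

	/* Map one readable/writable page at IPA 0 backed by PA 0. */
	ret = kvm_pgtable_stage2_map(&pgt, 0, PAGE_SIZE, 0,
				     KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
				     memcache, 0);
	if (!ret)
		ret = kvm_pgtable_stage2_unmap(&pgt, 0, PAGE_SIZE);

	kvm_pgtable_stage2_destroy(&pgt);
	return ret;
}
```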
|
kvm_host.h
   166  struct kvm_pgtable *pgt;  (member)
|
/arch/arm64/kvm/hyp/nvhe/
mem_protect.c
   156  ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,             in kvm_host_prepare_stage2()
   162  mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);                     in kvm_host_prepare_stage2()
   163  mmu->pgt = &host_mmu.pgt;                                       in kvm_host_prepare_stage2()
   288  vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);              in kvm_guest_prepare_stage2()
   300  kvm_pgtable_stage2_destroy(&vm->pgt);                           in reclaim_pgtable_pages()
   355  struct kvm_pgtable *pgt = &host_mmu.pgt;                        in host_stage2_unmap_dev_all() (local)
   367  return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);  in host_stage2_unmap_dev_all()
   642  return kvm_pgtable_walk(pgt, addr, size, &walker);              in check_page_state_range()
  1161  kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);                   in __pkvm_host_mkyoung_guest()
  1182  .pgt = &selftest_vm.pgt,
  [all …]
|
pkvm.c
   527  mmu->pgt = &hyp_vm->pgt;  in insert_vm_table_entry()
|
/arch/s390/kvm/
gaccess.c
  1215  unsigned long *pgt, int *dat_protection,          in kvm_s390_shadow_tables() (argument)
  1271  *pgt = ptr + vaddr.rfx * 8;                       in kvm_s390_shadow_tables()
  1299  *pgt = ptr + vaddr.rsx * 8;                       in kvm_s390_shadow_tables()
  1328  *pgt = ptr + vaddr.rtx * 8;                       in kvm_s390_shadow_tables()
  1366  *pgt = ptr + vaddr.sx * 8;                        in kvm_s390_shadow_tables()
  1393  *pgt = ptr;                                       in kvm_s390_shadow_tables()
  1424  *pgt = pt_index & ~GMAP_SHADOW_FAKE_TABLE;        in shadow_pgt_lookup()
  1454  unsigned long pgt = 0;                            in kvm_s390_shadow_fault() (local)
  1476  pte.val = pgt + vaddr.px * PAGE_SIZE;             in kvm_s390_shadow_fault()
  1485  pgt |= PEI_NOT_PTE;                               in kvm_s390_shadow_fault()
  [all …]
|
/arch/x86/kernel/acpi/
madt_wakeup.c
    68  static void __init free_pgt_page(void *pgt, void *dummy)  in free_pgt_page() (argument)
    70  return memblock_free(pgt, PAGE_SIZE);                     in free_pgt_page()
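free_pgt_page() is the teardown half of the page-table allocator callbacks handed to the x86 identity-mapping helpers. A sketch of the pairing; the free_pgt_page member of struct x86_mapping_info and the exact flag choice are assumptions based on this call site:

```c
/*
 * Sketch of the callback pairing suggested by free_pgt_page() above.
 * The free_pgt_page member of struct x86_mapping_info and the flag
 * values are assumptions, not verified against the real call site.
 */
#include <linux/memblock.h>
#include <asm/init.h>

static void *__init alloc_pgt_page(void *dummy)
{
	/* Early boot: page tables come from memblock, one page each. */
	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
}

static void __init free_pgt_page(void *pgt, void *dummy)
{
	memblock_free(pgt, PAGE_SIZE);
}

static struct x86_mapping_info info __initdata = {
	.alloc_pgt_page	= alloc_pgt_page,
	.free_pgt_page	= free_pgt_page,
	.page_flag	= __PAGE_KERNEL_LARGE_EXEC,	/* illustrative */
};
```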
|
/arch/s390/mm/
gmap.c
  1180  unsigned long *pgt)                               in __gmap_unshadow_pgt() (argument)
  1186  pgt[i] = _PAGE_INVALID;                           in __gmap_unshadow_pgt()
  1199  phys_addr_t sto, pgt;                             in gmap_unshadow_pgt() (local)
  1209  pgt = *ste & _SEGMENT_ENTRY_ORIGIN;               in gmap_unshadow_pgt()
  1211  __gmap_unshadow_pgt(sg, raddr, __va(pgt));        in gmap_unshadow_pgt()
  1213  ptdesc = page_ptdesc(phys_to_page(pgt));          in gmap_unshadow_pgt()
  1229  phys_addr_t pgt;                                  in __gmap_unshadow_sgt() (local)
  1236  pgt = sgt[i] & _REGION_ENTRY_ORIGIN;              in __gmap_unshadow_sgt()
  1238  __gmap_unshadow_pgt(sg, raddr, __va(pgt));        in __gmap_unshadow_sgt()
  1240  ptdesc = page_ptdesc(phys_to_page(pgt));          in __gmap_unshadow_sgt()
  [all …]
|
/arch/arm64/kvm/hyp/include/nvhe/
pkvm.h
    41  struct kvm_pgtable pgt;  (member)
|
mem_protect.h
    20  struct kvm_pgtable pgt;  (member)
|
/arch/powerpc/kvm/
book3s_64_mmu_radix.c
  1288  pgd_t *pgt;                                       in debugfs_radix_read() (local)
  1326  pgt = NULL;                                       in debugfs_radix_read()
  1330  pgt = NULL;                                       in debugfs_radix_read()
  1340  if (!pgt) {                                       in debugfs_radix_read()
  1342  pgt = kvm->arch.pgtable;                          in debugfs_radix_read()
  1349  pgt = nested->shadow_pgtable;                     in debugfs_radix_read()
  1358  "pgdir: %lx\n", (unsigned long)pgt);              in debugfs_radix_read()
  1363  pgdp = pgt + pgd_index(gpa);                      in debugfs_radix_read()
|
/arch/s390/include/asm/
gmap.h
   125  int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
|
pgtable.h
  2044  static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)  in gmap_pgste_get_pgt_addr() (argument)
  2048  pgstes = pgt + _PAGE_ENTRIES;                     in gmap_pgste_get_pgt_addr()
|
/arch/x86/events/intel/
uncore_nhmex.c
   879  DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
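This DEFINE_UNCORE_FORMAT_ATTR() instance publishes a pgt format field spanning bits 0-31 of config1, so perf tooling knows where to place the value in the raw event config. A rough sketch of the read-only sysfs attribute such a macro boils down to; the exact macro body in the x86 uncore code may differ in detail:

```c
/*
 * Rough expansion sketch: a read-only sysfs attribute whose show()
 * emits the field encoding "config1:0-31" for the perf format
 * directory. Names follow the macro arguments above; the real macro
 * body lives in the x86 uncore headers.
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t __uncore_pgt_show(struct device *dev,
				 struct device_attribute *attr, char *page)
{
	return sysfs_emit(page, "config1:0-31\n");
}

static struct device_attribute format_attr_pgt =
	__ATTR(pgt, 0444, __uncore_pgt_show, NULL);
```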
|