/linux/drivers/net/ethernet/mellanox/mlxsw/
  spectrum_pgt.c
      32  mutex_lock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc()
      42  mutex_unlock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc()
      52  mutex_lock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_free()
      63  mutex_lock(&mlxsw_sp->pgt->lock);  in mlxsw_sp_pgt_mid_alloc_range()
     313  struct mlxsw_sp_pgt *pgt;  in mlxsw_sp_pgt_init() local
     318  pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);  in mlxsw_sp_pgt_init()
     319  if (!pgt)  in mlxsw_sp_pgt_init()
     322  idr_init(&pgt->pgt_idr);  in mlxsw_sp_pgt_init()
     324  mutex_init(&pgt->lock);  in mlxsw_sp_pgt_init()
     326  mlxsw_sp->pgt = pgt;  in mlxsw_sp_pgt_init()
     [all …]
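
The hits trace a conventional driver init pattern: allocate the PGT state, set up its IDR and lock, then publish the pointer. A minimal sketch of that pattern, assuming nothing about struct mlxsw_sp_pgt beyond the two fields visible above (pgt_idr, lock) and eliding the driver's error paths:

    static int pgt_init_sketch(struct mlxsw_sp *mlxsw_sp)
    {
    	struct mlxsw_sp_pgt *pgt;

    	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
    	if (!pgt)
    		return -ENOMEM;

    	idr_init(&pgt->pgt_idr);	/* index allocator used by mid_alloc/mid_free */
    	mutex_init(&pgt->lock);		/* serializes the mid_* operations above */
    	mlxsw_sp->pgt = pgt;
    	return 0;
    }

idr_init() and mutex_init() cannot fail, so the kzalloc() is the only error source in this shape.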
|
/linux/arch/arm64/kvm/hyp/
  pgtable.c
     268  if (!pgt->pgd)  in _kvm_pgtable_walk()
     537  if (!pgt->pgd)  in kvm_pgtable_hyp_init()
     543  pgt->mmu = NULL;  in kvm_pgtable_hyp_init()
     572  WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));  in kvm_pgtable_hyp_destroy()
     573  pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));  in kvm_pgtable_hyp_destroy()
     574  pgt->pgd = NULL;  in kvm_pgtable_hyp_destroy()
     920  struct kvm_pgtable *pgt = data->mmu->pgt;  in stage2_map_walker_try_leaf() local
    1156  .arg = pgt,  in kvm_pgtable_stage2_unmap()
    1360  .arg = pgt,  in kvm_pgtable_stage2_flush()
    1539  if (!pgt->pgd)  in __kvm_pgtable_stage2_init()
    [all …]
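
The destroy path at lines 572-574 shows a recurring "walk everything" idiom: pgt->ia_bits is the width of the input address space, so BIT(pgt->ia_bits) is its size, and a walk starting at 0 covers every mapping before the PGD page itself is released. A sketch of that idiom; the teardown callback passed in is hypothetical:

    static void walk_all_sketch(struct kvm_pgtable *pgt,
    				kvm_pgtable_visitor_fn_t cb)
    {
    	struct kvm_pgtable_walker walker = {
    		.cb	= cb,	/* hypothetical free/teardown visitor */
    		.flags	= KVM_PGTABLE_WALK_LEAF |
    			  KVM_PGTABLE_WALK_TABLE_POST,
    	};

    	/* [0, BIT(ia_bits)) spans the whole input address space */
    	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
    }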
|
/linux/arch/arm64/include/asm/
  kvm_pgtable.h
     434  int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
     444  void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
     487  u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
     529  #define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \  argument
     530  __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
     539  void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
     573  kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
     607  int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
     721  int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
     779  int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
     [all …]
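
Taken together, the declarations sketch the table lifecycle: init, map/unmap, destroy. A hedged usage sketch; the truncated kvm_pgtable_hyp_init() parameter list is completed here with a struct kvm_pgtable_mm_ops pointer, which matches mainline but is an assumption as far as this listing goes:

    static int hyp_pgt_lifecycle_sketch(struct kvm_pgtable *pgt, u32 va_bits,
    					struct kvm_pgtable_mm_ops *mm_ops)
    {
    	int ret;

    	ret = kvm_pgtable_hyp_init(pgt, va_bits, mm_ops);
    	if (ret)
    		return ret;

    	/* ... kvm_pgtable_hyp_map() / kvm_pgtable_hyp_unmap() calls ... */

    	kvm_pgtable_hyp_destroy(pgt);	/* frees every level, then the PGD */
    	return 0;
    }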
|
  kvm_host.h
     162  struct kvm_pgtable *pgt;  member
|
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
  vmm.c
      31  if (pgt) {  in nvkm_vmm_pt_del()
      32  kvfree(pgt->pde);  in nvkm_vmm_pt_del()
      33  kfree(pgt);  in nvkm_vmm_pt_del()
      56  if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))  in nvkm_vmm_pt_new()
      62  pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);  in nvkm_vmm_pt_new()
      63  if (!pgt->pde) {  in nvkm_vmm_pt_new()
      64  kfree(pgt);  in nvkm_vmm_pt_new()
      69  return pgt;  in nvkm_vmm_pt_new()
     216  if (!pgt->refs[0])  in nvkm_vmm_unref_sptes()
     280  if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))  in nvkm_vmm_unref_ptes()
     [all …]
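
nvkm_vmm_pt_new()/nvkm_vmm_pt_del() pair a kzalloc'd header with a trailing lpte-byte array against a separately kvcalloc'd pde[] table, unwinding in reverse order on failure. A minimal sketch of that shape; every name except pde is illustrative:

    struct pt_sketch {
    	void **pde;		/* kvcalloc'd child-table pointers */
    	u8 data[];		/* trailing lpte bytes, as in the hits */
    };

    static struct pt_sketch *pt_new_sketch(size_t lpte, size_t pten)
    {
    	struct pt_sketch *pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL);

    	if (!pgt)
    		return NULL;
    	pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
    	if (!pgt->pde) {
    		kfree(pgt);	/* nothing else to unwind yet */
    		return NULL;
    	}
    	return pgt;
    }

kvcalloc() is used because pde[] can be large enough to need a vmalloc fallback, which is why the teardown at lines 31-33 frees pde with kvfree() but the header with plain kfree().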
|
  vmmgp100.c
     238  struct nvkm_vmm_pt *pgt = pgd->pde[pdei];  in gp100_vmm_pd0_pde() local
     242  if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))  in gp100_vmm_pd0_pde()
     244  if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))  in gp100_vmm_pd0_pde()
     365  struct nvkm_vmm_pt *pgt = pgd->pde[pdei];  in gp100_vmm_pd1_pde() local
     369  if (!gp100_vmm_pde(pgt->pt[0], &data))  in gp100_vmm_pd1_pde()
|
  vmmnv50.c
     106  nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)  in nv50_vmm_pde() argument
     110  if (pgt && (pt = pgt->pt[0])) {  in nv50_vmm_pde()
     111  switch (pgt->page) {  in nv50_vmm_pde()
|
  vmmgf100.c
     108  struct nvkm_vmm_pt *pgt = pgd->pde[pdei];  in gf100_vmm_pgd_pde() local
     113  if ((pt = pgt->pt[0])) {  in gf100_vmm_pgd_pde()
     127  if ((pt = pgt->pt[1])) {  in gf100_vmm_pgd_pde()
|
/linux/arch/arm64/kvm/
  mmu.c
      66  struct kvm_pgtable *pgt = mmu->pgt;  in stage2_apply_range() local
      67  if (!pgt)  in stage2_apply_range()
     145  pgt = kvm->arch.mmu.pgt;  in kvm_mmu_split_huge_pages()
     146  if (!pgt)  in kvm_mmu_split_huge_pages()
     940  pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);  in kvm_init_stage2_mmu()
     941  if (!pgt)  in kvm_init_stage2_mmu()
     962  mmu->pgt = pgt;  in kvm_init_stage2_mmu()
    1059  pgt = mmu->pgt;  in kvm_free_stage2_pgd()
    1060  if (pgt) {  in kvm_free_stage2_pgd()
    1115  struct kvm_pgtable *pgt = mmu->pgt;  in kvm_phys_addr_ioremap() local
    [all …]
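
Several of these call sites repeat one guard: mmu->pgt is NULL before kvm_init_stage2_mmu() and again after kvm_free_stage2_pgd(), so every range operation re-checks it before walking. A distilled sketch; the callback parameter is illustrative:

    static int stage2_apply_sketch(struct kvm_s2_mmu *mmu, u64 addr, u64 size,
    				   int (*fn)(struct kvm_pgtable *, u64, u64))
    {
    	struct kvm_pgtable *pgt = mmu->pgt;

    	if (!pgt)	/* stage-2 tables freed, or not yet allocated */
    		return -EINVAL;
    	return fn(pgt, addr, size);
    }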
|
  ptdump.c
     100  struct kvm_pgtable *pgtable = mmu->pgt;  in kvm_ptdump_parser_create()
     145  ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker);  in kvm_ptdump_guest_show()
     220  pgtable = kvm->arch.mmu.pgt;  in kvm_pgtable_debugfs_open()
|
  nested.c
      76  tmp[i].pgt->mmu = &tmp[i];  in kvm_vcpu_init_nested()
     460  if (kvm_pgtable_get_leaf(mmu->pgt, tmp, &pte, NULL))  in get_guest_mapping_ttl()
|
/linux/drivers/gpu/drm/nouveau/nvkm/engine/dma/
  usernv04.c
      52  struct nvkm_memory *pgt =  in nv04_dmaobj_bind() local
      55  return nvkm_gpuobj_wrap(pgt, pgpuobj);  in nv04_dmaobj_bind()
      56  nvkm_kmap(pgt);  in nv04_dmaobj_bind()
      57  offset = nvkm_ro32(pgt, 8 + (offset >> 10));  in nv04_dmaobj_bind()
      59  nvkm_done(pgt);  in nv04_dmaobj_bind()
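
The nv04 bind path shows nvkm's map/read/unmap discipline for table memory: nvkm_kmap() makes the object CPU-accessible, nvkm_ro32() reads a 32-bit word, nvkm_done() ends the access. A sketch restating that pattern; the offset arithmetic is copied from the hit, not re-derived:

    static u32 read_pte_sketch(struct nvkm_memory *pgt, u64 offset)
    {
    	u32 val;

    	nvkm_kmap(pgt);				/* map for CPU access */
    	val = nvkm_ro32(pgt, 8 + (offset >> 10));
    	nvkm_done(pgt);				/* end the access */
    	return val;
    }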
|
/linux/drivers/firmware/efi/libstub/
  x86-5lvl.c
      68  u64 *pgt = (void *)la57_toggle + PAGE_SIZE;  in efi_5level_switch() local
      81  new_cr3 = memset(pgt, 0, PAGE_SIZE);  in efi_5level_switch()
      89  new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE);  in efi_5level_switch()
|
/linux/arch/arm64/kvm/hyp/nvhe/
  mem_protect.c
     151  ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,  in kvm_host_prepare_stage2()
     157  mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);  in kvm_host_prepare_stage2()
     158  mmu->pgt = &host_mmu.pgt;  in kvm_host_prepare_stage2()
     258  ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,  in kvm_guest_prepare_stage2()
     264  vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);  in kvm_guest_prepare_stage2()
     275  kvm_pgtable_stage2_destroy(&vm->pgt);  in reclaim_guest_pages()
     328  struct kvm_pgtable *pgt = &host_mmu.pgt;  in host_stage2_unmap_dev_all() local
     336  ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);  in host_stage2_unmap_dev_all()
     340  return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);  in host_stage2_unmap_dev_all()
     604  return kvm_pgtable_walk(pgt, addr, size, &walker);  in check_page_state_range()
     [all …]
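
The hits at 328-340 suggest host_stage2_unmap_dev_all() walks the memory map, unmapping the device (non-memory) gaps between consecutive regions and finishing with the tail of the address space. A sketch of that loop shape under stated assumptions: regions[]/nr_regions stand in for the hypervisor's memblock list, which this listing does not show:

    static int unmap_dev_sketch(struct kvm_pgtable *pgt,
    				const struct memblock_region *regions,
    				unsigned int nr_regions)
    {
    	u64 addr = 0;
    	unsigned int i;
    	int ret;

    	for (i = 0; i < nr_regions; i++) {
    		/* unmap the gap below this memory region */
    		ret = kvm_pgtable_stage2_unmap(pgt, addr,
    					       regions[i].base - addr);
    		if (ret)
    			return ret;
    		addr = regions[i].base + regions[i].size;
    	}
    	/* and everything above the last region */
    	return kvm_pgtable_stage2_unmap(pgt, addr,
    					BIT(pgt->ia_bits) - addr);
    }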
|
  pkvm.c
     388  mmu->pgt = &hyp_vm->pgt;  in insert_vm_table_entry()
|
/linux/arch/s390/kvm/
  gaccess.c
    1213  unsigned long *pgt, int *dat_protection,  in kvm_s390_shadow_tables() argument
    1269  *pgt = ptr + vaddr.rfx * 8;  in kvm_s390_shadow_tables()
    1297  *pgt = ptr + vaddr.rsx * 8;  in kvm_s390_shadow_tables()
    1326  *pgt = ptr + vaddr.rtx * 8;  in kvm_s390_shadow_tables()
    1364  *pgt = ptr + vaddr.sx * 8;  in kvm_s390_shadow_tables()
    1391  *pgt = ptr;  in kvm_s390_shadow_tables()
    1414  unsigned long pgt = 0;  in kvm_s390_shadow_fault() local
    1433  pte.val = pgt + vaddr.px * PAGE_SIZE;  in kvm_s390_shadow_fault()
    1442  pgt |= PEI_NOT_PTE;  in kvm_s390_shadow_fault()
    1445  pgt += vaddr.px * 8;  in kvm_s390_shadow_fault()
    [all …]
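
Throughout the s390 shadow walk, pgt carries a table origin, and each level steps in by index * 8: rfx/rsx/rtx are the region-first/second/third indexes of the virtual address, sx the segment index, px the page index, and region, segment, and page table entries are all 8 bytes wide. The arithmetic in isolation:

    /* address of entry 'index' in a table starting at 'origin' */
    static unsigned long table_entry_addr(unsigned long origin,
    				          unsigned int index)
    {
    	return origin + index * 8;
    }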
|
/linux/arch/s390/include/asm/
  gmap.h
     135  int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
     138  unsigned long *pgt, int *dat_protection, int *fake);
|
/linux/arch/s390/mm/
  gmap.c
    1333  unsigned long *pgt)  in __gmap_unshadow_pgt() argument
    1339  pgt[i] = _PAGE_INVALID;  in __gmap_unshadow_pgt()
    1352  phys_addr_t sto, pgt;  in gmap_unshadow_pgt() local
    1362  pgt = *ste & _SEGMENT_ENTRY_ORIGIN;  in gmap_unshadow_pgt()
    1364  __gmap_unshadow_pgt(sg, raddr, __va(pgt));  in gmap_unshadow_pgt()
    1366  ptdesc = page_ptdesc(phys_to_page(pgt));  in gmap_unshadow_pgt()
    1383  phys_addr_t pgt;  in __gmap_unshadow_sgt() local
    1390  pgt = sgt[i] & _REGION_ENTRY_ORIGIN;  in __gmap_unshadow_sgt()
    1392  __gmap_unshadow_pgt(sg, raddr, __va(pgt));  in __gmap_unshadow_sgt()
    1394  ptdesc = page_ptdesc(phys_to_page(pgt));  in __gmap_unshadow_sgt()
    [all …]
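
__gmap_unshadow_pgt() simply stamps every slot invalid; the callers then mask the origin bits out of the parent entry and free the backing ptdesc. A sketch of the invalidation loop, assuming the bound is s390's per-table entry count (_PAGE_ENTRIES):

    static void unshadow_pgt_sketch(unsigned long *pgt)
    {
    	int i;

    	for (i = 0; i < _PAGE_ENTRIES; i++)	/* 256 entries per page table */
    		pgt[i] = _PAGE_INVALID;
    }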
|
/linux/arch/x86/kernel/acpi/
  madt_wakeup.c
      68  static void __init free_pgt_page(void *pgt, void *dummy)  in free_pgt_page() argument
      70  return memblock_free(pgt, PAGE_SIZE);  in free_pgt_page()
|
/linux/arch/arm64/kvm/hyp/include/nvhe/
  pkvm.h
      35  struct kvm_pgtable pgt;  member

  mem_protect.h
      50  struct kvm_pgtable pgt;  member
|
/linux/arch/powerpc/kvm/
  book3s_64_mmu_radix.c
    1309  pgd_t *pgt;  in debugfs_radix_read() local
    1347  pgt = NULL;  in debugfs_radix_read()
    1351  pgt = NULL;  in debugfs_radix_read()
    1361  if (!pgt) {  in debugfs_radix_read()
    1363  pgt = kvm->arch.pgtable;  in debugfs_radix_read()
    1370  pgt = nested->shadow_pgtable;  in debugfs_radix_read()
    1379  "pgdir: %lx\n", (unsigned long)pgt);  in debugfs_radix_read()
    1384  pgdp = pgt + pgd_index(gpa);  in debugfs_radix_read()
|
/linux/arch/x86/events/intel/
  uncore_nhmex.c
     878  DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
|
/linux/drivers/accel/habanalabs/common/mmu/
  mmu.c
     953  u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt,  in hl_mmu_hr_pte_phys_to_virt() argument
     959  return pgt->virt_addr + pte_offset;  in hl_mmu_hr_pte_phys_to_virt()
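
hl_mmu_hr_pte_phys_to_virt() resolves a PTE's CPU address by adding the PTE's offset within its host-resident table to the table's kernel virtual base. The arithmetic, with the pte_offset derivation made explicit (an assumption; the hit only shows the final addition):

    static u64 pte_virt_sketch(u64 pgt_virt_base, u64 pte_phys,
    			       u64 pgt_phys_base)
    {
    	u64 pte_offset = pte_phys - pgt_phys_base;

    	return pgt_virt_base + pte_offset;
    }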
|
/linux/drivers/gpu/drm/panthor/
  panthor_mmu.c
     831  const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);  in panthor_vm_page_size() local
     832  u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;  in panthor_vm_page_size()
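
panthor_vm_page_size() derives the VM's page size from io-pgtable's pgsize_bitmap: the lowest set bit marks the smallest supported page size, so ffs() - 1 is its shift. Restated with a worked example:

    static u32 min_page_size_sketch(u32 pgsize_bitmap)
    {
    	u32 pg_shift = ffs(pgsize_bitmap) - 1;

    	/* e.g. bitmap 0x40201000 (4K | 2M | 1G) -> 1 << 12 = 4 KiB */
    	return 1u << pg_shift;
    }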
|