/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmmnv44.c
     38  while (ptes--) {    in nv44_vmm_pgt_fill()
     84  ptes -= pten;    in nv44_vmm_pgt_pte()
     94  ptes -= 4;    in nv44_vmm_pgt_pte()
     97  if (ptes) {    in nv44_vmm_pgt_pte()
    121  ptes -= pten;    in nv44_vmm_pgt_dma()
    133  ptes -= 4;    in nv44_vmm_pgt_dma()
    136  if (ptes) {    in nv44_vmm_pgt_dma()
    138  map->dma += ptes;    in nv44_vmm_pgt_dma()
    155  ptes -= pten;    in nv44_vmm_pgt_unmap()
    163  ptes -= 4;    in nv44_vmm_pgt_unmap()
    [all …]
|
vmmgp100.c
     41  while (ptes--) {    in gp100_vmm_pfn_unmap()
     60  while (ptes--) {    in gp100_vmm_pfn_clear()
     82  for (; ptes; ptes--, map->pfn++) {    in gp100_vmm_pgt_pfn()
    120  map->type += ptes * map->ctag;    in gp100_vmm_pgt_pte()
    122  while (ptes--) {    in gp100_vmm_pgt_pte()
    142  while (ptes--) {    in gp100_vmm_pgt_dma()
    203  map->type += ptes * map->ctag;    in gp100_vmm_pd0_pte()
    205  while (ptes--) {    in gp100_vmm_pd0_pte()
    275  while (ptes--) {    in gp100_vmm_pd0_pfn_unmap()
    296  while (ptes--) {    in gp100_vmm_pd0_pfn_clear()
    [all …]
|
vmmgh100.c
     18  while (ptes--) {    in gh100_vmm_pgt_pte()
     28  VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);    in gh100_vmm_pgt_sgl()
     36  VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);    in gh100_vmm_pgt_dma()
     39  while (ptes--) {    in gh100_vmm_pgt_dma()
     48  VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);    in gh100_vmm_pgt_dma()
     60  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)    in gh100_vmm_pgt_sparse() argument
     64  VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);    in gh100_vmm_pgt_sparse()
     78  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)    in gh100_vmm_lpt_invalid() argument
     95  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)    in gh100_vmm_pd0_pte() argument
     99  while (ptes--) {    in gh100_vmm_pd0_pte()
    [all …]
|
vmmnv41.c
     28  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)    in nv41_vmm_pgt_pte() argument
     31  while (ptes--) {    in nv41_vmm_pgt_pte()
     39  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in nv41_vmm_pgt_sgl() argument
     41  VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);    in nv41_vmm_pgt_sgl()
     46  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in nv41_vmm_pgt_dma() argument
     50  while (ptes--) {    in nv41_vmm_pgt_dma()
     56  VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);    in nv41_vmm_pgt_dma()
     62  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)    in nv41_vmm_pgt_unmap() argument
     64  VMM_FO032(pt, vmm, ptei * 4, 0, ptes);    in nv41_vmm_pgt_unmap()
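
The nv04/nv41 hits above all point at the same basic shape: a per-PTE loop that derives an entry from the current address, writes it, and steps to the next page and slot until `ptes` runs out. A minimal standalone sketch of that loop, assuming a CPU-mapped 32-bit page table; `write_pte`, `fill_ptes`, and the PTE encoding (page frame shifted up, valid bit in bit 0) are invented stand-ins, not the real nv41 layout or the VMM_WO032 macro:

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)

    /* Hypothetical page-table window: ptei indexes 32-bit entries. */
    static void write_pte(uint32_t *pt, uint32_t ptei, uint32_t data)
    {
        pt[ptei] = data;
    }

    /*
     * Sketch of the "while (ptes--)" fill loop: encode the current address
     * into a PTE, store it, then advance to the next page and the next slot.
     */
    static void fill_ptes(uint32_t *pt, uint32_t ptei, uint32_t ptes, uint64_t addr)
    {
        while (ptes--) {
            write_pte(pt, ptei++, ((uint32_t)(addr >> PAGE_SHIFT) << 1) | 1);
            addr += PAGE_SIZE;
        }
    }

The unmap hits are the inverse of the same idea: the run [ptei, ptei + ptes) is filled with zeroes (VMM_FO032 in the listing) instead of encoded addresses.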
|
vmmnv04.c
     29  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)    in nv04_vmm_pgt_pte() argument
     32  while (ptes--) {    in nv04_vmm_pgt_pte()
     40  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in nv04_vmm_pgt_sgl() argument
     42  VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);    in nv04_vmm_pgt_sgl()
     47  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in nv04_vmm_pgt_dma() argument
     51  while (ptes--)    in nv04_vmm_pgt_dma()
     55  VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);    in nv04_vmm_pgt_dma()
     61  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)    in nv04_vmm_pgt_unmap() argument
     63  VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);    in nv04_vmm_pgt_unmap()
|
vmmnv50.c
     39  map->type += ptes * map->ctag;    in nv50_vmm_pgt_pte()
     41  while (ptes) {    in nv50_vmm_pgt_pte()
     44  if (ptes >= pten && IS_ALIGNED(ptei, pten))    in nv50_vmm_pgt_pte()
     50  ptes -= pten;    in nv50_vmm_pgt_pte()
     59  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in nv50_vmm_pgt_sgl() argument
     66  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in nv50_vmm_pgt_dma() argument
     69  VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);    in nv50_vmm_pgt_dma()
     71  while (ptes--) {    in nv50_vmm_pgt_dma()
     85  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in nv50_vmm_pgt_mem() argument
     92  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)    in nv50_vmm_pgt_unmap() argument
    [all …]
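
The nv50 entry points at a slightly smarter loop than the per-PTE fill: when the remaining run is long enough and `ptei` is aligned, a whole block of `pten` entries is emitted at once, otherwise the code falls back to single entries. A hedged, self-contained sketch of that control flow; `emit_block` and `emit_one` are invented placeholders and the real nv50 PTE encoding is not reproduced:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_aligned(uint32_t x, uint32_t a)
    {
        return (x & (a - 1)) == 0;
    }

    /* Invented emitters: a real backend would write the entries here. */
    static void emit_block(uint32_t ptei, uint32_t n) { (void)ptei; (void)n; }
    static void emit_one(uint32_t ptei)               { (void)ptei; }

    /*
     * Sketch of the flow suggested by the nv50_vmm_pgt_pte hits: consume the
     * run in blocks of `pten` entries whenever both the count and the
     * alignment of ptei allow it, and fall back to single entries otherwise.
     */
    static void emit_ptes(uint32_t ptei, uint32_t ptes, uint32_t pten)
    {
        while (ptes) {
            if (ptes >= pten && is_aligned(ptei, pten)) {
                emit_block(ptei, pten);
                ptei += pten;
                ptes -= pten;
            } else {
                emit_one(ptei);
                ptei++;
                ptes--;
            }
        }
    }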
|
vmmgf100.c
     33  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)    in gf100_vmm_pgt_pte() argument
     39  while (ptes--) {    in gf100_vmm_pgt_pte()
     48  map->type += ptes * map->ctag;    in gf100_vmm_pgt_pte()
     50  while (ptes--) {    in gf100_vmm_pgt_pte()
     59  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in gf100_vmm_pgt_sgl() argument
     66  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in gf100_vmm_pgt_dma() argument
     69  VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);    in gf100_vmm_pgt_dma()
     71  while (ptes--) {    in gf100_vmm_pgt_dma()
     85  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)    in gf100_vmm_pgt_mem() argument
     92  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)    in gf100_vmm_pgt_unmap() argument
    [all …]
|
vmm.c
    212  ptes -= pten;    in nvkm_vmm_unref_sptes()
    222  for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {    in nvkm_vmm_unref_sptes()
    236  for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {    in nvkm_vmm_unref_sptes()
    311  ptes -= pten;    in nvkm_vmm_ref_sptes()
    321  for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {    in nvkm_vmm_ref_sptes()
    335  for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {    in nvkm_vmm_ref_sptes()
    385  while (ptes--)    in nvkm_vmm_sparse_ptes()
    423  u32 pteb, ptei, ptes;    in nvkm_vmm_ref_hwpt() local
    449  for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {    in nvkm_vmm_ref_hwpt()
    463  while (ptes--)    in nvkm_vmm_ref_hwpt()
    [all …]
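
The vmm.c hits are mostly run-length scans: starting from one PTE at `pteb`, the loop grows `ptes` while the following entries share the same state, so a single operation can then be applied to the whole run. The real nvkm_vmm_ref_sptes/unref_sptes functions track small-page reference counts under a large page; the standalone sketch below keeps only the loop shape, with a hypothetical `same_state()` predicate and a caller-supplied `op`:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical per-PTE state array; the real code tracks refcounts. */
    static bool same_state(const uint8_t *state, uint32_t a, uint32_t b)
    {
        return state[a] == state[b];
    }

    /*
     * Run-counting idiom behind the vmm.c hits
     * ("for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++)"): start a run at
     * pteb, extend ptes while the state matches, then hand the whole run
     * [pteb, pteb + ptes) to one operation.
     */
    static void for_each_run(const uint8_t *state, uint32_t lpti,
                             void (*op)(uint32_t pteb, uint32_t ptes))
    {
        uint32_t ptei = 0;

        while (ptei < lpti) {
            uint32_t pteb = ptei;
            uint32_t ptes;

            for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
                if (!same_state(state, pteb, ptei))
                    break;
            }
            op(pteb, ptes);
        }
    }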
|
vmmgk104.c
     26  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)    in gk104_vmm_lpt_invalid() argument
     29  VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(1) /* PRIV. */, ptes);    in gk104_vmm_lpt_invalid()
|
vmmgm200.c
     29  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)    in gm200_vmm_pgt_sparse() argument
     32  VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);    in gm200_vmm_pgt_sparse()
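
The gk104/gm200 hits (and the gh100 sparse/lpt_invalid ones above) are not per-page writes at all: a whole run of 64-bit entries is filled with a single marker value, a PRIV bit on gk104 and a volatile/sparse bit on gm200, via VMM_FO064. A minimal sketch of that fill-with-constant pattern; `fill_ptes64` and `mark_sparse` are invented names and the bit position is taken only from the gm200 line shown in the listing:

    #include <stdint.h>

    /* Fill a run of 64-bit PTEs with one fixed value (VMM_FO064-style). */
    static void fill_ptes64(uint64_t *pt_base, uint32_t ptei, uint64_t data, uint32_t ptes)
    {
        while (ptes--)
            pt_base[ptei++] = data;
    }

    /* e.g. mark a run as sparse/volatile with a single flag bit (illustrative). */
    static void mark_sparse(uint64_t *pt_base, uint32_t ptei, uint32_t ptes)
    {
        fill_ptes64(pt_base, ptei, 1ull << 32, ptes);
    }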
|
vmm.h
     54  struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
     58  u32 ptei, u32 ptes, struct nvkm_vmm_map *);
     72  bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
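
The vmm.h hits explain why every backend function above has the same shape: the per-generation hooks are function pointers that all take a starting index (`ptei`) and a count (`ptes`), so one caller can service arbitrary runs of PTEs through whichever GPU-specific implementation is installed. A loose sketch of that callback shape; `struct pgt_ops` and the stand-in types are invented for illustration and do not reproduce the real nouveau structures:

    #include <stdbool.h>
    #include <stdint.h>

    struct mmu_pt;   /* page-table window (stand-in for struct nvkm_mmu_pt) */
    struct vmm;      /* address space     (stand-in for struct nvkm_vmm) */
    struct vmm_map;  /* mapping arguments (stand-in for struct nvkm_vmm_map) */

    /* Invented ops table mirroring the (ptei, ptes) callback shapes in vmm.h. */
    struct pgt_ops {
        void (*unmap)(struct vmm *, struct mmu_pt *, uint32_t ptei, uint32_t ptes);
        void (*map)(struct vmm *, struct mmu_pt *, uint32_t ptei, uint32_t ptes,
                    struct vmm_map *);
        bool (*pfn_clear)(struct vmm *, struct mmu_pt *, uint32_t ptei, uint32_t ptes);
    };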
|
/drivers/gpu/drm/xe/
xe_migrate.c
    567  u32 ptes;    in emit_pte() local
    578  ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);    in emit_pte()
    580  while (ptes) {    in emit_pte()
    589  ptes -= chunk;    in emit_pte()
   1358  u32 ptes, ofs;    in __xe_migrate_update_pgtables() local
   1387  ptes = num_updates;    in __xe_migrate_update_pgtables()
   1389  while (ptes) {    in __xe_migrate_update_pgtables()
   1426  ptes -= chunk;    in __xe_migrate_update_pgtables()
   1577  u32 ptes;    in build_pt_update_batch_sram() local
   1581  while (ptes) {    in build_pt_update_batch_sram()
    [all …]
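
The xe_migrate.c hits share one idiom: the PTE count is derived from a byte size, then consumed in bounded chunks because a single batch command can only carry so many entries. A standalone sketch of that chunking; `MAX_PTES_PER_CMD`, `emit_chunk`, and the 4 KiB page size are assumptions for illustration, not the driver's actual limits:

    #include <stdint.h>

    #define PAGE_SIZE        4096u
    #define MAX_PTES_PER_CMD 512u   /* invented per-command batch limit */

    static uint32_t div_round_up(uint64_t n, uint32_t d)
    {
        return (uint32_t)((n + d - 1) / d);
    }

    /* Invented emitter: would append `chunk` PTE writes to a command batch. */
    static void emit_chunk(uint64_t addr, uint32_t chunk) { (void)addr; (void)chunk; }

    /*
     * Sketch of the chunking seen in emit_pte() and
     * __xe_migrate_update_pgtables(): size -> PTE count -> bounded chunks.
     */
    static void emit_pte_sketch(uint64_t addr, uint64_t size)
    {
        uint32_t ptes = div_round_up(size, PAGE_SIZE);

        while (ptes) {
            uint32_t chunk = ptes < MAX_PTES_PER_CMD ? ptes : MAX_PTES_PER_CMD;

            emit_chunk(addr, chunk);
            addr += (uint64_t)chunk * PAGE_SIZE;
            ptes -= chunk;
        }
    }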
|
xe_pt.c
    970  struct xe_pt_entry *ptes = update->pt_entries;    in xe_vm_populate_pgtable() local
    977  sizeof(u64), u64, ptes[i].pte);    in xe_vm_populate_pgtable()
    979  ptr[i] = ptes[i].pte;    in xe_vm_populate_pgtable()
|
/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/
gsp.c
   1142  gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;    in r535_gsp_shared_init()
   1143  gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);    in r535_gsp_shared_init()
   1144  gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);    in r535_gsp_shared_init()
   1146  ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size +    in r535_gsp_shared_init()
   1153  gsp->shm.ptes.ptr = gsp->shm.mem.data;    in r535_gsp_shared_init()
   1154  gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size;    in r535_gsp_shared_init()
   1157  for (i = 0; i < gsp->shm.ptes.nr; i++)    in r535_gsp_shared_init()
   1158  gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT);    in r535_gsp_shared_init()
   1187  args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;    in r535_gsp_set_rmargs()
   1460  static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)    in create_pte_array() argument
    [all …]
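
The r535 GSP hits show an interesting sizing step: the u64 PTE array that describes the shared command/message-queue region lives inside that same region, so the pages needed to hold the array are added on top of the cmdq/msgq page count before the array size is rounded up to a whole page, and the array is then filled with one per-page address. A standalone sketch of that arithmetic plus a create_pte_array()-style fill loop; the helper names and a 4 KiB GSP_PAGE_SIZE are assumptions for illustration:

    #include <stddef.h>
    #include <stdint.h>

    #define GSP_PAGE_SHIFT 12                           /* assumed 4 KiB pages */
    #define GSP_PAGE_SIZE  ((size_t)1 << GSP_PAGE_SHIFT)

    static size_t div_round_up(size_t n, size_t d) { return (n + d - 1) / d; }
    static size_t align_up(size_t x, size_t a)     { return (x + a - 1) & ~(a - 1); }

    /*
     * Sizing as in r535_gsp_shared_init(): queue pages first, then the pages
     * needed to hold that many u64 PTEs are added on top.
     */
    static size_t shared_pte_count(size_t cmdq_size, size_t msgq_size)
    {
        size_t nr = (cmdq_size + msgq_size) >> GSP_PAGE_SHIFT;

        nr += div_round_up(nr * sizeof(uint64_t), GSP_PAGE_SIZE);
        return nr;
    }

    /* The PTE array itself occupies a whole number of pages. */
    static size_t shared_pte_array_size(size_t nr)
    {
        return align_up(nr * sizeof(uint64_t), GSP_PAGE_SIZE);
    }

    /* Fill loop in the style of create_pte_array(): one entry per page. */
    static void fill_pte_array(uint64_t *ptes, uint64_t addr, size_t size)
    {
        size_t pages = div_round_up(size, GSP_PAGE_SIZE);

        for (size_t i = 0; i < pages; i++)
            ptes[i] = addr + ((uint64_t)i << GSP_PAGE_SHIFT);
    }

The resulting count is what gets handed to the firmware as pageTableEntryCount in both the r535 and r570 set_rmargs paths listed here.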
|
/drivers/gpu/drm/msm/
msm_iommu.c
    271  msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4])    in msm_iommu_pagetable_walk()
    286  for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++)    in msm_iommu_pagetable_walk()
    287  ptes[i] = wd.ptes[i];    in msm_iommu_pagetable_walk()
|
msm_mmu.h
     92  int msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]);
|
msm_gpu.h
    109  u64 ptes[4];    member
|
msm_gpu.c
    390  msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);    in msm_gpu_crashstate_capture()
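
The msm hits fit together: msm_iommu_pagetable_walk() takes a faulting IOVA and a uint64_t ptes[4] out-parameter, the crash-state code stores those four values in its info struct, and adreno_show() later prints them, one raw descriptor per translation level, so a dump shows where the walk stopped. The sketch below only illustrates that "record one PTE per level" idea; the four-level, 9-bits-per-level, 4 KiB layout and the pre-resolved per-level tables are invented to keep it self-contained, and this is not the real io-pgtable walker:

    #include <stdint.h>

    #define NUM_LEVELS 4

    /*
     * Record the raw descriptor seen at each level of an (invented) 4-level
     * table.  Levels past an invalid entry stay zero, which is how a dump can
     * show where translation stopped.
     */
    static void walk_and_record(const uint64_t *const tables[NUM_LEVELS],
                                uint64_t iova, uint64_t ptes[NUM_LEVELS])
    {
        for (int lvl = 0; lvl < NUM_LEVELS; lvl++)
            ptes[lvl] = 0;

        for (int lvl = 0; lvl < NUM_LEVELS; lvl++) {
            unsigned int idx = (unsigned int)(iova >> (39 - 9 * lvl)) & 0x1ff;

            ptes[lvl] = tables[lvl][idx];
            if (!(ptes[lvl] & 1))      /* invalid: stop the walk here */
                break;
        }
    }

The io-pgtable-arm.c hit at the end of this listing (data->ptes[lvl] = *ptep in visit_pgtable_walk()) is the generic-walker side of the same per-level recording.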
|
/drivers/gpu/drm/gma500/
mmu.c
    269  uint32_t *ptes;    in psb_mmu_alloc_pt() local
    285  ptes = (uint32_t *) v;    in psb_mmu_alloc_pt()
    287  *ptes++ = pd->invalid_pte;    in psb_mmu_alloc_pt()
|
/drivers/gpu/drm/msm/adreno/
adreno_gpu.c
    974  info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);    in adreno_show()
|
/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/
gsp.c
    189  args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;    in r570_gsp_set_rmargs()
|
/drivers/gpu/drm/nouveau/include/nvkm/subdev/
gsp.h
    158  } ptes;    member
|
/drivers/iommu/
Kconfig
    261  depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
|
io-pgtable-arm.c
    761  data->ptes[lvl] = *ptep;    in visit_pgtable_walk()
|