/drivers/iommu/amd/

io_pgtable.c
     131  *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));  in increase_address_space()
     133  pgtable->root = pte;  in increase_address_space()
     134  pgtable->mode += 1;  in increase_address_space()
     173  level = pgtable->mode - 1;  in alloc_pte()
     259  level = pgtable->mode - 1;  in fetch_pte()
     524  if (pgtable->mode == PAGE_MODE_NONE)  in v1_free_pgtable()
     531  free_sub_pt(pgtable->root, pgtable->mode, &freelist);  in v1_free_pgtable()
     539  pgtable->root =  in v1_alloc_pgtable()
     541  if (!pgtable->root)  in v1_alloc_pgtable()
     543  pgtable->mode = PAGE_MODE_3_LEVEL;  in v1_alloc_pgtable()
     [all …]

io_pgtable_v2.c
     201  pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];  in fetch_pte()
     237  struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;  in iommu_v2_map_pages()
     254  pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,  in iommu_v2_map_pages()
     290  struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;  in iommu_v2_unmap_pages()
     300  pte = fetch_pte(pgtable, iova, &unmap_size);  in iommu_v2_unmap_pages()
     319  pte = fetch_pte(pgtable, iova, &pte_pgsize);  in iommu_v2_iova_to_phys()
     336  if (!pgtable || !pgtable->pgd)  in v2_free_pgtable()
     340  free_pgtable(pgtable->pgd, get_pgtable_level());  in v2_free_pgtable()
     341  pgtable->pgd = NULL;  in v2_free_pgtable()
     350  if (!pgtable->pgd)  in v2_alloc_pgtable()
     [all …]

iommu.c
    2528  static inline u64 dma_max_address(enum protection_domain_mode pgtable)  in dma_max_address() argument
    2530  if (pgtable == PD_MODE_V1)  in dma_max_address()
    2560  enum protection_domain_mode pgtable)  in do_iommu_domain_alloc() argument
    2571  domain->pd_mode = pgtable;  in do_iommu_domain_alloc()
    2580  domain->domain.geometry.aperture_end = dma_max_address(pgtable);  in do_iommu_domain_alloc()

/drivers/accel/ivpu/

ivpu_mmu_context.c
     124  kfree(pgtable->pmd_ptrs[pgd_idx]);  in ivpu_mmu_pgtables_free()
     125  kfree(pgtable->pte_ptrs[pgd_idx]);  in ivpu_mmu_pgtables_free()
     129  ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);  in ivpu_mmu_pgtables_free()
     130  pgtable->pgd_dma_ptr = NULL;  in ivpu_mmu_pgtables_free()
     131  pgtable->pgd_dma = 0;  in ivpu_mmu_pgtables_free()
     137  u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr;  in ivpu_mmu_ensure_pgd()
     147  pgtable->pgd_dma_ptr = pgd_dma_ptr;  in ivpu_mmu_ensure_pgd()
     148  pgtable->pgd_dma = pgd_dma;  in ivpu_mmu_ensure_pgd()
     168  if (!pgtable->pmd_ptrs[pgd_idx])  in ivpu_mmu_ensure_pud()
     173  if (!pgtable->pte_ptrs[pgd_idx])  in ivpu_mmu_ensure_pud()
     [all …]

ivpu_mmu.h
      43  int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);

ivpu_mmu_context.h
      28  struct ivpu_mmu_pgtable pgtable;  member

ivpu_mmu.c
    1021  int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)  in ivpu_mmu_cd_set() argument
    1023  return ivpu_mmu_cdtab_entry_set(vdev, ssid, pgtable->pgd_dma, true);  in ivpu_mmu_cd_set()

/drivers/gpu/drm/etnaviv/

etnaviv_iommu.c
      93  u32 pgtable;  in etnaviv_iommuv1_restore() local
     107  pgtable = (u32)v1_context->pgtable_dma;  in etnaviv_iommuv1_restore()
     109  gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);  in etnaviv_iommuv1_restore()
     110  gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);  in etnaviv_iommuv1_restore()
     111  gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);  in etnaviv_iommuv1_restore()
     112  gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);  in etnaviv_iommuv1_restore()
     113  gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);  in etnaviv_iommuv1_restore()

/drivers/iommu/

exynos-iommu.c
     183  return pgtable + lv1ent_offset(iova);  in section_entry()
     907  if (!domain->pgtable)  in exynos_iommu_domain_alloc_paging()
     916  domain->pgtable[i] = ZERO_LV2LINK;  in exynos_iommu_domain_alloc_paging()
     940  iommu_free_pages(domain->pgtable);  in exynos_iommu_domain_alloc_paging()
     960  data->pgtable = 0;  in exynos_iommu_domain_free()
     972  if (lv1ent_page(domain->pgtable + i)) {  in exynos_iommu_domain_free()
     981  iommu_free_pages(domain->pgtable);  in exynos_iommu_domain_free()
    1013  data->pgtable = 0;  in exynos_iommu_identity_attach()
    1056  data->pgtable = pagetable;  in exynos_iommu_attach_device()
    1245  BUG_ON(domain->pgtable == NULL);  in exynos_iommu_map()
    [all …]

Makefile
      12  obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
      13  obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
      14  obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
      15  obj-$(CONFIG_IOMMU_IO_PGTABLE_DART) += io-pgtable-dart.o

omap-iommu.c
    1405  iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);  in omap_iommu_attach_init()
    1406  if (!iommu->pgtable)  in omap_iommu_attach_init()
    1413  if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,  in omap_iommu_attach_init()
    1427  kfree(iommu->pgtable);  in omap_iommu_detach_fini()
    1469  ret = omap_iommu_attach(oiommu, iommu->pgtable);  in omap_iommu_attach_dev()

omap-iommu.h
      34  u32 *pgtable;  member

/drivers/iommu/arm/arm-smmu/

arm-smmu-qcom.c
     216  struct io_pgtable *pgtable =  in qcom_adreno_smmu_get_ttbr1_cfg() local
     218  return &pgtable->cfg;  in qcom_adreno_smmu_get_ttbr1_cfg()
     231  struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);  in qcom_adreno_smmu_set_ttbr0_cfg() local
     246  cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);  in qcom_adreno_smmu_set_ttbr0_cfg()

qcom_iommu.c
     475  struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,  in qcom_iommu_flush_iotlb_all() local
     481  qcom_iommu_tlb_sync(pgtable->cookie);  in qcom_iommu_flush_iotlb_all()

/drivers/iommu/intel/

iommu.c
     627  struct dma_pte *pgtable;  in dmar_fault_dump_ptes() local
     661  pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);  in dmar_fault_dump_ptes()
     699  pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);  in dmar_fault_dump_ptes()
     702  pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK);  in dmar_fault_dump_ptes()
     706  pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);  in dmar_fault_dump_ptes()