| /linux/drivers/media/platform/nvidia/tegra-vde/ |
| A D | iommu.c |
      24  struct iova *iova;   in tegra_vde_iommu_map() local
      34  if (!iova)   in tegra_vde_iommu_map()
      37  addr = iova_dma_addr(&vde->iova, iova);   in tegra_vde_iommu_map()
      42  __free_iova(&vde->iova, iova);   in tegra_vde_iommu_map()
      46  *iovap = iova;   in tegra_vde_iommu_map()
      55  dma_addr_t addr = iova_dma_addr(&vde->iova, iova);   in tegra_vde_iommu_unmap()
      58  __free_iova(&vde->iova, iova);   in tegra_vde_iommu_unmap()
      64  struct iova *iova;   in tegra_vde_iommu_init() local
     103  iova = reserve_iova(&vde->iova, 0x60000000 >> shift,   in tegra_vde_iommu_init()
     105  if (!iova) {   in tegra_vde_iommu_init()
          [all …]
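The tegra-vde hits above follow the common allocate-then-map pattern: carve a range out of the IOVA domain, map the buffer at the resulting bus address, and hand the range back with __free_iova() if the mapping fails. A minimal sketch of that pattern, assuming a scatter-gather buffer; the names (example_iommu_map, dom, iovad) and error codes are illustrative:

    #include <linux/iommu.h>
    #include <linux/iova.h>
    #include <linux/scatterlist.h>

    static int example_iommu_map(struct iommu_domain *dom,
                                 struct iova_domain *iovad,
                                 struct sg_table *sgt, size_t size,
                                 struct iova **iovap, dma_addr_t *addrp)
    {
            unsigned long shift = iova_shift(iovad);
            struct iova *iova;
            ssize_t mapped;

            /* Reserve size >> shift granule-sized pages. */
            iova = alloc_iova(iovad, size >> shift, ~0UL, true);
            if (!iova)
                    return -ENOMEM;

            /* Map the buffer at the bus address the allocator picked. */
            mapped = iommu_map_sgtable(dom, iova_dma_addr(iovad, iova), sgt,
                                       IOMMU_READ | IOMMU_WRITE);
            if (mapped <= 0) {
                    __free_iova(iovad, iova);       /* roll back on failure */
                    return -ENXIO;
            }

            *addrp = iova_dma_addr(iovad, iova);
            *iovap = iova;
            return 0;
    }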
|
| A D | dmabuf-cache.c |
      28  struct iova *iova;   member
      39  tegra_vde_iommu_unmap(entry->vde, entry->iova);   in tegra_vde_release_entry()
      73  struct iova *iova;   in tegra_vde_dmabuf_cache_map() local
      91  *addrp = iova_dma_addr(&vde->iova, entry->iova);   in tegra_vde_dmabuf_cache_map()
     125  err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);   in tegra_vde_dmabuf_cache_map()
     129  *addrp = iova_dma_addr(&vde->iova, iova);   in tegra_vde_dmabuf_cache_map()
     132  iova = NULL;   in tegra_vde_dmabuf_cache_map()
     139  entry->iova = iova;   in tegra_vde_dmabuf_cache_map()
|
| /linux/include/linux/ |
| A D | iova.h |
      18  struct iova {   struct
      37  struct iova anchor; /* rbtree lookup anchor */
      43  static inline unsigned long iova_size(struct iova *iova)   in iova_size() argument
      45  return iova->pfn_hi - iova->pfn_lo + 1;   in iova_size()
      60  return iova & iova_mask(iovad);   in iova_offset()
      73  static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)   in iova_dma_addr() argument
      75  return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);   in iova_dma_addr()
      80  return iova >> iova_shift(iovad);   in iova_pfn()
      90  void __free_iova(struct iova_domain *iovad, struct iova *iova);
     119  static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)   in __free_iova() argument
          [all …]
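The inline helpers excerpted above are small page-frame arithmetic over the domain's granule. A worked example, assuming a 4 KiB granule so that iova_shift() is 12 (all values invented for illustration):

    /* struct iova r = { .pfn_lo = 0x10, .pfn_hi = 0x1f };           */
    /*                                                                */
    /* iova_size(&r)               == 0x1f - 0x10 + 1  = 16 pages     */
    /* iova_dma_addr(iovad, &r)    == 0x10 << 12       = 0x10000      */
    /* iova_pfn(iovad, 0x10345)    == 0x10345 >> 12    = 0x10         */
    /* iova_offset(iovad, 0x10345) == 0x10345 & 0xfff  = 0x345        */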
|
| /linux/drivers/iommu/ |
| A D | iova.c |
     233  static void free_iova_mem(struct iova *iova)   in free_iova_mem() argument
     282  struct iova *iova = to_iova(node);   in private_find_iova() local
     312  struct iova *iova;   in find_iova() local
     351  struct iova *iova;   in free_iova() local
     447  struct iova *iova, *tmp;   in put_iova_domain() local
     461  struct iova *iova = to_iova(node);   in __is_range_overlap() local
     471  struct iova *iova;   in alloc_and_init_iova() local
     486  struct iova *iova;   in __insert_new_range() local
     489  if (iova)   in __insert_new_range()
     496  __adjust_overlap_range(struct iova *iova,   in __adjust_overlap_range() argument
          [all …]
|
| A D | io-pgtable-arm-v7s.c |
     564  iova += pgsize;   in arm_v7s_map_pages()
     753  iova += pgsize;   in arm_v7s_unmap_pages()
     969  iova = 0;   in arm_v7s_do_selftests()
     972  if (ops->map_pages(ops, iova, iova, size, 1,   in arm_v7s_do_selftests()
     979  if (!ops->map_pages(ops, iova, iova + size, size, 1,   in arm_v7s_do_selftests()
     984  if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))   in arm_v7s_do_selftests()
     987  iova += SZ_16M;   in arm_v7s_do_selftests()
    1011  iova = 0;   in arm_v7s_do_selftests()
    1022  if (ops->map_pages(ops, iova, iova, size, 1, IOMMU_WRITE,   in arm_v7s_do_selftests()
    1026  if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))   in arm_v7s_do_selftests()
          [all …]
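The selftest hits show the generic struct io_pgtable_ops calling convention: map_pages() takes (iova, paddr, pgsize, pgcount, prot, gfp, &mapped), and iova_to_phys() must return the physical address including the byte offset, which is what the "+ 42" probes verify. A condensed fragment of one such identity-map check, assuming ops came from alloc_io_pgtable_ops() and returning -EFAULT in place of the selftest's failure helper:

    size_t mapped = 0;
    unsigned long iova = SZ_16M;

    /* Identity-map one 4 KiB page (paddr == iova). */
    if (ops->map_pages(ops, iova, iova, SZ_4K, 1,
                       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped))
            return -EFAULT;

    /* The walk must report the offset within the page, too. */
    if (ops->iova_to_phys(ops, iova + 42) != iova + 42)
            return -EFAULT;

    if (ops->unmap_pages(ops, iova, SZ_4K, 1, NULL) != SZ_4K)
            return -EFAULT;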
|
| A D | io-pgtable-dart.c |
     119  unsigned long iova, phys_addr_t paddr,   in dart_init_pte() argument
     191  int tbl = dart_get_table(data, iova);   in dart_get_l2()
     197  ptep += dart_get_l1_index(data, iova);   in dart_get_l2()
     251  tbl = dart_get_table(data, iova);   in dart_map_pages()
     254  ptep += dart_get_l1_index(data, iova);   in dart_map_pages()
     304  ptep = dart_get_l2(data, iova);   in dart_unmap_pages()
     326  iova + i * pgsize, pgsize);   in dart_unmap_pages()
     336  unsigned long iova)   in dart_iova_to_phys() argument
     341  ptep = dart_get_l2(data, iova);   in dart_iova_to_phys()
     347  ptep += dart_get_l2_index(data, iova);   in dart_iova_to_phys()
          [all …]
|
| A D | sun50i-iommu.c |
     302  unsigned long iova)   in sun50i_iommu_zap_iova() argument
     319  unsigned long iova)   in sun50i_iommu_zap_ptw_cache() argument
     341  sun50i_iommu_zap_iova(iommu, iova);   in sun50i_iommu_zap_range()
     625  &iova, &page_phys, &paddr, prot);   in sun50i_iommu_map()
     663  dma_addr_t iova)   in sun50i_iommu_iova_to_phys() argument
     681  sun50i_iova_get_page_offset(iova);   in sun50i_iommu_iova_to_phys()
     881  phys_addr_t iova;   in sun50i_iommu_handle_pt_irq() local
     887  iova = iommu_read(iommu, addr_reg);   in sun50i_iommu_handle_pt_irq()
     898  return iova;   in sun50i_iommu_handle_pt_irq()
     904  phys_addr_t iova;   in sun50i_iommu_handle_perm_irq() local
          [all …]
|
| A D | exynos-iommu.c |
     100  #define section_offs(iova) (iova & (SECT_SIZE - 1))   argument
     102  #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))   argument
     104  #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))   argument
     111  return iova >> SECT_ORDER;   in lv1ent_offset()
     462  iova += SPAGE_SIZE;   in __sysmmu_tlb_invalidate_entry()
     672  sysmmu_iova_t iova)   in sysmmu_tlb_invalidate_flpdcache() argument
    1140  iova);   in lv1set_section()
    1147  iova);   in lv1set_section()
    1267  __func__, ret, size, iova);   in exynos_iommu_map()
    1326  ent = page_entry(ent, iova);   in exynos_iommu_unmap()
          [all …]
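The three *_offs() macros mask off the in-page offset for each mapping size the SysMMU supports. A worked example, assuming the conventional Exynos sizes (1 MiB sections, 64 KiB large pages, 4 KiB small pages, SECT_ORDER == 20):

    /* iova = 0x12345678                                         */
    /* section_offs(iova)  = iova & (SZ_1M  - 1) = 0x45678       */
    /* lpage_offs(iova)    = iova & (SZ_64K - 1) =  0x5678       */
    /* spage_offs(iova)    = iova & (SZ_4K  - 1) =   0x678       */
    /* lv1ent_offset(iova) = iova >> 20          = 0x123         */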
|
| A D | virtio-iommu.c |
     324  mapping->iova.start = iova;   in viommu_add_mapping()
     325  mapping->iova.last = end;   in viommu_add_mapping()
     345  u64 iova, u64 end)   in viommu_del_mappings() argument
     360  if (mapping->iova.start < iova)   in viommu_del_mappings()
     367  unmapped += mapping->iova.last - mapping->iova.start + 1;   in viommu_del_mappings()
     391  iova = ALIGN(iova, granule);   in viommu_domain_map_identity()
     412  iova = resv_end + 1;   in viommu_domain_map_identity()
     415  ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,   in viommu_domain_map_identity()
     868  unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);   in viommu_unmap_pages()
     897  node = interval_tree_iter_first(&vdomain->mappings, iova, iova);   in viommu_iova_to_phys()
          [all …]
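viommu_iova_to_phys() resolves an address through an interval tree keyed by [start, last], probing with a single-address range (iova, iova). A sketch of that lookup with illustrative types (the real driver's viommu_mapping also carries flags):

    #include <linux/interval_tree.h>

    struct example_mapping {
            struct interval_tree_node iova; /* .start/.last in IOVA space */
            phys_addr_t paddr;
    };

    static phys_addr_t example_iova_to_phys(struct rb_root_cached *mappings,
                                            u64 iova)
    {
            struct interval_tree_node *node;
            struct example_mapping *m;

            node = interval_tree_iter_first(mappings, iova, iova);
            if (!node)
                    return 0;

            m = container_of(node, struct example_mapping, iova);
            /* Translate by the offset into the matched interval. */
            return m->paddr + (iova - m->iova.start);
    }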
|
| /linux/drivers/fpga/ |
| A D | dfl-afu-dma-region.c |
     125  u64 iova, u64 size)   in dma_region_check_iova() argument
     127  if (!size && region->iova != iova)   in dma_region_check_iova()
     130  return (region->iova <= iova) &&   in dma_region_check_iova()
     131  (region->length + region->iova >= iova + size);   in dma_region_check_iova()
     150  (unsigned long long)region->iova);   in afu_dma_region_add()
     164  if (region->iova < this->iova)   in afu_dma_region_add()
     166  else if (region->iova > this->iova)   in afu_dma_region_add()
     217  if (region->iova)   in afu_dma_region_destroy()
     262  if (iova < region->iova)   in afu_dma_region_find()
     264  else if (iova > region->iova)   in afu_dma_region_find()
          [all …]
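afu_dma_region_add() and afu_dma_region_find() walk a plain rbtree ordered by iova, descending left or right on the key. A generic sketch of the lookup with illustrative types (the real driver additionally distinguishes exact and containing matches via dma_region_check_iova()):

    #include <linux/rbtree.h>

    struct example_region {
            struct rb_node node;
            u64 iova;
            u64 length;
    };

    static struct example_region *example_region_find(struct rb_root *root,
                                                      u64 iova, u64 size)
    {
            struct rb_node *n = root->rb_node;

            while (n) {
                    struct example_region *r =
                            rb_entry(n, struct example_region, node);

                    if (iova + size <= r->iova)
                            n = n->rb_left;
                    else if (iova >= r->iova + r->length)
                            n = n->rb_right;
                    else
                            return r;  /* overlaps [iova, iova + size) */
            }
            return NULL;
    }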
|
| /linux/drivers/iommu/iommufd/ |
| A D | iova_bitmap.c |
      36  unsigned long iova;   member
     115  unsigned long iova;   member
     131  unsigned long iova)   in iova_bitmap_offset_to_index()
     260  bitmap->iova = iova;   in iova_bitmap_alloc()
     262  mapped->iova = iova;   in iova_bitmap_alloc()
     347  (iova >= mapped->iova &&   in iova_bitmap_mapped_range()
     348  (iova + length - 1) <= (mapped->iova + mapped->length - 1));   in iova_bitmap_mapped_range()
     357  unsigned long iova)   in iova_bitmap_advance_to() argument
     361  index = iova_bitmap_offset_to_index(bitmap, iova - bitmap->iova);   in iova_bitmap_advance_to()
     421  cur_bit = ((iova - mapped->iova) >>   in iova_bitmap_set()
          [all …]
|
| A D | selftest.c |
      68  u64 *iova)   in __iommufd_test_syz_conv_iova() argument
      93  u64 *iova)   in iommufd_test_syz_conv_iova() argument
     119  *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);   in iommufd_test_syz_conv_iova_id()
     260  iova += pgsize;   in mock_domain_read_and_clear_dirty()
     271  iova += pgsize;   in mock_domain_read_and_clear_dirty()
     272  } while (iova < end);   in mock_domain_read_and_clear_dirty()
     480  dma_addr_t iova)   in mock_domain_iova_to_phys() argument
     975  unsigned long iova;   member
    1007  if (iova > item->iova + item->length - 1 ||   in iommufd_test_access_unmap()
    1246  uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,   in iommufd_test_access_pages()
          [all …]
|
| /linux/include/trace/events/ |
| A D | iommu.h |
      83  TP_ARGS(iova, paddr, size),
      86  __field(u64, iova)
      92  __entry->iova = iova;
      98  __entry->iova, __entry->iova + __entry->size, __entry->paddr,
     110  __field(u64, iova)
     116  __entry->iova = iova;
     122  __entry->iova, __entry->iova + __entry->size,
     131  TP_ARGS(dev, iova, flags),
     136  __field(u64, iova)
     143  __entry->iova = iova;
          [all …]
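These fragments are the usual TRACE_EVENT boilerplate: declare fields in TP_STRUCT__entry(), fill them in TP_fast_assign(), and format them in TP_printk(). Reassembled from the lines above, the map event looks roughly like this (a condensed sketch, not the verbatim header):

    TRACE_EVENT(map,
            TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
            TP_ARGS(iova, paddr, size),

            TP_STRUCT__entry(
                    __field(u64, iova)
                    __field(u64, paddr)
                    __field(size_t, size)
            ),

            TP_fast_assign(
                    __entry->iova = iova;
                    __entry->paddr = paddr;
                    __entry->size = size;
            ),

            TP_printk("IOMMU: iova=0x%016llx-0x%016llx paddr=0x%016llx size=%zu",
                      __entry->iova, __entry->iova + __entry->size,
                      __entry->paddr, __entry->size)
    );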
|
| /linux/drivers/staging/media/ipu3/ |
| A D | ipu3-dmamap.c |
     102  struct iova *iova;   in imgu_dmamap_alloc() local
     109  if (!iova)   in imgu_dmamap_alloc()
     146  __free_iova(&imgu->iova_domain, iova);   in imgu_dmamap_alloc()
     153  struct iova *iova;   in imgu_dmamap_unmap() local
     155  iova = find_iova(&imgu->iova_domain,   in imgu_dmamap_unmap()
     157  if (WARN_ON(!iova))   in imgu_dmamap_unmap()
     163  __free_iova(&imgu->iova_domain, iova);   in imgu_dmamap_unmap()
     189  struct iova *iova;   in imgu_dmamap_map_sg() local
     209  if (!iova)   in imgu_dmamap_map_sg()
     213  iova->pfn_lo, iova->pfn_hi);   in imgu_dmamap_map_sg()
          [all …]
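imgu_dmamap_unmap() shows the reverse lookup: recover the struct iova from a bus address with find_iova(), unmap, then return the range to the allocator. A generic sketch of that flow (the ipu3 driver unmaps through its own MMU; iommu_unmap() stands in here):

    static void example_dmamap_unmap(struct iommu_domain *dom,
                                     struct iova_domain *iovad,
                                     dma_addr_t addr)
    {
            struct iova *iova;

            /* Look the allocation back up by its page frame number. */
            iova = find_iova(iovad, iova_pfn(iovad, addr));
            if (WARN_ON(!iova))
                    return;

            iommu_unmap(dom, iova_dma_addr(iovad, iova),
                        iova_size(iova) << iova_shift(iovad));
            __free_iova(iovad, iova);
    }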
|
| A D | ipu3-mmu.c |
     157  iova >>= IPU3_PAGE_SHIFT;   in address_to_pte_idx()
     160  *l2pt_idx = iova & IPU3_L2PT_MASK;   in address_to_pte_idx()
     162  iova >>= IPU3_L2PT_SHIFT;   in address_to_pte_idx()
     165  *l1pt_idx = iova & IPU3_L1PT_MASK;   in address_to_pte_idx()
     264  iova, &paddr, size);   in imgu_mmu_map()
     269  iova, &paddr, size);   in imgu_mmu_map()
     278  iova += IPU3_PAGE_SIZE;   in imgu_mmu_map()
     333  imgu_mmu_unmap(info, iova, mapped);   in imgu_mmu_map_sg()
     392  iova, size);   in imgu_mmu_unmap()
     408  iova, unmapped_page);   in imgu_mmu_unmap()
          [all …]
|
| /linux/drivers/vfio/ |
| A D | vfio_iommu_type1.c |
     331  if (iova < vpfn->iova)   in vfio_find_vpfn()
     333  else if (iova > vpfn->iova)   in vfio_find_vpfn()
     352  if (new->iova < vpfn->iova)   in vfio_link_pfn()
     376  vpfn->iova = iova;   in vfio_add_to_pfn_list()
     980  entry->iova = *iova;   in unmap_unpin_fast()
    1024  dma_addr_t iova = dma->iova, end = dma->iova + dma->size;   in vfio_unmap_unpin() local
    1204  if (dma->iova < iova)   in vfio_iova_dirty_bitmap()
    1444  dma_addr_t iova = dma->iova;   in vfio_pin_map_dma() local
    1620  dma->iova = iova;   in vfio_dma_do_map()
    1684  iova = dma->iova;   in vfio_iommu_replay()
          [all …]
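The vfio_dma entries searched and populated above are created from userspace through the VFIO_IOMMU_MAP_DMA ioctl. A minimal userspace sketch, assuming a container fd on which the type1 IOMMU has already been enabled:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    static int example_vfio_map(int container, void *vaddr, __u64 iova,
                                __u64 size)
    {
            struct vfio_iommu_type1_dma_map map = {
                    .argsz = sizeof(map),
                    .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
                    .vaddr = (__u64)(uintptr_t)vaddr,
                    .iova = iova,   /* becomes dma->iova in the kernel */
                    .size = size,
            };

            return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
    }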
|
| /linux/drivers/media/pci/intel/ipu6/ |
| A D | ipu6-dma.c |
     160  struct iova *iova;   in ipu6_dma_alloc() local
     173  if (!iova)   in ipu6_dma_alloc()
     181  size, iova->pfn_lo, iova->pfn_hi);   in ipu6_dma_alloc()
     182  for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {   in ipu6_dma_alloc()
     252  if (WARN_ON(!iova))   in ipu6_dma_free()
     333  struct iova *iova = find_iova(&mmu->dmap->iovad,   in ipu6_dma_unmap_sg() local
     342  if (WARN_ON(!iova))   in ipu6_dma_unmap_sg()
     378  iova->pfn_lo, iova->pfn_hi);   in ipu6_dma_unmap_sg()
     396  struct iova *iova;   in ipu6_dma_map_sg() local
     423  if (!iova)   in ipu6_dma_map_sg()
          [all …]
|
| A D | ipu6-mmu.c |
     105  l1_idx, iova, iova + ISP_PAGE_SIZE,   in page_table_dump()
     259  u32 iova_start = iova;   in l2_map()
     268  l1_idx, (u32)iova);   in l2_map()
     350  l1_idx, iova);   in l2_unmap()
     357  iova, l1_idx);   in l2_unmap()
     388  struct iova *iova;   in allocate_trash_buffer() local
     397  if (!iova) {   in allocate_trash_buffer()
     607  dma_addr_t iova)   in ipu6_mmu_iova_to_phys() argument
     733  iova += pgsize;   in ipu6_mmu_map()
     749  struct iova *iova;   in ipu6_mmu_destroy() local
          [all …]
|
| /linux/drivers/infiniband/sw/rxe/ |
| A D | rxe_mr.c |
      35  if (iova < mr->ibmr.iova ||   in mr_check_range()
      36  iova + length > mr->ibmr.iova + mr->ibmr.length) {   in mr_check_range()
      77  return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);   in rxe_mr_iova_to_index()
      82  return iova & (mr_page_size(mr) - 1);   in rxe_mr_iova_to_page_offset()
     345  u64 iova;   in copy_data() local
     397  iova = sge->addr + offset;   in copy_data()
     461  iova += bytes;   in rxe_flush_pmem_iova()
     485  page_offset = iova & (PAGE_SIZE - 1);   in rxe_mr_do_atomic_op()
     486  page = ib_virt_dma_to_page(iova);   in rxe_mr_do_atomic_op()
     543  page = ib_virt_dma_to_page(iova);   in rxe_mr_do_atomic_write()
          [all …]
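rxe_mr_iova_to_index() and rxe_mr_iova_to_page_offset() split an iova into an MR page index and an in-page offset. A worked example, assuming a 4 KiB MR page size (page_shift == 12) and ibmr.iova == 0x10000:

    /* iova = 0x12345                                                 */
    /* index  = (0x12345 >> 12) - (0x10000 >> 12) = 0x12 - 0x10 = 2   */
    /* offset =  0x12345 & (4096 - 1)             = 0x345             */
    /* i.e. byte 0x345 of the third page backing the region.          */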
|
| /linux/drivers/vdpa/vdpa_user/ |
| A D | iova_domain.c |
     109  while (iova <= last) {   in vduse_domain_map_bounce_page()
     118  iova += PAGE_SIZE;   in vduse_domain_map_bounce_page()
     129  while (iova <= last) {   in vduse_domain_unmap_bounce_page()
     132  iova += PAGE_SIZE;   in vduse_domain_unmap_bounce_page()
     189  iova += sz;   in vduse_domain_bounce()
     409  if (!iova)   in vduse_domain_map_page()
     425  return iova;   in vduse_domain_map_page()
     457  if (!iova || !orig)   in vduse_domain_alloc_coherent()
     461  if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,   in vduse_domain_alloc_coherent()
     469  *dma_addr = iova;   in vduse_domain_alloc_coherent()
          [all …]
|
| /linux/drivers/gpu/drm/msm/ |
| A D | msm_iommu.c |
      37  unsigned long iova, phys_addr_t paddr,   in calc_pgsize() argument
      43  unsigned long addr_merge = paddr | iova;   in calc_pgsize()
      73  if ((iova ^ paddr) & (pgsize_next - 1))   in calc_pgsize()
     100  pgsize = calc_pgsize(pagetable, iova, iova, size, &count);   in msm_iommu_pagetable_unmap()
     106  iova += unmapped;   in msm_iommu_pagetable_unmap()
     121  u64 addr = iova;   in msm_iommu_pagetable_map()
     145  msm_iommu_pagetable_unmap(mmu, iova, addr - iova);   in msm_iommu_pagetable_map()
     368  if (iova & BIT_ULL(48))   in msm_iommu_map()
     369  iova |= GENMASK_ULL(63, 49);   in msm_iommu_map()
     381  if (iova & BIT_ULL(48))   in msm_iommu_unmap()
          [all …]
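The BIT_ULL(48)/GENMASK_ULL(63, 49) pair in msm_iommu_map()/msm_iommu_unmap() sign-extends a 49-bit IOVA into a canonical 64-bit address before it reaches the page-table code. Isolated as a helper (a sketch; the function name is invented):

    #include <linux/bits.h>
    #include <linux/types.h>

    static u64 example_sign_extend_49bit(u64 iova)
    {
            /* e.g. 0x0001000000000000 -> 0xffff000000000000 */
            if (iova & BIT_ULL(48))
                    iova |= GENMASK_ULL(63, 49);
            return iova;
    }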
|
| /linux/drivers/iommu/amd/ |
| A D | io_pgtable_v2.c |
     139  pte = &pgd[PM_LEVEL_INDEX(level, iova)];   in v2_alloc_pte()
     140  iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);   in v2_alloc_pte()
     171  pte = &pte[PM_LEVEL_INDEX(level, iova)];   in v2_alloc_pte()
     212  pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];   in fetch_pte()
     241  unsigned long o_iova = iova;   in iommu_v2_map_pages()
     255  iova, map_size, gfp, &updated);   in iommu_v2_map_pages()
     263  iova += map_size;   in iommu_v2_map_pages()
     282  unsigned long iova,   in iommu_v2_unmap_pages() argument
     297  pte = fetch_pte(pgtable, iova, &unmap_size);   in iommu_v2_unmap_pages()
     303  iova = (iova & ~(unmap_size - 1)) + unmap_size;   in iommu_v2_unmap_pages()
          [all …]
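The advance step at line 303 of iommu_v2_unmap_pages() rounds iova down to the boundary of the entry just unmapped, then steps past it, so the loop copes with mixed page sizes. A worked example with an assumed 2 MiB unmap_size:

    /* iova = 0x201000, unmap_size = 0x200000 (2 MiB)                */
    /* (0x201000 & ~(0x200000 - 1)) + 0x200000                       */
    /*   = 0x200000 + 0x200000 = 0x400000 (start of the next entry)  */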
|
| /linux/drivers/gpu/drm/etnaviv/ |
| A D | etnaviv_mmu.c |
      17  unsigned long iova, size_t size)   in etnaviv_context_unmap() argument
      24  iova, size, pgsize);   in etnaviv_context_unmap()
      34  iova += unmapped_page;   in etnaviv_context_unmap()
      43  unsigned long orig_iova = iova;   in etnaviv_context_map()
      60  iova += pgsize;   in etnaviv_context_map()
      75  unsigned int da = iova;   in etnaviv_iommu_map()
     100  etnaviv_context_unmap(context, iova, da - iova);   in etnaviv_iommu_map()
     108  unsigned int da = iova;   in etnaviv_iommu_unmap()
     293  u32 iova;   in etnaviv_iommu_map_gem() local
     297  mapping->iova = iova;   in etnaviv_iommu_map_gem()
          [all …]
|
| /linux/tools/testing/selftests/iommu/ |
| A D | iommufd_fail_nth.c |
     234  __u64 iova;   in TEST_FAIL_NTH() local
     304  __u64 iova;   in TEST_FAIL_NTH() local
     341  __u64 iova;   in TEST_FAIL_NTH() local
     386  __u64 iova;   in TEST_FAIL_NTH() local
     413  .access_rw = { .iova = iova,   in TEST_FAIL_NTH()
     444  .access_rw = { .iova = iova,   in TEST_FAIL_NTH()
     465  __u64 iova;   in TEST_FAIL_NTH() local
     493  .access_pages = { .iova = iova,   in TEST_FAIL_NTH()
     521  __u64 iova;   in TEST_FAIL_NTH() local
     552  .access_pages = { .iova = iova,   in TEST_FAIL_NTH()
          [all …]
|
| A D | iommufd_utils.h |
     305  __u64 iova, size_t page_size,   in _test_cmd_get_dirty_bitmap() argument
     312  .iova = iova,   in _test_cmd_get_dirty_bitmap()
     331  __u64 iova, size_t page_size,   in _test_cmd_mock_domain_set_dirty() argument
     339  .iova = iova,   in _test_cmd_mock_domain_set_dirty()
     358  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
     362  __u64 iova, size_t page_size,   in _test_mock_dirty_bitmaps() argument
     493  size_t length, __u64 *iova, unsigned int flags)   in _test_ioctl_ioas_map() argument
     505  cmd.iova = *iova;   in _test_ioctl_ioas_map()
     508  *iova = cmd.iova;   in _test_ioctl_ioas_map()
     570  .iova = iova,   in _test_ioctl_ioas_unmap()
          [all …]
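_test_ioctl_ioas_map() wraps the IOMMU_IOAS_MAP ioctl: unless IOMMU_IOAS_MAP_FIXED_IOVA is set, the kernel picks the IOVA and writes it back into cmd.iova, which is why the helper copies it out through *iova. A standalone userspace sketch of the same call:

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    static int example_ioas_map(int fd, __u32 ioas_id, void *buf,
                                size_t len, __u64 *iova)
    {
            struct iommu_ioas_map cmd = {
                    .size = sizeof(cmd),
                    .flags = IOMMU_IOAS_MAP_READABLE |
                             IOMMU_IOAS_MAP_WRITEABLE,
                    .ioas_id = ioas_id,
                    .user_va = (uintptr_t)buf,
                    .length = len,
            };
            int rc = ioctl(fd, IOMMU_IOAS_MAP, &cmd);

            if (!rc)
                    *iova = cmd.iova;  /* kernel-chosen IOVA */
            return rc;
    }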
|