/drivers/iommu/amd/

io_pgtable_v2.c
    233  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in iommu_v2_map_pages() argument
    242  size_t size = pgcount << __ffs(pgsize);    in iommu_v2_map_pages()
    246  if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)    in iommu_v2_map_pages()
    286  size_t pgsize, size_t pgcount,    in iommu_v2_unmap_pages() argument
    293  size_t size = pgcount << __ffs(pgsize);    in iommu_v2_unmap_pages()
    296  if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))    in iommu_v2_unmap_pages()

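Nearly every reference in this list comes from an iommu_ops or io_pgtable_ops map_pages()/unmap_pages() callback, where pgsize is a single power-of-two page size (several snippets check it against cfg->pgsize_bitmap) and pgcount is how many pages of that size the call covers, so the byte length is pgcount * pgsize, usually written as pgcount << __ffs(pgsize). Below is a minimal, self-contained user-space sketch of just that arithmetic; lsb_index() is a made-up stand-in for the kernel's __ffs() and is not part of any driver above.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's __ffs(): index of the least significant set
 * bit (only meaningful here for a non-zero, power-of-two pgsize).
 */
static unsigned int lsb_index(unsigned long x)
{
    return (unsigned int)__builtin_ctzl(x);
}

int main(void)
{
    size_t pgsize = 4096;   /* one page size from the domain's pgsize_bitmap */
    size_t pgcount = 512;   /* number of pages of that size in the request */

    /* The idiom seen in the snippets: total bytes covered by the call. */
    size_t size = pgcount << lsb_index(pgsize);

    /* For a power-of-two pgsize the shift is exactly a multiplication. */
    assert(size == pgcount * pgsize);
    printf("pgcount=%zu, pgsize=%zu -> %zu bytes\n", pgcount, pgsize, size);
    return 0;
}
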
io_pgtable.c
    322  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in iommu_v1_map_pages() argument
    330  size_t size = pgcount << __ffs(pgsize);    in iommu_v1_map_pages()
    340  while (pgcount > 0) {    in iommu_v1_map_pages()
    370  pgcount--;    in iommu_v1_map_pages()
    400  size_t pgsize, size_t pgcount,    in iommu_v1_unmap_pages() argument
    407  size_t size = pgcount << __ffs(pgsize);    in iommu_v1_unmap_pages()

iommu.c
    2748  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in amd_iommu_map_pages() argument
    2767  pgcount, prot, gfp, mapped);    in amd_iommu_map_pages()
    2795  size_t pgsize, size_t pgcount,    in amd_iommu_unmap_pages() argument
    2806  r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;    in amd_iommu_unmap_pages()

/drivers/iommu/

sprd-iommu.c
    287  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in sprd_iommu_map() argument
    291  size_t size = pgcount * SPRD_IOMMU_PAGE_SIZE;    in sprd_iommu_map()
    313  for (i = 0; i < pgcount; i++) {    in sprd_iommu_map()
    324  size_t pgsize, size_t pgcount,    in sprd_iommu_unmap() argument
    330  size_t size = pgcount * SPRD_IOMMU_PAGE_SIZE;    in sprd_iommu_unmap()
    340  memset(pgt_base_iova, 0, pgcount * sizeof(u32));    in sprd_iommu_unmap()

io-pgtable-dart.c
    224  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in dart_map_pages() argument
    269  num_entries = min_t(int, pgcount, max_entries);    in dart_map_pages()
    285  size_t pgsize, size_t pgcount,    in dart_unmap_pages() argument
    293  if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))    in dart_unmap_pages()
    306  num_entries = min_t(int, pgcount, max_entries);    in dart_unmap_pages()

io-pgtable-arm.c
    336  unsigned long iova, size_t size, size_t pgcount,
    425  phys_addr_t paddr, size_t size, size_t pgcount,    in __arm_lpae_map() argument
    442  num_entries = min_t(int, pgcount, max_entries);    in __arm_lpae_map()
    477  return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,    in __arm_lpae_map()
    552  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in arm_lpae_map_pages() argument
    574  ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,    in arm_lpae_map_pages()
    626  unsigned long iova, size_t size, size_t pgcount,    in __arm_lpae_unmap() argument
    648  num_entries = min_t(int, pgcount, max_entries);    in __arm_lpae_unmap()
    683  return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);    in __arm_lpae_unmap()
    687  size_t pgsize, size_t pgcount,    in arm_lpae_unmap_pages() argument
    [all …]

mtk_iommu_v1.c
    350  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in mtk_iommu_v1_map() argument
    360  for (i = 0; i < pgcount; i++) {    in mtk_iommu_v1_map()
    372  return i == pgcount ? 0 : -EEXIST;    in mtk_iommu_v1_map()
    376  size_t pgsize, size_t pgcount,    in mtk_iommu_v1_unmap() argument
    382  size_t size = pgcount * MT2701_IOMMU_PAGE_SIZE;    in mtk_iommu_v1_unmap()
    385  memset(pgt_base_iova, 0, pgcount * sizeof(u32));    in mtk_iommu_v1_unmap()

s390-iommu.c
    895  size_t pgsize, size_t pgcount,    in s390_iommu_map_pages() argument
    899  size_t size = pgcount << __ffs(pgsize);    in s390_iommu_map_pages()
    916  pgcount, flags, gfp);    in s390_iommu_map_pages()
    919  atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);    in s390_iommu_map_pages()
    1008  size_t pgsize, size_t pgcount,    in s390_iommu_unmap_pages() argument
    1012  size_t size = pgcount << __ffs(pgsize);    in s390_iommu_unmap_pages()
    1019  rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);    in s390_iommu_unmap_pages()
    1024  atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);    in s390_iommu_unmap_pages()

msm_iommu.c
    486  phys_addr_t pa, size_t pgsize, size_t pgcount,    in msm_iommu_map() argument
    494  ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,    in msm_iommu_map()
    511  size_t pgsize, size_t pgcount,    in msm_iommu_unmap() argument
    519  ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);    in msm_iommu_unmap()

io-pgtable-arm-v7s.c
    508  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in arm_v7s_map_pages() argument
    521  while (pgcount--) {    in arm_v7s_map_pages()
    623  size_t pgsize, size_t pgcount,    in arm_v7s_unmap_pages() argument
    632  while (pgcount--) {    in arm_v7s_unmap_pages()

ipmmu-vmsa.c
    670  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in ipmmu_map() argument
    675  return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount,    in ipmmu_map()
    680  size_t pgsize, size_t pgcount,    in ipmmu_unmap() argument
    685  return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather);    in ipmmu_unmap()

virtio-iommu.c
    841  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in viommu_map_pages() argument
    846  size_t size = pgsize * pgcount;    in viommu_map_pages()
    885  size_t pgsize, size_t pgcount,    in viommu_unmap_pages() argument
    892  size_t size = pgsize * pgcount;    in viommu_unmap_pages()

apple-dart.c
    534  size_t pgcount, int prot, gfp_t gfp,    in apple_dart_map_pages() argument
    543  return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp,    in apple_dart_map_pages()
    549  size_t pgcount,    in apple_dart_unmap_pages() argument
    555  return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);    in apple_dart_unmap_pages()

mtk_iommu.c
    798  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in mtk_iommu_map() argument
    808  return dom->iop->map_pages(dom->iop, iova, paddr, pgsize, pgcount, prot, gfp, mapped);    in mtk_iommu_map()
    812  unsigned long iova, size_t pgsize, size_t pgcount,    in mtk_iommu_unmap() argument
    817  iommu_iotlb_gather_add_range(gather, iova, pgsize * pgcount);    in mtk_iommu_unmap()
    818  return dom->iop->unmap_pages(dom->iop, iova, pgsize, pgcount, gather);    in mtk_iommu_unmap()

/drivers/gpu/drm/panfrost/

panfrost_mmu.c
    408  size_t pgcount, mapped = 0;    in mmu_map_sg() local
    409  size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);    in mmu_map_sg()
    411  ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,    in mmu_map_sg()
    469  size_t unmapped_page, pgcount;    in panfrost_mmu_unmap() local
    470  size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);    in panfrost_mmu_unmap()
    473  pgcount = 1;    in panfrost_mmu_unmap()
    475  unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);    in panfrost_mmu_unmap()
    476  WARN_ON(unmapped_page != pgsize * pgcount);    in panfrost_mmu_unmap()
    478  iova += pgsize * pgcount;    in panfrost_mmu_unmap()
    479  unmapped_len += pgsize * pgcount;    in panfrost_mmu_unmap()

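panfrost_mmu.c (and panthor_mmu.c further down) sit one layer above those callbacks: a file-local get_pgsize() helper splits a page-aligned range into (pgsize, pgcount) chunks, and ops->map_pages()/ops->unmap_pages() is then called once per chunk, advancing by pgsize * pgcount each time. The self-contained sketch below illustrates that chunking idea for an assumed 4 KiB / 2 MiB page-size pair; pick_pgsize() is a hypothetical name, and the real static helpers in those drivers may differ in detail.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000UL
#define SZ_2M 0x200000UL

static size_t pick_pgsize(uint64_t addr, size_t size, size_t *count)
{
    /* Distance from addr up to the next 2 MiB boundary (0 if aligned). */
    size_t to_boundary = -addr % SZ_2M;

    if (to_boundary || size < SZ_2M) {
        /*
         * Not block-aligned (or the tail is too small): use 4 KiB pages,
         * but only up to the next boundary so a later chunk can switch
         * to 2 MiB pages.
         */
        size_t chunk = (to_boundary && to_boundary < size) ? to_boundary : size;

        *count = chunk / SZ_4K;
        return SZ_4K;
    }

    /* Aligned and large enough: as many whole 2 MiB pages as fit. */
    *count = size / SZ_2M;
    return SZ_2M;
}

int main(void)
{
    /* Assumed example range; addresses and length are 4 KiB aligned. */
    uint64_t iova = 0x10000000, paddr = 0x80001000;
    size_t len = 4 * SZ_2M, pgcount;

    while (len) {
        size_t pgsize = pick_pgsize(iova | paddr, len, &pgcount);

        /*
         * A real driver would call ops->map_pages(ops, iova, paddr,
         * pgsize, pgcount, prot, gfp, &mapped) here.
         */
        printf("map %zu page(s) of 0x%zx bytes at iova 0x%llx\n",
               pgcount, pgsize, (unsigned long long)iova);

        iova += pgsize * pgcount;
        paddr += pgsize * pgcount;
        len -= pgsize * pgcount;
    }
    return 0;
}
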
/drivers/hv/

hv_common.c
    476  const int pgcount = hv_output_page_exists() ? 2 : 1;    in hv_common_cpu_init() local
    490  mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);    in hv_common_cpu_init()
    501  ret = set_memory_decrypted((unsigned long)mem, pgcount);    in hv_common_cpu_init()
    507  memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);    in hv_common_cpu_init()

/drivers/iommu/arm/arm-smmu/

qcom_iommu.c
    429  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in qcom_iommu_map() argument
    441  ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_ATOMIC, mapped);    in qcom_iommu_map()
    447  size_t pgsize, size_t pgcount,    in qcom_iommu_unmap() argument
    465  ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather);    in qcom_iommu_unmap()

arm-smmu.c
    1267  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in arm_smmu_map_pages() argument
    1278  ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);    in arm_smmu_map_pages()
    1285  size_t pgsize, size_t pgcount,    in arm_smmu_unmap_pages() argument
    1296  ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);    in arm_smmu_unmap_pages()

/drivers/iommu/iommufd/

selftest.c
    506  size_t pgsize, size_t pgcount, int prot,    in mock_domain_map_pages() argument
    522  for (; pgcount; pgcount--) {    in mock_domain_map_pages()
    528  if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)    in mock_domain_map_pages()
    557  size_t pgcount,    in mock_domain_unmap_pages() argument
    568  for (; pgcount; pgcount--) {    in mock_domain_unmap_pages()
    592  if (pgcount == 1 &&    in mock_domain_unmap_pages()

/drivers/gpu/drm/panthor/

panthor_mmu.c
    897  size_t unmapped_sz = 0, pgcount;    in panthor_vm_unmap_pages() local
    898  size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);    in panthor_vm_unmap_pages()
    900  unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);    in panthor_vm_unmap_pages()
    902  if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) {    in panthor_vm_unmap_pages()
    905  iova + offset + pgsize * pgcount,    in panthor_vm_unmap_pages()
    948  size_t pgcount, mapped = 0;    in panthor_vm_map_pages() local
    949  size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);    in panthor_vm_map_pages()
    951  ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,    in panthor_vm_map_pages()

/drivers/iommu/riscv/

iommu.c
    1190  size_t pgsize, size_t pgcount, int prot,    in riscv_iommu_map_pages() argument
    1207  while (pgcount) {    in riscv_iommu_map_pages()
    1224  --pgcount;    in riscv_iommu_map_pages()
    1246  size_t pgcount,    in riscv_iommu_unmap_pages() argument
    1250  size_t size = pgcount << __ffs(pgsize);    in riscv_iommu_unmap_pages()

/drivers/iommu/intel/

iommu.c
    3594  size_t pgsize, size_t pgcount,    in intel_iommu_map_pages() argument
    3598  size_t size = pgcount << pgshift;    in intel_iommu_map_pages()
    3651  size_t pgsize, size_t pgcount,    in intel_iommu_unmap_pages() argument
    3655  size_t size = pgcount << pgshift;    in intel_iommu_unmap_pages()

/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3.c
    3358  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in arm_smmu_map_pages() argument
    3366  return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);    in arm_smmu_map_pages()
    3370  size_t pgsize, size_t pgcount,    in arm_smmu_unmap_pages() argument
    3379  return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);    in arm_smmu_unmap_pages()