Lines Matching refs:domain in drivers/iommu/iommu.c (Linux IOMMU core)

49 	struct iommu_domain *domain;  member
81 static int __iommu_attach_device(struct iommu_domain *domain,
83 static int __iommu_attach_group(struct iommu_domain *domain,
85 static void __iommu_detach_group(struct iommu_domain *domain,
766 struct iommu_domain *domain = group->default_domain; in iommu_create_device_direct_mappings() local
772 if (!domain || !iommu_is_dma_domain(domain)) in iommu_create_device_direct_mappings()
775 BUG_ON(!domain->pgsize_bitmap); in iommu_create_device_direct_mappings()
777 pg_size = 1UL << __ffs(domain->pgsize_bitmap); in iommu_create_device_direct_mappings()
787 if (domain->ops->apply_resv_region) in iommu_create_device_direct_mappings()
788 domain->ops->apply_resv_region(dev, domain, entry); in iommu_create_device_direct_mappings()
803 phys_addr = iommu_iova_to_phys(domain, addr); in iommu_create_device_direct_mappings()
811 ret = iommu_map(domain, addr - map_size, in iommu_create_device_direct_mappings()
822 iommu_flush_iotlb_all(domain); in iommu_create_device_direct_mappings()
830 static bool iommu_is_attach_deferred(struct iommu_domain *domain, in iommu_is_attach_deferred() argument
833 if (domain->ops->is_attach_deferred) in iommu_is_attach_deferred()
834 return domain->ops->is_attach_deferred(domain, dev); in iommu_is_attach_deferred()
891 if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) in iommu_group_add_device()
892 ret = __iommu_attach_device(group->domain, dev); in iommu_group_add_device()
1254 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); in iommu_page_response() local
1256 if (!domain || !domain->ops->page_response) in iommu_page_response()
1297 ret = domain->ops->page_response(dev, evt, msg); in iommu_page_response()
1551 if (!group->domain) in iommu_group_alloc_default_domain()
1552 group->domain = dom; in iommu_group_alloc_default_domain()
1743 struct iommu_domain *domain = data; in iommu_group_do_dma_attach() local
1746 if (!iommu_is_attach_deferred(domain, dev)) in iommu_group_do_dma_attach()
1747 ret = __iommu_attach_device(domain, dev); in iommu_group_do_dma_attach()
1760 struct iommu_domain *domain = data; in iommu_group_do_probe_finalize() local
1762 if (domain->ops->probe_finalize) in iommu_group_do_probe_finalize()
1763 domain->ops->probe_finalize(dev); in iommu_group_do_probe_finalize()
1929 void iommu_set_fault_handler(struct iommu_domain *domain, in iommu_set_fault_handler() argument
1933 BUG_ON(!domain); in iommu_set_fault_handler()
1935 domain->handler = handler; in iommu_set_fault_handler()
1936 domain->handler_token = token; in iommu_set_fault_handler()
1943 struct iommu_domain *domain; in __iommu_domain_alloc() local
1948 domain = bus->iommu_ops->domain_alloc(type); in __iommu_domain_alloc()
1949 if (!domain) in __iommu_domain_alloc()
1952 domain->ops = bus->iommu_ops; in __iommu_domain_alloc()
1953 domain->type = type; in __iommu_domain_alloc()
1955 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; in __iommu_domain_alloc()
1957 if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { in __iommu_domain_alloc()
1958 iommu_domain_free(domain); in __iommu_domain_alloc()
1959 domain = NULL; in __iommu_domain_alloc()
1961 return domain; in __iommu_domain_alloc()
1970 void iommu_domain_free(struct iommu_domain *domain) in iommu_domain_free() argument
1972 iommu_put_dma_cookie(domain); in iommu_domain_free()
1973 domain->ops->domain_free(domain); in iommu_domain_free()
1977 static int __iommu_attach_device(struct iommu_domain *domain, in __iommu_attach_device() argument
1982 if (unlikely(domain->ops->attach_dev == NULL)) in __iommu_attach_device()
1985 ret = domain->ops->attach_dev(domain, dev); in __iommu_attach_device()
1991 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) in iommu_attach_device() argument
2009 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
2019 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) in iommu_deferred_attach() argument
2021 const struct iommu_ops *ops = domain->ops; in iommu_deferred_attach()
2023 if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) in iommu_deferred_attach()
2024 return __iommu_attach_device(domain, dev); in iommu_deferred_attach()
2085 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, in iommu_uapi_cache_invalidate() argument
2092 if (unlikely(!domain->ops->cache_invalidate)) in iommu_uapi_cache_invalidate()
2133 return domain->ops->cache_invalidate(domain, dev, &inv_info); in iommu_uapi_cache_invalidate()
2195 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, in iommu_uapi_sva_bind_gpasid() argument
2201 if (unlikely(!domain->ops->sva_bind_gpasid)) in iommu_uapi_sva_bind_gpasid()
2208 return domain->ops->sva_bind_gpasid(domain, dev, &data); in iommu_uapi_sva_bind_gpasid()
2212 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, in iommu_sva_unbind_gpasid() argument
2215 if (unlikely(!domain->ops->sva_unbind_gpasid)) in iommu_sva_unbind_gpasid()
2218 return domain->ops->sva_unbind_gpasid(dev, pasid); in iommu_sva_unbind_gpasid()
2222 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, in iommu_uapi_sva_unbind_gpasid() argument
2228 if (unlikely(!domain->ops->sva_bind_gpasid)) in iommu_uapi_sva_unbind_gpasid()
2235 return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); in iommu_uapi_sva_unbind_gpasid()
2239 static void __iommu_detach_device(struct iommu_domain *domain, in __iommu_detach_device() argument
2242 if (iommu_is_attach_deferred(domain, dev)) in __iommu_detach_device()
2245 if (unlikely(domain->ops->detach_dev == NULL)) in __iommu_detach_device()
2248 domain->ops->detach_dev(domain, dev); in __iommu_detach_device()
2252 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) in iommu_detach_device() argument
2266 __iommu_detach_group(domain, group); in iommu_detach_device()
2276 struct iommu_domain *domain; in iommu_get_domain_for_dev() local
2283 domain = group->domain; in iommu_get_domain_for_dev()
2287 return domain; in iommu_get_domain_for_dev()
2312 struct iommu_domain *domain = data; in iommu_group_do_attach_device() local
2314 return __iommu_attach_device(domain, dev); in iommu_group_do_attach_device()
2317 static int __iommu_attach_group(struct iommu_domain *domain, in __iommu_attach_group() argument
2322 if (group->default_domain && group->domain != group->default_domain) in __iommu_attach_group()
2325 ret = __iommu_group_for_each_dev(group, domain, in __iommu_attach_group()
2328 group->domain = domain; in __iommu_attach_group()
2333 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
2338 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
2347 struct iommu_domain *domain = data; in iommu_group_do_detach_device() local
2349 __iommu_detach_device(domain, dev); in iommu_group_do_detach_device()
2354 static void __iommu_detach_group(struct iommu_domain *domain, in __iommu_detach_group() argument
2360 __iommu_group_for_each_dev(group, domain, in __iommu_detach_group()
2362 group->domain = NULL; in __iommu_detach_group()
2366 if (group->domain == group->default_domain) in __iommu_detach_group()
2375 group->domain = group->default_domain; in __iommu_detach_group()
2378 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
2381 __iommu_detach_group(domain, group); in iommu_detach_group()
2386 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) in iommu_iova_to_phys() argument
2388 if (domain->type == IOMMU_DOMAIN_IDENTITY) in iommu_iova_to_phys()
2391 if (domain->type == IOMMU_DOMAIN_BLOCKED) in iommu_iova_to_phys()
2394 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
2398 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, in iommu_pgsize() argument
2407 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); in iommu_pgsize()
2423 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); in iommu_pgsize()
2452 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, in __iommu_map_pages() argument
2456 const struct iommu_ops *ops = domain->ops; in __iommu_map_pages()
2460 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); in __iommu_map_pages()
2466 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, in __iommu_map_pages()
2469 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); in __iommu_map_pages()
2476 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, in __iommu_map() argument
2479 const struct iommu_ops *ops = domain->ops; in __iommu_map()
2487 domain->pgsize_bitmap == 0UL)) in __iommu_map()
2490 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_map()
2494 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_map()
2512 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, in __iommu_map()
2529 iommu_unmap(domain, orig_iova, orig_size - size); in __iommu_map()
2536 static int _iommu_map(struct iommu_domain *domain, unsigned long iova, in _iommu_map() argument
2539 const struct iommu_ops *ops = domain->ops; in _iommu_map()
2542 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); in _iommu_map()
2544 ops->iotlb_sync_map(domain, iova, size); in _iommu_map()
2549 int iommu_map(struct iommu_domain *domain, unsigned long iova, in iommu_map() argument
2553 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); in iommu_map()
2557 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, in iommu_map_atomic() argument
2560 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); in iommu_map_atomic()
2564 static size_t __iommu_unmap_pages(struct iommu_domain *domain, in __iommu_unmap_pages() argument
2568 const struct iommu_ops *ops = domain->ops; in __iommu_unmap_pages()
2571 pgsize = iommu_pgsize(domain, iova, iova, size, &count); in __iommu_unmap_pages()
2573 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : in __iommu_unmap_pages()
2574 ops->unmap(domain, iova, pgsize, iotlb_gather); in __iommu_unmap_pages()
2577 static size_t __iommu_unmap(struct iommu_domain *domain, in __iommu_unmap() argument
2581 const struct iommu_ops *ops = domain->ops; in __iommu_unmap()
2587 domain->pgsize_bitmap == 0UL)) in __iommu_unmap()
2590 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_unmap()
2594 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_unmap()
2614 unmapped_page = __iommu_unmap_pages(domain, iova, in __iommu_unmap()
2631 size_t iommu_unmap(struct iommu_domain *domain, in iommu_unmap() argument
2638 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); in iommu_unmap()
2639 iommu_iotlb_sync(domain, &iotlb_gather); in iommu_unmap()
2645 size_t iommu_unmap_fast(struct iommu_domain *domain, in iommu_unmap_fast() argument
2649 return __iommu_unmap(domain, iova, size, iotlb_gather); in iommu_unmap_fast()
2653 static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, in __iommu_map_sg() argument
2657 const struct iommu_ops *ops = domain->ops; in __iommu_map_sg()
2667 ret = __iommu_map(domain, iova + mapped, start, in __iommu_map_sg()
2689 ops->iotlb_sync_map(domain, iova, mapped); in __iommu_map_sg()
2694 iommu_unmap(domain, iova, mapped); in __iommu_map_sg()
2699 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, in iommu_map_sg() argument
2703 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); in iommu_map_sg()
2707 ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, in iommu_map_sg_atomic() argument
2710 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); in iommu_map_sg_atomic()
2737 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, in report_iommu_fault() argument
2746 if (domain->handler) in report_iommu_fault()
2747 ret = domain->handler(domain, dev, iova, flags, in report_iommu_fault()
2748 domain->handler_token); in report_iommu_fault()
2767 int iommu_enable_nesting(struct iommu_domain *domain) in iommu_enable_nesting() argument
2769 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_enable_nesting()
2771 if (!domain->ops->enable_nesting) in iommu_enable_nesting()
2773 return domain->ops->enable_nesting(domain); in iommu_enable_nesting()
2777 int iommu_set_pgtable_quirks(struct iommu_domain *domain, in iommu_set_pgtable_quirks() argument
2780 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_set_pgtable_quirks()
2782 if (!domain->ops->set_pgtable_quirks) in iommu_set_pgtable_quirks()
2784 return domain->ops->set_pgtable_quirks(domain, quirk); in iommu_set_pgtable_quirks()
2996 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) in iommu_aux_attach_device() argument
3000 if (domain->ops->aux_attach_dev) in iommu_aux_attach_device()
3001 ret = domain->ops->aux_attach_dev(domain, dev); in iommu_aux_attach_device()
3010 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) in iommu_aux_detach_device() argument
3012 if (domain->ops->aux_detach_dev) { in iommu_aux_detach_device()
3013 domain->ops->aux_detach_dev(domain, dev); in iommu_aux_detach_device()
3019 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) in iommu_aux_get_pasid() argument
3023 if (domain->ops->aux_get_pasid) in iommu_aux_get_pasid()
3024 ret = domain->ops->aux_get_pasid(domain, dev); in iommu_aux_get_pasid()
3146 if (group->default_domain != group->domain) { in iommu_change_dev_def_domain()
3232 group->domain = group->default_domain; in iommu_change_dev_def_domain()
3250 group->domain = prev_dom; in iommu_change_dev_def_domain()
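
The matches above trace the exported iommu_domain entry points of the IOMMU core: domain allocation and freeing, device/group attach and detach, the map/unmap paths, iova-to-phys lookup, and fault reporting. As a rough orientation only, the sketch below strings those calls together for one unmanaged domain. It is a minimal sketch assuming the API roughly as it appears in these matches (pre-5.16 iommu core); the device, IOVA, physical address, and all demo_* names are placeholders for illustration and do not come from the listed file.

/*
 * Minimal usage sketch of the iommu_domain API referenced above:
 * iommu_domain_alloc -> iommu_set_fault_handler -> iommu_attach_device ->
 * iommu_map -> iommu_iova_to_phys -> iommu_unmap -> iommu_detach_device ->
 * iommu_domain_free.  Error handling is reduced to the essentials.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/mm.h>

/* Hypothetical fault handler; registered with iommu_set_fault_handler(). */
static int demo_iommu_fault(struct iommu_domain *domain, struct device *dev,
			    unsigned long iova, int flags, void *token)
{
	dev_err(dev, "iommu fault at iova %#lx (flags %#x)\n", iova, flags);
	return -ENOSYS;	/* report only: nonzero means the fault was not handled here */
}

static int demo_map_one_page(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	unsigned long iova = 0x100000;	/* placeholder IOVA */
	int ret;

	/* Allocate an unmanaged domain for the device's bus. */
	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	iommu_set_fault_handler(domain, demo_iommu_fault, NULL);

	/* Attach the device; this attaches its whole IOMMU group. */
	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* Map one page read/write, then read it back through the page table. */
	ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	if (iommu_iova_to_phys(domain, iova) != paddr)
		dev_warn(dev, "readback mismatch for iova %#lx\n", iova);

	iommu_unmap(domain, iova, PAGE_SIZE);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}

The scatterlist variants in the matches (iommu_map_sg, iommu_map_sg_atomic) and the batched unmap path (iommu_unmap_fast followed by iommu_iotlb_sync) follow the same pattern with ranges and deferred TLB invalidation instead of a single synchronous map/unmap.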