Lines Matching refs:domain (drivers/iommu/exynos-iommu.c)

235	struct iommu_domain *domain;	/* domain this device is attached */	member
251	struct iommu_domain domain;	/* generic domain data structure */	member
294	struct exynos_iommu_domain *domain;	/* domain we belong to */	member
421 return container_of(dom, struct exynos_iommu_domain, domain); in to_exynos_domain()
583 if (data->domain) { in exynos_sysmmu_irq()
584 ret = report_iommu_fault(&data->domain->domain, data->master, in exynos_sysmmu_irq()
840 if (data->domain) { in exynos_sysmmu_suspend()
858 if (data->domain) { in exynos_sysmmu_resume()
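The exynos_sysmmu_irq()/suspend/resume hits above all test the same back-pointer: each SYSMMU instance records the domain it currently serves in data->domain, and the IRQ, suspend and resume paths act only while that pointer is non-NULL. A minimal user-space sketch of the pattern, with illustrative types rather than the driver's real ones:

#include <stdio.h>

struct domain { const char *name; };

struct sysmmu_data {
	struct domain *domain;	/* NULL while detached */
};

static void sysmmu_irq(struct sysmmu_data *data)
{
	if (data->domain)	/* mirror the data->domain guard above */
		printf("fault reported to domain %s\n", data->domain->name);
}

int main(void)
{
	struct domain dom = { "dom0" };
	struct sysmmu_data data = { NULL };

	sysmmu_irq(&data);	/* detached: nothing to report */
	data.domain = &dom;
	sysmmu_irq(&data);	/* attached: fault reaches the domain */
	return 0;
}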
899 struct exynos_iommu_domain *domain; in exynos_iommu_domain_alloc() local
909 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in exynos_iommu_domain_alloc()
910 if (!domain) in exynos_iommu_domain_alloc()
913 domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); in exynos_iommu_domain_alloc()
914 if (!domain->pgtable) in exynos_iommu_domain_alloc()
917 domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); in exynos_iommu_domain_alloc()
918 if (!domain->lv2entcnt) in exynos_iommu_domain_alloc()
923 domain->pgtable[i] = ZERO_LV2LINK; in exynos_iommu_domain_alloc()
925 handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, in exynos_iommu_domain_alloc()
928 BUG_ON(handle != virt_to_phys(domain->pgtable)); in exynos_iommu_domain_alloc()
932 spin_lock_init(&domain->lock); in exynos_iommu_domain_alloc()
933 spin_lock_init(&domain->pgtablelock); in exynos_iommu_domain_alloc()
934 INIT_LIST_HEAD(&domain->clients); in exynos_iommu_domain_alloc()
936 domain->domain.geometry.aperture_start = 0; in exynos_iommu_domain_alloc()
937 domain->domain.geometry.aperture_end = ~0UL; in exynos_iommu_domain_alloc()
938 domain->domain.geometry.force_aperture = true; in exynos_iommu_domain_alloc()
940 return &domain->domain; in exynos_iommu_domain_alloc()
943 free_pages((unsigned long)domain->lv2entcnt, 1); in exynos_iommu_domain_alloc()
945 free_pages((unsigned long)domain->pgtable, 2); in exynos_iommu_domain_alloc()
947 kfree(domain); in exynos_iommu_domain_alloc()
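exynos_iommu_domain_alloc() builds the domain's software state: an order-2 page allocation for the first-level table (16 KiB, i.e. 4096 32-bit entries assuming 4 KiB pages), an order-1 zeroed allocation for the per-slot lv2 entry counters, every lv1 slot initialised to ZERO_LV2LINK, and a dma_map_single() so the hardware walker sees the table. A user-space sketch of just the allocation and initialisation, with the sizes as stated assumptions:

#include <stdint.h>
#include <stdlib.h>

typedef uintptr_t sysmmu_pte_t;	/* pointer-sized here; 32-bit in hardware */

#define NUM_LV1ENTRIES 4096	/* assumed: 16 KiB order-2 alloc / 4 B entry */
#define ZERO_LV2LINK   ((sysmmu_pte_t)0)

struct domain_tables {
	sysmmu_pte_t *pgtable;	/* first-level table */
	short *lv2entcnt;	/* free-slot counters, one per lv1 slot */
};

static int domain_tables_alloc(struct domain_tables *d)
{
	d->pgtable = malloc(NUM_LV1ENTRIES * sizeof(*d->pgtable));
	if (!d->pgtable)
		return -1;

	d->lv2entcnt = calloc(NUM_LV1ENTRIES, sizeof(*d->lv2entcnt));
	if (!d->lv2entcnt) {
		free(d->pgtable);	/* unwind, as the error path above does */
		return -1;
	}

	for (int i = 0; i < NUM_LV1ENTRIES; i++)
		d->pgtable[i] = ZERO_LV2LINK;	/* no lv2 table linked yet */
	return 0;
}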
953 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_domain_free() local
958 WARN_ON(!list_empty(&domain->clients)); in exynos_iommu_domain_free()
960 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_domain_free()
962 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_domain_free()
966 data->domain = NULL; in exynos_iommu_domain_free()
971 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_domain_free()
973 dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE, in exynos_iommu_domain_free()
977 if (lv1ent_page(domain->pgtable + i)) { in exynos_iommu_domain_free()
978 phys_addr_t base = lv2table_base(domain->pgtable + i); in exynos_iommu_domain_free()
986 free_pages((unsigned long)domain->pgtable, 2); in exynos_iommu_domain_free()
987 free_pages((unsigned long)domain->lv2entcnt, 1); in exynos_iommu_domain_free()
988 kfree(domain); in exynos_iommu_domain_free()
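Teardown in exynos_iommu_domain_free() runs in reverse order: clients are detached under domain->lock, the first-level table is unmapped from the DMA API, every lv1 slot that still links a second-level table (lv1ent_page()) has that table freed via its lv2table_base(), and only then do the lv1 table and counters go. A condensed model in which an lv1 entry simply stores the lv2 table's address, with 0 standing in for ZERO_LV2LINK:

#include <stdint.h>
#include <stdlib.h>

#define NUM_LV1ENTRIES 4096

typedef uintptr_t sysmmu_pte_t;	/* entry stores the lv2 table address */

static void domain_tables_free(sysmmu_pte_t *pgtable, short *lv2entcnt)
{
	/* lv1ent_page()/lv2table_base() analogue: a nonzero entry
	 * links a second-level table that must be freed first */
	for (int i = 0; i < NUM_LV1ENTRIES; i++)
		if (pgtable[i])
			free((void *)pgtable[i]);

	free(pgtable);		/* then the first-level table ... */
	free(lv2entcnt);	/* ... and its counters */
}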
994 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_detach_device() local
996 phys_addr_t pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_detach_device()
1000 if (!has_sysmmu(dev) || owner->domain != iommu_domain) in exynos_iommu_detach_device()
1012 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_detach_device()
1013 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_detach_device()
1016 data->domain = NULL; in exynos_iommu_detach_device()
1020 owner->domain = NULL; in exynos_iommu_detach_device()
1021 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_detach_device()
1032 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_attach_device() local
1035 phys_addr_t pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_attach_device()
1041 if (owner->domain) in exynos_iommu_attach_device()
1042 exynos_iommu_detach_device(owner->domain, dev); in exynos_iommu_attach_device()
1046 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_attach_device()
1050 data->domain = domain; in exynos_iommu_attach_device()
1051 list_add_tail(&data->domain_node, &domain->clients); in exynos_iommu_attach_device()
1054 owner->domain = iommu_domain; in exynos_iommu_attach_device()
1055 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_attach_device()
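Attach and detach are symmetric list operations under domain->lock: detaching clears each instance's data->domain back-pointer, unlinks it from domain->clients and clears owner->domain, while attaching first detaches from any previous domain (the owner->domain check above) and then reverses those steps. A sketch of that ordering, with one SYSMMU per owner and a singly linked list standing in for list_head:

#include <stddef.h>

struct domain;

struct sysmmu_data {
	struct domain *domain;		/* back-pointer, NULL when detached */
	struct sysmmu_data *next;	/* stand-in for list_head */
};

struct domain {
	struct sysmmu_data *clients;	/* head of the client list */
};

struct owner {
	struct domain *domain;		/* domain the master is attached to */
	struct sysmmu_data *sysmmu;	/* single instance, for brevity */
};

static void detach(struct owner *o)
{
	if (!o->domain)
		return;
	o->sysmmu->domain = NULL;	/* clear the back-pointer */
	o->domain->clients = NULL;	/* drop from client list (one entry) */
	o->domain = NULL;
}

static void attach(struct owner *o, struct domain *d)
{
	detach(o);			/* re-attach implies detach first */
	o->sysmmu->domain = d;
	o->sysmmu->next = d->clients;	/* push onto the client list */
	d->clients = o->sysmmu;
	o->domain = d;
}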
1072 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, in alloc_lv2entry() argument
1120 spin_lock(&domain->lock); in alloc_lv2entry()
1121 list_for_each_entry(data, &domain->clients, domain_node) in alloc_lv2entry()
1123 spin_unlock(&domain->lock); in alloc_lv2entry()
1130 static int lv1set_section(struct exynos_iommu_domain *domain, in lv1set_section() argument
1153 spin_lock(&domain->lock); in lv1set_section()
1160 list_for_each_entry(data, &domain->clients, domain_node) in lv1set_section()
1163 spin_unlock(&domain->lock); in lv1set_section()
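alloc_lv2entry() and lv1set_section() finish the same way: after a first-level descriptor changes they take domain->lock and walk domain->clients so every attached SYSMMU flushes the stale entry. The plain spin_lock() suffices because both are called from exynos_iommu_map() with pgtablelock already held irqsave. A pthread rendition of the iterate-clients-under-lock pattern, with the flush reduced to a printf():

#include <pthread.h>
#include <stdio.h>

struct client { struct client *next; int id; };

struct dom {
	pthread_mutex_t lock;
	struct client *clients;
};

static void sync_all_clients(struct dom *d, unsigned long iova)
{
	pthread_mutex_lock(&d->lock);
	for (struct client *c = d->clients; c; c = c->next)
		printf("flush iova %#lx on sysmmu %d\n", iova, c->id);
	pthread_mutex_unlock(&d->lock);
}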
1232 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_map() local
1238 BUG_ON(domain->pgtable == NULL); in exynos_iommu_map()
1241 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_map()
1243 entry = section_entry(domain->pgtable, iova); in exynos_iommu_map()
1246 ret = lv1set_section(domain, entry, iova, paddr, prot, in exynos_iommu_map()
1247 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1251 pent = alloc_lv2entry(domain, entry, iova, in exynos_iommu_map()
1252 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1258 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1265 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_map()
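exynos_iommu_map() dispatches on the mapping size under pgtablelock: a section-sized request programs the lv1 entry directly through lv1set_section(), while smaller pages go through a second-level table that alloc_lv2entry() creates on demand, with the matching lv2entcnt slot passed along for accounting. A sketch of that dispatch; the 1 MiB/64 KiB/4 KiB granularities are the usual exynos SYSMMU sizes, assumed here rather than taken from the listing:

#include <stddef.h>

#define SZ_4K  0x1000UL
#define SZ_64K 0x10000UL
#define SZ_1M  0x100000UL

/* Stand-ins for the lv1set_section() and alloc_lv2entry()+lv2 write
 * paths; they only mark which level would be programmed. */
static int set_section(unsigned long iova, unsigned long paddr)
{
	(void)iova; (void)paddr;
	return 0;
}

static int set_page(unsigned long iova, unsigned long paddr, size_t size)
{
	(void)iova; (void)paddr; (void)size;
	return 0;
}

static int map_one(unsigned long iova, unsigned long paddr, size_t size)
{
	if (size == SZ_1M)			/* section: programmed in lv1 */
		return set_section(iova, paddr);
	if (size == SZ_64K || size == SZ_4K)	/* lv2, allocated on demand */
		return set_page(iova, paddr, size);
	return -1;				/* unsupported mapping size */
}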
1270 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, in exynos_iommu_tlb_invalidate_entry() argument
1276 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1278 list_for_each_entry(data, &domain->clients, domain_node) in exynos_iommu_tlb_invalidate_entry()
1281 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1288 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_unmap() local
1294 BUG_ON(domain->pgtable == NULL); in exynos_iommu_unmap()
1296 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1298 ent = section_entry(domain->pgtable, iova); in exynos_iommu_unmap()
1330 domain->lv2entcnt[lv1ent_offset(iova)] += 1; in exynos_iommu_unmap()
1348 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; in exynos_iommu_unmap()
1350 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1352 exynos_iommu_tlb_invalidate_entry(domain, iova, size); in exynos_iommu_unmap()
1356 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
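The lv2entcnt updates in exynos_iommu_unmap() are free-slot accounting: unmapping a small page returns one slot to its lv1 slot's counter, a large page returns SPAGES_PER_LPAGE at once, and the range is then flushed via exynos_iommu_tlb_invalidate_entry(). A toy model, with both constants stated as assumptions (64 KiB large pages, 256-entry lv2 tables):

#define SPAGES_PER_LPAGE 16	/* assumed: 64 KiB large page / 4 KiB */
#define NUM_LV2ENTRIES   256	/* assumed: 1 MiB section / 4 KiB */

/* lv2entcnt[i] counts free slots in the lv2 table behind lv1 slot i */
static void account_unmap(short *lv2entcnt, int lv1_idx, int large)
{
	lv2entcnt[lv1_idx] += large ? SPAGES_PER_LPAGE : 1;
}

static int lv2_table_unused(const short *lv2entcnt, int lv1_idx)
{
	return lv2entcnt[lv1_idx] == NUM_LV2ENTRIES;	/* fully free */
}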
1367 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_iova_to_phys() local
1372 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1374 entry = section_entry(domain->pgtable, iova); in exynos_iommu_iova_to_phys()
1387 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
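exynos_iommu_iova_to_phys() is a software walk of the same two-level table the hardware reads: the lv1 slot selected by the top iova bits either encodes a 1 MiB section directly or links a second-level table indexed by the middle bits. A condensed user-space walk with explicit tags instead of the real descriptor encoding (which this listing does not show); 64 KiB large-page entries are omitted for brevity, and iovas are assumed 32-bit:

#include <stdint.h>

#define SECT_MASK  0xFFF00000UL	/* 1 MiB section base */
#define SPAGE_MASK 0xFFFFF000UL	/* 4 KiB small page base */

typedef struct {
	enum { ZERO, SECTION, TABLE } kind;	/* explicit tags for clarity */
	uintptr_t val;				/* phys base or lv2 pointer */
} lv1ent;

static uintptr_t iova_to_phys(const lv1ent *pgtable, uintptr_t iova)
{
	const lv1ent *e = &pgtable[iova >> 20];		/* 1 MiB per lv1 slot */

	if (e->kind == SECTION)
		return (e->val & SECT_MASK) | (iova & ~SECT_MASK);

	if (e->kind == TABLE) {
		const uintptr_t *lv2 = (const uintptr_t *)e->val;
		uintptr_t pte = lv2[(iova >> 12) & 0xFF];	/* 256 slots */
		if (pte)
			return (pte & SPAGE_MASK) | (iova & ~SPAGE_MASK);
	}
	return 0;					/* not mapped */
}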
1423 if (owner->domain) { in exynos_iommu_release_device()
1428 WARN_ON(owner->domain != in exynos_iommu_release_device()
1431 exynos_iommu_detach_device(owner->domain, dev); in exynos_iommu_release_device()