Lines Matching refs:domain

139 	return container_of(dom, struct protection_domain, domain);  in to_pdomain()
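
to_pdomain() works because struct protection_domain embeds the generic struct iommu_domain as its `domain` member (the same member that receives the geometry assignments further down in amd_iommu_domain_alloc()). A minimal sketch of that container_of pattern, with the struct abbreviated to the one field this listing shows:

#include <linux/kernel.h>	/* container_of() */
#include <linux/iommu.h>	/* struct iommu_domain */

struct protection_domain {
	/* ... driver-private fields elided ... */
	struct iommu_domain domain;	/* handle shared with the IOMMU core */
};

static struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
	return container_of(dom, struct protection_domain, domain);
}
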
377 if (dev_data->domain) in amd_iommu_uninit_device()
501 if (!report_iommu_fault(&dev_data->domain->domain, in amd_iommu_report_page_fault()
1260 static void __domain_flush_pages(struct protection_domain *domain, in __domain_flush_pages() argument
1267 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
1270 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1280 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1291 static void domain_flush_pages(struct protection_domain *domain, in domain_flush_pages() argument
1295 __domain_flush_pages(domain, address, size, pde); in domain_flush_pages()
1328 __domain_flush_pages(domain, address, flush_size, pde); in domain_flush_pages()
1335 void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain) in amd_iommu_domain_flush_tlb_pde() argument
1337 domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); in amd_iommu_domain_flush_tlb_pde()
1340 void amd_iommu_domain_flush_complete(struct protection_domain *domain) in amd_iommu_domain_flush_complete() argument
1345 if (domain && !domain->dev_iommu[i]) in amd_iommu_domain_flush_complete()
1357 static void domain_flush_np_cache(struct protection_domain *domain, in domain_flush_np_cache() argument
1363 spin_lock_irqsave(&domain->lock, flags); in domain_flush_np_cache()
1364 domain_flush_pages(domain, iova, size, 1); in domain_flush_np_cache()
1365 amd_iommu_domain_flush_complete(domain); in domain_flush_np_cache()
1366 spin_unlock_irqrestore(&domain->lock, flags); in domain_flush_np_cache()
1374 static void domain_flush_devices(struct protection_domain *domain) in domain_flush_devices() argument
1378 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
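
The flush helpers above share one pattern, visible in domain_flush_np_cache(): take domain->lock, invalidate the affected IOTLB range on every IOMMU that has devices in the domain (units with a zero dev_iommu[] count are skipped), then wait for the command queues to drain. A condensed sketch of that sequence; the wrapper name is illustrative, not part of the driver:

static void flush_range_sync(struct protection_domain *domain,
			     u64 iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);
	domain_flush_pages(domain, iova, size, 1);	/* pde = 1: include page-directory entries */
	amd_iommu_domain_flush_complete(domain);	/* wait for every IOMMU to finish */
	spin_unlock_irqrestore(&domain->lock, flags);
}
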
1446 static void free_gcr3_table(struct protection_domain *domain) in free_gcr3_table() argument
1448 if (domain->glx == 2) in free_gcr3_table()
1449 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1450 else if (domain->glx == 1) in free_gcr3_table()
1451 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1453 BUG_ON(domain->glx != 0); in free_gcr3_table()
1455 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
1458 static void set_dte_entry(u16 devid, struct protection_domain *domain, in set_dte_entry() argument
1465 if (domain->iop.mode != PAGE_MODE_NONE) in set_dte_entry()
1466 pte_root = iommu_virt_to_phys(domain->iop.root); in set_dte_entry()
1468 pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1484 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
1485 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); in set_dte_entry()
1486 u64 glx = domain->glx; in set_dte_entry()
1511 flags |= domain->id; in set_dte_entry()
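
set_dte_entry() builds the device table entry from domain state: the host page-table root and paging mode, optionally the guest CR3 table and level count when the domain runs in IOMMUv2 (PASID) mode, and the domain ID in the flags word. A condensed sketch of the root/mode part only; the helper name is illustrative and DEV_ENTRY_MODE_SHIFT is assumed from the driver's headers rather than shown in this listing:

static u64 dte_root_from_domain(struct protection_domain *domain)
{
	u64 pte_root = 0;

	/* Physical root of the host (v1) page table, if one exists. */
	if (domain->iop.mode != PAGE_MODE_NONE)
		pte_root = iommu_virt_to_phys(domain->iop.root);

	/* Encode the paging mode next to the root pointer. */
	pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;

	return pte_root;
}
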
1539 struct protection_domain *domain) in do_attach() argument
1548 dev_data->domain = domain; in do_attach()
1549 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1552 domain->dev_iommu[iommu->index] += 1; in do_attach()
1553 domain->dev_cnt += 1; in do_attach()
1556 set_dte_entry(dev_data->devid, domain, in do_attach()
1565 struct protection_domain *domain = dev_data->domain; in do_detach() local
1571 dev_data->domain = NULL; in do_detach()
1580 amd_iommu_domain_flush_tlb_pde(domain); in do_detach()
1583 amd_iommu_domain_flush_complete(domain); in do_detach()
1586 domain->dev_iommu[iommu->index] -= 1; in do_detach()
1587 domain->dev_cnt -= 1; in do_detach()
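
do_attach()/do_detach() keep the accounting the flush paths rely on: dev_iommu[] counts devices per IOMMU unit (so units with no devices can be skipped) and dev_cnt counts them per domain. A sketch of the two halves of that bookkeeping; locking, DTE updates and flushes are elided, and the function names are illustrative:

static void domain_account_attach(struct iommu_dev_data *dev_data,
				  struct protection_domain *domain,
				  struct amd_iommu *iommu)
{
	dev_data->domain = domain;
	list_add(&dev_data->list, &domain->dev_list);

	/* One more device behind this IOMMU unit, one more in the domain. */
	domain->dev_iommu[iommu->index] += 1;
	domain->dev_cnt += 1;
}

static void domain_account_detach(struct iommu_dev_data *dev_data,
				  struct protection_domain *domain,
				  struct amd_iommu *iommu)
{
	dev_data->domain = NULL;
	list_del(&dev_data->list);

	domain->dev_iommu[iommu->index] -= 1;
	domain->dev_cnt -= 1;
}
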
1635 struct protection_domain *domain) in attach_device() argument
1642 spin_lock_irqsave(&domain->lock, flags); in attach_device()
1649 if (dev_data->domain != NULL) in attach_device()
1656 if (domain->flags & PD_IOMMUV2_MASK) { in attach_device()
1680 do_attach(dev_data, domain); in attach_device()
1687 amd_iommu_domain_flush_tlb_pde(domain); in attach_device()
1689 amd_iommu_domain_flush_complete(domain); in attach_device()
1694 spin_unlock_irqrestore(&domain->lock, flags); in attach_device()
1704 struct protection_domain *domain; in detach_device() local
1709 domain = dev_data->domain; in detach_device()
1711 spin_lock_irqsave(&domain->lock, flags); in detach_device()
1721 if (WARN_ON(!dev_data->domain)) in detach_device()
1729 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
1739 spin_unlock_irqrestore(&domain->lock, flags); in detach_device()
1808 static void update_device_table(struct protection_domain *domain) in update_device_table() argument
1812 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
1813 set_dte_entry(dev_data->devid, domain, in update_device_table()
1819 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain) in amd_iommu_update_and_flush_device_table() argument
1821 update_device_table(domain); in amd_iommu_update_and_flush_device_table()
1822 domain_flush_devices(domain); in amd_iommu_update_and_flush_device_table()
1825 void amd_iommu_domain_update(struct protection_domain *domain) in amd_iommu_domain_update() argument
1828 amd_iommu_update_and_flush_device_table(domain); in amd_iommu_domain_update()
1831 amd_iommu_domain_flush_tlb_pde(domain); in amd_iommu_domain_update()
1832 amd_iommu_domain_flush_complete(domain); in amd_iommu_domain_update()
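
amd_iommu_domain_update() fixes the ordering when a live domain's page-table configuration changes: rewrite the DTE of every attached device and flush the device-table caches first, then invalidate the domain's cached translations and wait for completion. Restated as a sketch (the wrapper name is illustrative):

static void domain_push_update(struct protection_domain *domain)
{
	/* The new page-table root/mode must reach the device table
	 * entries before stale translations are dropped ...
	 */
	amd_iommu_update_and_flush_device_table(domain);

	/* ... then flush the whole IOTLB, including page-directory
	 * entries, and wait for every IOMMU to acknowledge.
	 */
	amd_iommu_domain_flush_tlb_pde(domain);
	amd_iommu_domain_flush_complete(domain);
}
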
1871 static void cleanup_domain(struct protection_domain *domain) in cleanup_domain() argument
1876 spin_lock_irqsave(&domain->lock, flags); in cleanup_domain()
1878 while (!list_empty(&domain->dev_list)) { in cleanup_domain()
1879 entry = list_first_entry(&domain->dev_list, in cleanup_domain()
1881 BUG_ON(!entry->domain); in cleanup_domain()
1885 spin_unlock_irqrestore(&domain->lock, flags); in cleanup_domain()
1888 static void protection_domain_free(struct protection_domain *domain) in protection_domain_free() argument
1890 if (!domain) in protection_domain_free()
1893 if (domain->id) in protection_domain_free()
1894 domain_id_free(domain->id); in protection_domain_free()
1896 if (domain->iop.pgtbl_cfg.tlb) in protection_domain_free()
1897 free_io_pgtable_ops(&domain->iop.iop.ops); in protection_domain_free()
1899 kfree(domain); in protection_domain_free()
1902 static int protection_domain_init_v1(struct protection_domain *domain, int mode) in protection_domain_init_v1() argument
1908 spin_lock_init(&domain->lock); in protection_domain_init_v1()
1909 domain->id = domain_id_alloc(); in protection_domain_init_v1()
1910 if (!domain->id) in protection_domain_init_v1()
1912 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_init_v1()
1920 amd_iommu_domain_set_pgtable(domain, pt_root, mode); in protection_domain_init_v1()
1928 struct protection_domain *domain; in protection_domain_alloc() local
1933 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in protection_domain_alloc()
1934 if (!domain) in protection_domain_alloc()
1950 ret = protection_domain_init_v1(domain, mode); in protection_domain_alloc()
1959 pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain); in protection_domain_alloc()
1963 return domain; in protection_domain_alloc()
1965 kfree(domain); in protection_domain_alloc()
1971 struct protection_domain *domain; in amd_iommu_domain_alloc() local
1973 domain = protection_domain_alloc(type); in amd_iommu_domain_alloc()
1974 if (!domain) in amd_iommu_domain_alloc()
1977 domain->domain.geometry.aperture_start = 0; in amd_iommu_domain_alloc()
1978 domain->domain.geometry.aperture_end = ~0ULL; in amd_iommu_domain_alloc()
1979 domain->domain.geometry.force_aperture = true; in amd_iommu_domain_alloc()
1981 return &domain->domain; in amd_iommu_domain_alloc()
1986 struct protection_domain *domain; in amd_iommu_domain_free() local
1988 domain = to_pdomain(dom); in amd_iommu_domain_free()
1990 if (domain->dev_cnt > 0) in amd_iommu_domain_free()
1991 cleanup_domain(domain); in amd_iommu_domain_free()
1993 BUG_ON(domain->dev_cnt != 0); in amd_iommu_domain_free()
1998 if (domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_free()
1999 free_gcr3_table(domain); in amd_iommu_domain_free()
2001 protection_domain_free(domain); in amd_iommu_domain_free()
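
Taken together, the hits above outline the domain lifecycle: protection_domain_alloc() kzalloc()s the structure, protection_domain_init_v1() initializes the lock, allocates a domain ID and the device list, alloc_io_pgtable_ops() attaches the page-table implementation, and amd_iommu_domain_alloc() publishes the embedded iommu_domain with a full 64-bit aperture; amd_iommu_domain_free() unwinds the same state. A sketch of the v1 init step under the assumption that ID exhaustion and page allocation are the only failure paths (helper name illustrative):

static int pdom_init_v1_sketch(struct protection_domain *domain, int mode)
{
	u64 *pt_root = NULL;

	spin_lock_init(&domain->lock);
	domain->id = domain_id_alloc();
	if (!domain->id)
		return -ENOMEM;
	INIT_LIST_HEAD(&domain->dev_list);

	/* A direct-mapped (PAGE_MODE_NONE) domain needs no root table. */
	if (mode != PAGE_MODE_NONE) {
		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pt_root) {
			domain_id_free(domain->id);
			return -ENOMEM;
		}
	}

	amd_iommu_domain_set_pgtable(domain, pt_root, mode);
	return 0;
}
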
2014 if (dev_data->domain != NULL) in amd_iommu_detach_device()
2033 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_attach_device() local
2048 if (dev_data->domain) in amd_iommu_attach_device()
2051 ret = attach_device(dev, domain); in amd_iommu_attach_device()
2070 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_iotlb_sync_map() local
2071 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_iotlb_sync_map()
2074 domain_flush_np_cache(domain, iova, size); in amd_iommu_iotlb_sync_map()
2081 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_map() local
2082 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_map()
2087 (domain->iop.mode == PAGE_MODE_NONE)) in amd_iommu_map()
2101 static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain, in amd_iommu_iotlb_gather_add_page() argument
2117 iommu_iotlb_sync(domain, gather); in amd_iommu_iotlb_gather_add_page()
2126 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_unmap() local
2127 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_unmap()
2131 (domain->iop.mode == PAGE_MODE_NONE)) in amd_iommu_unmap()
2144 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_iova_to_phys() local
2145 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_iova_to_phys()
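
The map, unmap and iova_to_phys hits all reach the shared io-pgtable code through the ops embedded in the domain, and they refuse to touch page tables when the domain has none (PAGE_MODE_NONE). A sketch of that guard on the unmap side, using the pre-unmap_pages io_pgtable_ops callback; the function name is illustrative:

static size_t pdom_unmap_sketch(struct iommu_domain *dom, unsigned long iova,
				size_t size, struct iommu_iotlb_gather *gather)
{
	struct protection_domain *domain = to_pdomain(dom);
	struct io_pgtable_ops *ops = &domain->iop.iop.ops;

	/* A domain without a v1 page table has nothing to unmap. */
	if (domain->iop.mode == PAGE_MODE_NONE)
		return 0;

	return ops->unmap ? ops->unmap(ops, iova, size, gather) : 0;
}
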
2218 bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, in amd_iommu_is_attach_deferred() argument
2227 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) in amd_iommu_flush_iotlb_all() argument
2229 struct protection_domain *dom = to_pdomain(domain); in amd_iommu_flush_iotlb_all()
2238 static void amd_iommu_iotlb_sync(struct iommu_domain *domain, in amd_iommu_iotlb_sync() argument
2241 struct protection_domain *dom = to_pdomain(domain); in amd_iommu_iotlb_sync()
2317 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_direct_map() local
2320 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_direct_map()
2322 if (domain->iop.pgtbl_cfg.tlb) in amd_iommu_domain_direct_map()
2323 free_io_pgtable_ops(&domain->iop.iop.ops); in amd_iommu_domain_direct_map()
2325 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_direct_map()
2331 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_enable_v2() local
2342 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_enable_v2()
2350 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_enable_v2()
2354 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); in amd_iommu_domain_enable_v2()
2355 if (domain->gcr3_tbl == NULL) in amd_iommu_domain_enable_v2()
2358 domain->glx = levels; in amd_iommu_domain_enable_v2()
2359 domain->flags |= PD_IOMMUV2_MASK; in amd_iommu_domain_enable_v2()
2361 amd_iommu_domain_update(domain); in amd_iommu_domain_enable_v2()
2366 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_enable_v2()
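
amd_iommu_domain_enable_v2() switches a domain to IOMMUv2 (PASID) operation: under domain->lock it refuses if devices are already attached or v2 is already on, allocates one zeroed page as the root GCR3 table, records the level count in glx, sets PD_IOMMUV2_MASK and pushes the new state out through amd_iommu_domain_update(); free_gcr3_table() (lines 1446-1455 above) later walks glx levels and frees the root page. A sketch of the enable path under those assumptions (helper name illustrative):

static int pdom_enable_v2_sketch(struct protection_domain *domain, int levels)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);

	/* Cannot convert a domain that already has devices or is already v2. */
	ret = -EBUSY;
	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
		goto out;

	ret = -ENOMEM;
	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!domain->gcr3_tbl)
		goto out;

	domain->glx    = levels;
	domain->flags |= PD_IOMMUV2_MASK;

	amd_iommu_domain_update(domain);	/* rewrite DTEs, flush TLBs */
	ret = 0;
out:
	spin_unlock_irqrestore(&domain->lock, flags);
	return ret;
}
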
2372 static int __flush_pasid(struct protection_domain *domain, u32 pasid, in __flush_pasid() argument
2379 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
2382 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); in __flush_pasid()
2389 if (domain->dev_iommu[i] == 0) in __flush_pasid()
2398 amd_iommu_domain_flush_complete(domain); in __flush_pasid()
2401 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2424 amd_iommu_domain_flush_complete(domain); in __flush_pasid()
2433 static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid, in __amd_iommu_flush_page() argument
2436 return __flush_pasid(domain, pasid, address, false); in __amd_iommu_flush_page()
2442 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_page() local
2446 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
2447 ret = __amd_iommu_flush_page(domain, pasid, address); in amd_iommu_flush_page()
2448 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
2454 static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid) in __amd_iommu_flush_tlb() argument
2456 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, in __amd_iommu_flush_tlb()
2462 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_tlb() local
2466 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
2467 ret = __amd_iommu_flush_tlb(domain, pasid); in amd_iommu_flush_tlb()
2468 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
2506 static int __set_gcr3(struct protection_domain *domain, u32 pasid, in __set_gcr3() argument
2511 if (domain->iop.mode != PAGE_MODE_NONE) in __set_gcr3()
2514 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
2520 return __amd_iommu_flush_tlb(domain, pasid); in __set_gcr3()
2523 static int __clear_gcr3(struct protection_domain *domain, u32 pasid) in __clear_gcr3() argument
2527 if (domain->iop.mode != PAGE_MODE_NONE) in __clear_gcr3()
2530 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
2536 return __amd_iommu_flush_tlb(domain, pasid); in __clear_gcr3()
2542 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_set_gcr3() local
2546 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
2547 ret = __set_gcr3(domain, pasid, cr3); in amd_iommu_domain_set_gcr3()
2548 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
2556 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_clear_gcr3() local
2560 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
2561 ret = __clear_gcr3(domain, pasid); in amd_iommu_domain_clear_gcr3()
2562 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
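
amd_iommu_flush_page(), amd_iommu_flush_tlb(), amd_iommu_domain_set_gcr3() and amd_iommu_domain_clear_gcr3() all share one wrapper shape: take domain->lock, delegate to the __-prefixed helper, drop the lock and return its result. A sketch of that shape for the set_gcr3 case (wrapper name illustrative):

static int pdom_set_gcr3_locked(struct iommu_domain *dom, u32 pasid,
				unsigned long cr3)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __set_gcr3(domain, pasid, cr3);	/* writes the GCR3 PTE, flushes the PASID */
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
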
3136 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, in irq_remapping_alloc() argument
3163 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in irq_remapping_alloc()
3205 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
3238 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
3245 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_alloc()
3249 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, in irq_remapping_free() argument
3258 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_free()
3267 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_free()
3275 static int irq_remapping_activate(struct irq_domain *domain, in irq_remapping_activate() argument
3292 static void irq_remapping_deactivate(struct irq_domain *domain, in irq_remapping_deactivate() argument
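
The trailing hits are a different "domain" entirely: the interrupt-remapping irq_domain, not struct protection_domain. Its alloc/free paths walk the allocated IRQ range via irq_domain_get_irq_data() and fall through to the common irq_domain helpers. A sketch of the free side, with the per-IRQ teardown reduced to a comment (function name illustrative):

static void irq_remap_free_sketch(struct irq_domain *domain,
				  unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			/* release the remap-table slot and chip data here */
		}
	}

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
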