Lines matching refs: iommu (cross-reference listing: each entry gives the source line number, the matching source line, the enclosing function, and where applicable the kind of reference, i.e. member, argument, or local)
309 struct intel_iommu *iommu; /* the corresponding iommu */ member
394 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
396 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
399 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
401 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
404 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
408 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
410 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
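The three status helpers above (lines 394-410) share one pattern: init_translation_status() samples the global status register once, and the pre-enabled state is then carried as a software flag that the other two helpers test or clear. A minimal standalone sketch of that pattern, using hypothetical names and a plain field standing in for readl(iommu->reg + DMAR_GSTS_REG):

#include <stdbool.h>
#include <stdint.h>

#define FLAG_TRANS_PRE_ENABLED  (1u << 0)   /* stand-in for VTD_FLAG_TRANS_PRE_ENABLED */
#define GSTS_TES                (1u << 31)  /* stand-in for the "translation enabled" status bit */

struct fake_iommu {
        uint32_t flags;
        uint32_t gsts;          /* stand-in for readl(iommu->reg + DMAR_GSTS_REG) */
};

static void init_translation_status(struct fake_iommu *iommu)
{
        if (iommu->gsts & GSTS_TES)     /* firmware left translation enabled */
                iommu->flags |= FLAG_TRANS_PRE_ENABLED;
}

static bool translation_pre_enabled(struct fake_iommu *iommu)
{
        return iommu->flags & FLAG_TRANS_PRE_ENABLED;
}

static void clear_translation_pre_enabled(struct fake_iommu *iommu)
{
        iommu->flags &= ~FLAG_TRANS_PRE_ENABLED;
}

int main(void)
{
        struct fake_iommu iommu = { .gsts = GSTS_TES };

        init_translation_status(&iommu);
        if (translation_pre_enabled(&iommu))
                clear_translation_pre_enabled(&iommu);  /* mirrors the takeover path */
        return 0;
}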
463 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) in get_iommu_domain() argument
468 domains = iommu->domains[idx]; in get_iommu_domain()
475 static void set_iommu_domain(struct intel_iommu *iommu, u16 did, in set_iommu_domain() argument
481 if (!iommu->domains[idx]) { in set_iommu_domain()
483 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC); in set_iommu_domain()
486 domains = iommu->domains[idx]; in set_iommu_domain()
547 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
552 sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_agaw()
565 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
567 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
575 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
577 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
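iommu_calculate_agaw() and iommu_calculate_max_sagaw() both funnel into __iommu_calculate_agaw(), which picks an adjusted guest address width out of the SAGAW capability bits. A hedged sketch of that selection, with the bit meanings stated as assumptions:

#include <stdio.h>

/*
 * Hypothetical mirror of __iommu_calculate_agaw(): sagaw is a bitmask of
 * supported adjusted widths (assumed here: bit 1 ~ 39-bit, bit 2 ~ 48-bit,
 * bit 3 ~ 57-bit); walk down from the requested maximum and take the
 * highest width the hardware advertises.
 */
static int calculate_agaw(unsigned long sagaw, int max_agaw)
{
        int agaw;

        for (agaw = max_agaw; agaw >= 0; agaw--)
                if (sagaw & (1UL << agaw))
                        break;

        return agaw;    /* -1 means nothing suitable is supported */
}

int main(void)
{
        /* hardware advertising 48-bit and 39-bit support (bits 2 and 1) */
        printf("agaw = %d\n", calculate_agaw(0x6, 3));  /* prints "agaw = 2" */
        return 0;
}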
598 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) in iommu_paging_structure_coherency() argument
600 return sm_supported(iommu) ? in iommu_paging_structure_coherency()
601 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
607 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
625 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
626 if (!iommu_paging_structure_coherency(iommu)) { in domain_update_iommu_coherency()
637 struct intel_iommu *iommu; in domain_update_iommu_snooping() local
641 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_snooping()
642 if (iommu != skip) { in domain_update_iommu_snooping()
649 if (!sm_supported(iommu) && in domain_update_iommu_snooping()
650 !ecap_sc_support(iommu->ecap)) { in domain_update_iommu_snooping()
665 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
673 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
674 if (iommu != skip) { in domain_update_iommu_superpage()
676 if (!cap_fl1gp_support(iommu->cap)) in domain_update_iommu_superpage()
679 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
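domain_update_iommu_superpage() reduces a starting mask by AND-ing in what every active unit reports through cap_super_page_val(), and drops 1 GiB pages for first-level use when cap_fl1gp_support() is absent, so a domain only ever uses page sizes all units accept. A small sketch of that intersection, with invented inputs:

#include <stdio.h>

/*
 * Hypothetical helper: intersect per-unit superpage masks.  Bit 0 could
 * mean 2 MiB support and bit 1 could mean 1 GiB support; the exact
 * encoding is an assumption here.
 */
static unsigned int common_superpage_mask(const unsigned int *unit_masks,
                                          int nr_units, unsigned int start_mask)
{
        unsigned int mask = start_mask;
        int i;

        for (i = 0; i < nr_units; i++)
                mask &= unit_masks[i];  /* keep only sizes every unit supports */

        return mask;
}

int main(void)
{
        unsigned int masks[] = { 0x3, 0x1 };    /* one unit lacks 1 GiB pages */

        printf("%#x\n", common_superpage_mask(masks, 2, 0x3)); /* prints 0x1 */
        return 0;
}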
768 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
771 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
776 if (sm_supported(iommu)) { in iommu_context_addr()
790 context = alloc_pgtable_page(iommu->node); in iommu_context_addr()
794 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
797 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
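iommu_context_addr() walks root_entry[bus] to a per-bus page of context entries and allocates that page on demand when its last argument is non-zero (callers in this listing pass 0 for lookup-only and 1 when mapping), flushing the new memory when the hardware is not cache coherent. A simplified userspace sketch of the same two-level, allocate-on-demand lookup (cache flushing and the scalable-mode upper/lower split are left out):

#include <stdlib.h>

struct ctx_entry  { unsigned long lo, hi; };
struct root_entry { struct ctx_entry *ctx; };   /* one per bus number */

static struct ctx_entry *context_addr(struct root_entry *root,
                                      unsigned char bus, unsigned char devfn,
                                      int alloc)
{
        struct root_entry *re = &root[bus];

        if (!re->ctx) {
                if (!alloc)
                        return NULL;            /* lookup-only caller */
                re->ctx = calloc(256, sizeof(*re->ctx));
                if (!re->ctx)
                        return NULL;
        }
        return &re->ctx[devfn];
}

int main(void)
{
        struct root_entry root[256] = { 0 };
        struct ctx_entry *ce = context_addr(root, 0x00, 0x10, 1);

        if (ce)
                ce->lo = 0x1;           /* e.g. mark the entry present */
        free(root[0x00].ctx);
        return 0;
}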
864 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) in iommu_is_dummy() argument
866 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
885 struct intel_iommu *iommu; in device_to_iommu() local
907 for_each_iommu(iommu, drhd) { in device_to_iommu()
941 iommu = NULL; in device_to_iommu()
943 if (iommu_is_dummy(iommu, dev)) in device_to_iommu()
944 iommu = NULL; in device_to_iommu()
948 return iommu; in device_to_iommu()
958 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
964 spin_lock_irqsave(&iommu->lock, flags); in device_context_mapped()
965 context = iommu_context_addr(iommu, bus, devfn, 0); in device_context_mapped()
968 spin_unlock_irqrestore(&iommu->lock, flags); in device_context_mapped()
972 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
978 spin_lock_irqsave(&iommu->lock, flags); in free_context_table()
979 if (!iommu->root_entry) { in free_context_table()
983 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
987 if (!sm_supported(iommu)) in free_context_table()
990 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
995 free_pgtable_page(iommu->root_entry); in free_context_table()
996 iommu->root_entry = NULL; in free_context_table()
998 spin_unlock_irqrestore(&iommu->lock, flags); in free_context_table()
1002 static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u8 devfn) in pgtable_walk() argument
1009 info = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn); in pgtable_walk()
1042 void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, in dmar_fault_dump_ptes() argument
1053 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
1056 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
1062 if (sm_supported(iommu)) in dmar_fault_dump_ptes()
1069 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); in dmar_fault_dump_ptes()
1079 if (!sm_supported(iommu)) in dmar_fault_dump_ptes()
1108 pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn); in dmar_fault_dump_ptes()
1417 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1422 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
1425 iommu->name); in iommu_alloc_root_entry()
1429 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1431 spin_lock_irqsave(&iommu->lock, flags); in iommu_alloc_root_entry()
1432 iommu->root_entry = root; in iommu_alloc_root_entry()
1433 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_alloc_root_entry()
1438 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1444 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1445 if (sm_supported(iommu)) in iommu_set_root_entry()
1448 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1449 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1451 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1454 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1457 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1459 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1460 if (sm_supported(iommu)) in iommu_set_root_entry()
1461 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1462 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
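iommu_set_root_entry() follows the register-programming pattern used by most helpers in this listing: take register_lock, write the address and the GCMD command bit, spin in IOMMU_WAIT_OP() until the matching GSTS status bit reports completion, and only then issue the context/IOTLB (and, in scalable mode, PASID-cache) flushes. A toy sketch of the write-then-poll step, with a fake device that acknowledges immediately:

#include <stdint.h>

#define GCMD_SRTP       (1u << 30)      /* stand-in command bit ("set root table pointer") */
#define GSTS_RTPS       (1u << 30)      /* stand-in status bit raised when the command completes */

static volatile uint32_t fake_gcmd, fake_gsts;

static void mmio_write32(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;
        /* fake hardware: acknowledge at once so the poll below terminates */
        if (reg == &fake_gcmd && (val & GCMD_SRTP))
                fake_gsts |= GSTS_RTPS;
}

static uint32_t mmio_read32(volatile uint32_t *reg)
{
        return *reg;
}

static void set_root_pointer(uint32_t gcmd_shadow)
{
        /* in the driver this runs under iommu->register_lock */
        mmio_write32(&fake_gcmd, gcmd_shadow | GCMD_SRTP);

        /* IOMMU_WAIT_OP() equivalent; a real driver bounds this with a timeout */
        while (!(mmio_read32(&fake_gsts) & GSTS_RTPS))
                ;
}

int main(void)
{
        set_root_pointer(0);
        return 0;
}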
1465 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1470 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1473 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1474 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1477 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1480 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1484 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1507 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1508 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1511 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1514 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1518 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1521 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1547 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1550 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1553 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1556 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1557 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1560 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1563 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1575 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1582 if (!iommu->qi) in iommu_support_dev_iotlb()
1586 if (info->iommu == iommu && info->bus == bus && in iommu_support_dev_iotlb()
1639 if (!ecap_dit(info->iommu->ecap)) in iommu_enable_dev_iotlb()
1709 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in __iommu_flush_dev_iotlb()
1734 static void domain_flush_piotlb(struct intel_iommu *iommu, in domain_flush_piotlb() argument
1738 u16 did = domain->iommu_did[iommu->seq_id]; in domain_flush_piotlb()
1741 qi_flush_piotlb(iommu, did, domain->default_pasid, in domain_flush_piotlb()
1745 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih); in domain_flush_piotlb()
1748 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1755 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iotlb_psi()
1763 domain_flush_piotlb(iommu, domain, addr, pages, ih); in iommu_flush_iotlb_psi()
1770 if (!cap_pgsel_inv(iommu->cap) || in iommu_flush_iotlb_psi()
1771 mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1772 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1775 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1783 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1788 static inline void __mapping_notify_one(struct intel_iommu *iommu, in __mapping_notify_one() argument
1796 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain)) in __mapping_notify_one()
1797 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1799 iommu_flush_write_buffer(iommu); in __mapping_notify_one()
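The caching-mode test in __mapping_notify_one() (and again in iommu_flush_iotlb_psi() and intel_flush_iotlb_all()) encodes one recurring rule: when cap_caching_mode() is set the hardware may cache not-present entries, so even a map operation needs a real IOTLB flush; otherwise draining the write buffer is enough. A hedged sketch of just that decision, with stand-in predicates and no-op callbacks:

#include <stdbool.h>

/* Stand-ins for cap_caching_mode() and domain_use_first_level(). */
static void notify_mapping(bool caching_mode, bool first_level,
                           void (*flush_iotlb)(void),
                           void (*flush_write_buffer)(void))
{
        if (caching_mode && !first_level)
                flush_iotlb();          /* stale not-present entries may be cached */
        else
                flush_write_buffer();   /* ordinary hardware only needs the buffer drained */
}

static void do_flush_iotlb(void) { }
static void do_flush_wb(void)    { }

int main(void)
{
        notify_mapping(true, false, do_flush_iotlb, do_flush_wb);
        return 0;
}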
1808 struct intel_iommu *iommu = g_iommus[idx]; in intel_flush_iotlb_all() local
1809 u16 did = dmar_domain->iommu_did[iommu->seq_id]; in intel_flush_iotlb_all()
1812 domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0); in intel_flush_iotlb_all()
1814 iommu->flush.flush_iotlb(iommu, did, 0, 0, in intel_flush_iotlb_all()
1817 if (!cap_caching_mode(iommu->cap)) in intel_flush_iotlb_all()
1818 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), in intel_flush_iotlb_all()
1823 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1828 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1831 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1832 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1834 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1837 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1840 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1843 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1848 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1849 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1850 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1853 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1856 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1859 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1864 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1865 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1868 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1869 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1870 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1873 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1876 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1879 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1884 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1886 iommu->name, ndomains); in iommu_init_domains()
1889 spin_lock_init(&iommu->lock); in iommu_init_domains()
1891 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); in iommu_init_domains()
1892 if (!iommu->domain_ids) in iommu_init_domains()
1896 iommu->domains = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1898 if (iommu->domains) { in iommu_init_domains()
1900 iommu->domains[0] = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1903 if (!iommu->domains || !iommu->domains[0]) { in iommu_init_domains()
1905 iommu->name); in iommu_init_domains()
1906 kfree(iommu->domain_ids); in iommu_init_domains()
1907 kfree(iommu->domains); in iommu_init_domains()
1908 iommu->domain_ids = NULL; in iommu_init_domains()
1909 iommu->domains = NULL; in iommu_init_domains()
1919 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1928 if (sm_supported(iommu)) in iommu_init_domains()
1929 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
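iommu_init_domains() sizes everything from cap_ndoms(): a domain_ids bitmap with one bit per hardware domain ID and a two-level domains pointer array filled in lazily (see set_iommu_domain() at line 475). Domain ID 0 is reserved up front, and FLPT_DEFAULT_DID is additionally reserved when scalable mode is supported. A compact sketch of the bitmap side only, with hypothetical sizes and a hypothetical reserved-ID value:

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)

static void bitmap_set_bit(unsigned long *map, unsigned int bit)
{
        map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

/* Hypothetical init: allocate ndomains bits and reserve the IDs that must
 * never be handed out to an ordinary domain. */
static unsigned long *init_domain_ids(unsigned int ndomains, int scalable_mode)
{
        unsigned int nlongs = (ndomains + BITS_PER_LONG - 1) / BITS_PER_LONG;
        unsigned long *ids = calloc(nlongs, sizeof(*ids));

        if (!ids)
                return NULL;

        bitmap_set_bit(ids, 0);                 /* ID 0 is never used for a real domain */
        if (scalable_mode)
                bitmap_set_bit(ids, 1);         /* stand-in for FLPT_DEFAULT_DID */
        return ids;
}

int main(void)
{
        unsigned long *ids = init_domain_ids(256, 1);
        int ok = (ids != NULL);

        free(ids);
        return ok ? 0 : 1;
}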
1934 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1939 if (!iommu->domains || !iommu->domain_ids) in disable_dmar_iommu()
1944 if (info->iommu != iommu) in disable_dmar_iommu()
1954 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1955 iommu_disable_translation(iommu); in disable_dmar_iommu()
1958 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1960 if ((iommu->domains) && (iommu->domain_ids)) { in free_dmar_iommu()
1961 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8; in free_dmar_iommu()
1965 kfree(iommu->domains[i]); in free_dmar_iommu()
1966 kfree(iommu->domains); in free_dmar_iommu()
1967 kfree(iommu->domain_ids); in free_dmar_iommu()
1968 iommu->domains = NULL; in free_dmar_iommu()
1969 iommu->domain_ids = NULL; in free_dmar_iommu()
1972 g_iommus[iommu->seq_id] = NULL; in free_dmar_iommu()
1975 free_context_table(iommu); in free_dmar_iommu()
1978 if (pasid_supported(iommu)) { in free_dmar_iommu()
1979 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1980 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1982 if (vccap_pasid(iommu->vccap)) in free_dmar_iommu()
1983 ioasid_unregister_allocator(&iommu->pasid_allocator); in free_dmar_iommu()
2027 struct intel_iommu *iommu) in domain_attach_iommu() argument
2033 assert_spin_locked(&iommu->lock); in domain_attach_iommu()
2035 domain->iommu_refcnt[iommu->seq_id] += 1; in domain_attach_iommu()
2036 if (domain->iommu_refcnt[iommu->seq_id] == 1) { in domain_attach_iommu()
2037 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
2038 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
2041 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
2042 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_attach_iommu()
2046 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
2047 set_iommu_domain(iommu, num, domain); in domain_attach_iommu()
2049 domain->iommu_did[iommu->seq_id] = num; in domain_attach_iommu()
2050 domain->nid = iommu->node; in domain_attach_iommu()
2059 struct intel_iommu *iommu) in domain_detach_iommu() argument
2064 assert_spin_locked(&iommu->lock); in domain_detach_iommu()
2066 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_detach_iommu()
2067 if (domain->iommu_refcnt[iommu->seq_id] == 0) { in domain_detach_iommu()
2068 num = domain->iommu_did[iommu->seq_id]; in domain_detach_iommu()
2069 clear_bit(num, iommu->domain_ids); in domain_detach_iommu()
2070 set_iommu_domain(iommu, num, NULL); in domain_detach_iommu()
2073 domain->iommu_did[iommu->seq_id] = 0; in domain_detach_iommu()
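domain_attach_iommu() and domain_detach_iommu() manage a per-unit reference count under iommu->lock: the first attach finds a free ID with find_first_zero_bit(), sets it in domain_ids and records it in iommu_did[], and the last detach clears the bit and the cached ID again. A standalone sketch of that allocate-on-first-use / release-on-last-use pattern, with a plain array standing in for the bitmap:

#include <stdbool.h>
#include <stdio.h>

#define NDOMAINS 16             /* hypothetical cap_ndoms() value */

struct fake_unit {
        bool ids_in_use[NDOMAINS];      /* stand-in for the domain_ids bitmap */
};

struct fake_domain {
        int refcnt;             /* stand-in for iommu_refcnt[seq_id] */
        int did;                /* stand-in for iommu_did[seq_id] */
};

static int domain_attach(struct fake_domain *d, struct fake_unit *u)
{
        if (++d->refcnt == 1) {                 /* first device on this unit */
                int num;

                for (num = 1; num < NDOMAINS; num++)    /* ID 0 stays reserved */
                        if (!u->ids_in_use[num])
                                break;
                if (num == NDOMAINS) {
                        d->refcnt--;
                        return -1;              /* no free domain IDs */
                }
                u->ids_in_use[num] = true;
                d->did = num;
        }
        return 0;
}

static void domain_detach(struct fake_domain *d, struct fake_unit *u)
{
        if (--d->refcnt == 0) {                 /* last device gone */
                u->ids_in_use[d->did] = false;
                d->did = 0;
        }
}

int main(void)
{
        struct fake_unit unit = { 0 };
        struct fake_domain dom = { 0 };

        domain_attach(&dom, &unit);
        printf("allocated did %d\n", dom.did);  /* prints 1 */
        domain_detach(&dom, &unit);
        return 0;
}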
2158 struct intel_iommu *iommu, in domain_context_mapping_one() argument
2162 u16 did = domain->iommu_did[iommu->seq_id]; in domain_context_mapping_one()
2180 spin_lock(&iommu->lock); in domain_context_mapping_one()
2183 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
2203 if (did_old < cap_ndoms(iommu->cap)) { in domain_context_mapping_one()
2204 iommu->flush.flush_context(iommu, did_old, in domain_context_mapping_one()
2208 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
2215 if (sm_supported(iommu)) { in domain_context_mapping_one()
2232 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2248 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2255 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2269 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
2277 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
2286 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
2287 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
2291 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
2293 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
2300 spin_unlock(&iommu->lock); in domain_context_mapping_one()
2308 struct intel_iommu *iommu; member
2317 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2327 struct intel_iommu *iommu; in domain_context_mapping() local
2330 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
2331 if (!iommu) in domain_context_mapping()
2337 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2341 data.iommu = iommu; in domain_context_mapping()
2351 struct intel_iommu *iommu = opaque; in domain_context_mapped_cb() local
2353 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_mapped_cb()
2358 struct intel_iommu *iommu; in domain_context_mapped() local
2361 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapped()
2362 if (!iommu) in domain_context_mapped()
2366 return device_context_mapped(iommu, bus, devfn); in domain_context_mapped()
2369 domain_context_mapped_cb, iommu); in domain_context_mapped()
2543 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one() local
2548 if (!iommu) in domain_context_clear_one()
2551 spin_lock_irqsave(&iommu->lock, flags); in domain_context_clear_one()
2552 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2554 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2558 if (sm_supported(iommu)) { in domain_context_clear_one()
2562 did_old = info->domain->iommu_did[iommu->seq_id]; in domain_context_clear_one()
2568 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
2569 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2570 iommu->flush.flush_context(iommu, in domain_context_clear_one()
2576 if (sm_supported(iommu)) in domain_context_clear_one()
2577 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0); in domain_context_clear_one()
2579 iommu->flush.flush_iotlb(iommu, in domain_context_clear_one()
2612 if (unlikely(!dev || !dev->iommu)) in find_domain()
2639 static int domain_setup_first_level(struct intel_iommu *iommu, in domain_setup_first_level() argument
2652 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2670 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, in domain_setup_first_level()
2671 domain->iommu_did[iommu->seq_id], in domain_setup_first_level()
2681 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, in dmar_insert_one_dev_info() argument
2698 info->segment = iommu->segment; in dmar_insert_one_dev_info()
2712 info->iommu = iommu; in dmar_insert_one_dev_info()
2720 if (ecap_dev_iotlb_support(iommu->ecap) && in dmar_insert_one_dev_info()
2725 if (sm_supported(iommu)) { in dmar_insert_one_dev_info()
2726 if (pasid_supported(iommu)) { in dmar_insert_one_dev_info()
2732 if (info->ats_supported && ecap_prs(iommu->ecap) && in dmar_insert_one_dev_info()
2759 spin_lock(&iommu->lock); in dmar_insert_one_dev_info()
2760 ret = domain_attach_iommu(domain, iommu); in dmar_insert_one_dev_info()
2761 spin_unlock(&iommu->lock); in dmar_insert_one_dev_info()
2776 if (dev && dev_is_pci(dev) && sm_supported(iommu)) { in dmar_insert_one_dev_info()
2785 spin_lock_irqsave(&iommu->lock, flags); in dmar_insert_one_dev_info()
2787 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_insert_one_dev_info()
2790 ret = domain_setup_first_level(iommu, domain, dev, in dmar_insert_one_dev_info()
2793 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_insert_one_dev_info()
2795 spin_unlock_irqrestore(&iommu->lock, flags); in dmar_insert_one_dev_info()
2888 struct intel_iommu *iommu; in domain_add_dev_info() local
2891 iommu = device_to_iommu(dev, &bus, &devfn); in domain_add_dev_info()
2892 if (!iommu) in domain_add_dev_info()
2895 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
3009 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
3017 if (!iommu->qi) { in intel_iommu_init_qi()
3021 dmar_fault(-1, iommu); in intel_iommu_init_qi()
3026 dmar_disable_qi(iommu); in intel_iommu_init_qi()
3029 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
3033 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
3034 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
3036 iommu->name); in intel_iommu_init_qi()
3038 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
3039 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
3040 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
3044 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
3066 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
3096 new_ce = alloc_pgtable_page(iommu->node); in copy_context_table()
3110 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
3111 set_bit(did, iommu->domain_ids); in copy_context_table()
3137 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
3146 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
3157 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
3159 new_ext = !!ecap_ecs(iommu->ecap); in copy_translation_tables()
3186 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
3190 iommu->name, bus); in copy_translation_tables()
3195 spin_lock_irqsave(&iommu->lock, flags); in copy_translation_tables()
3204 iommu->root_entry[bus].lo = val; in copy_translation_tables()
3211 iommu->root_entry[bus].hi = val; in copy_translation_tables()
3214 spin_unlock_irqrestore(&iommu->lock, flags); in copy_translation_tables()
3218 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
3231 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_alloc() local
3234 if (!iommu) in intel_vcmd_ioasid_alloc()
3244 if (vcmd_alloc_pasid(iommu, &ioasid)) in intel_vcmd_ioasid_alloc()
3252 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_free() local
3254 if (!iommu) in intel_vcmd_ioasid_free()
3264 vcmd_free_pasid(iommu, ioasid); in intel_vcmd_ioasid_free()
3267 static void register_pasid_allocator(struct intel_iommu *iommu) in register_pasid_allocator() argument
3273 if (!cap_caching_mode(iommu->cap)) in register_pasid_allocator()
3276 if (!sm_supported(iommu)) { in register_pasid_allocator()
3288 if (!vccap_pasid(iommu->vccap)) in register_pasid_allocator()
3292 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc; in register_pasid_allocator()
3293 iommu->pasid_allocator.free = intel_vcmd_ioasid_free; in register_pasid_allocator()
3294 iommu->pasid_allocator.pdata = (void *)iommu; in register_pasid_allocator()
3295 if (ioasid_register_allocator(&iommu->pasid_allocator)) { in register_pasid_allocator()
3310 struct intel_iommu *iommu; in init_dmars() local
3347 for_each_iommu(iommu, drhd) { in init_dmars()
3349 iommu_disable_translation(iommu); in init_dmars()
3358 if (pasid_supported(iommu)) { in init_dmars()
3359 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
3365 g_iommus[iommu->seq_id] = iommu; in init_dmars()
3367 intel_iommu_init_qi(iommu); in init_dmars()
3369 ret = iommu_init_domains(iommu); in init_dmars()
3373 init_translation_status(iommu); in init_dmars()
3375 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
3376 iommu_disable_translation(iommu); in init_dmars()
3377 clear_translation_pre_enabled(iommu); in init_dmars()
3379 iommu->name); in init_dmars()
3387 ret = iommu_alloc_root_entry(iommu); in init_dmars()
3391 if (translation_pre_enabled(iommu)) { in init_dmars()
3394 ret = copy_translation_tables(iommu); in init_dmars()
3406 iommu->name); in init_dmars()
3407 iommu_disable_translation(iommu); in init_dmars()
3408 clear_translation_pre_enabled(iommu); in init_dmars()
3411 iommu->name); in init_dmars()
3415 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
3417 intel_svm_check(iommu); in init_dmars()
3425 for_each_active_iommu(iommu, drhd) { in init_dmars()
3426 iommu_flush_write_buffer(iommu); in init_dmars()
3428 register_pasid_allocator(iommu); in init_dmars()
3430 iommu_set_root_entry(iommu); in init_dmars()
3453 for_each_iommu(iommu, drhd) { in init_dmars()
3460 iommu_disable_protect_mem_regions(iommu); in init_dmars()
3464 iommu_flush_write_buffer(iommu); in init_dmars()
3467 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
3473 ret = intel_svm_enable_prq(iommu); in init_dmars()
3479 ret = dmar_set_interrupt(iommu); in init_dmars()
3487 for_each_active_iommu(iommu, drhd) { in init_dmars()
3488 disable_dmar_iommu(iommu); in init_dmars()
3489 free_dmar_iommu(iommu); in init_dmars()
3602 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
3604 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
3605 if (iommu->qi) in init_iommu_hw()
3606 dmar_reenable_qi(iommu); in init_iommu_hw()
3608 for_each_iommu(iommu, drhd) { in init_iommu_hw()
3615 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3619 iommu_flush_write_buffer(iommu); in init_iommu_hw()
3620 iommu_set_root_entry(iommu); in init_iommu_hw()
3621 iommu_enable_translation(iommu); in init_iommu_hw()
3622 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3631 struct intel_iommu *iommu; in iommu_flush_all() local
3633 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
3634 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
3636 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
3644 struct intel_iommu *iommu = NULL; in iommu_suspend() local
3647 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3648 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), in iommu_suspend()
3650 if (!iommu->iommu_state) in iommu_suspend()
3656 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3657 iommu_disable_translation(iommu); in iommu_suspend()
3659 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
3661 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
3662 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
3663 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
3664 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
3665 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
3666 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
3667 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
3668 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
3670 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
3675 for_each_active_iommu(iommu, drhd) in iommu_suspend()
3676 kfree(iommu->iommu_state); in iommu_suspend()
3684 struct intel_iommu *iommu = NULL; in iommu_resume() local
3695 for_each_active_iommu(iommu, drhd) { in iommu_resume()
3697 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
3699 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
3700 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
3701 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
3702 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
3703 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
3704 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
3705 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
3706 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
3708 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
3711 for_each_active_iommu(iommu, drhd) in iommu_resume()
3712 kfree(iommu->iommu_state); in iommu_resume()
3937 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
3939 if (g_iommus[iommu->seq_id]) in intel_iommu_add()
3942 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu); in intel_iommu_add()
3946 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
3948 iommu->name); in intel_iommu_add()
3951 if (!ecap_sc_support(iommu->ecap) && in intel_iommu_add()
3952 domain_update_iommu_snooping(iommu)) { in intel_iommu_add()
3954 iommu->name); in intel_iommu_add()
3957 sp = domain_update_iommu_superpage(NULL, iommu) - 1; in intel_iommu_add()
3958 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
3960 iommu->name); in intel_iommu_add()
3967 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
3968 iommu_disable_translation(iommu); in intel_iommu_add()
3970 g_iommus[iommu->seq_id] = iommu; in intel_iommu_add()
3971 ret = iommu_init_domains(iommu); in intel_iommu_add()
3973 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
3977 intel_svm_check(iommu); in intel_iommu_add()
3984 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3988 intel_iommu_init_qi(iommu); in intel_iommu_add()
3989 iommu_flush_write_buffer(iommu); in intel_iommu_add()
3992 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
3993 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
3998 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
4002 iommu_set_root_entry(iommu); in intel_iommu_add()
4003 iommu_enable_translation(iommu); in intel_iommu_add()
4005 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
4009 disable_dmar_iommu(iommu); in intel_iommu_add()
4011 free_dmar_iommu(iommu); in intel_iommu_add()
4018 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
4022 if (iommu == NULL) in dmar_iommu_hotplug()
4028 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
4029 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
4194 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
4202 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
4203 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
4222 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
4225 for_each_iommu(iommu, drhd) in intel_disable_iommus()
4226 iommu_disable_translation(iommu); in intel_disable_iommus()
4232 struct intel_iommu *iommu = NULL; in intel_iommu_shutdown() local
4240 for_each_iommu(iommu, drhd) in intel_iommu_shutdown()
4241 iommu_disable_protect_mem_regions(iommu); in intel_iommu_shutdown()
4253 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
4259 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in version_show() local
4260 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
4269 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in address_show() local
4270 return sprintf(buf, "%llx\n", iommu->reg_phys); in address_show()
4277 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in cap_show() local
4278 return sprintf(buf, "%llx\n", iommu->cap); in cap_show()
4285 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in ecap_show() local
4286 return sprintf(buf, "%llx\n", iommu->ecap); in ecap_show()
4293 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_supported_show() local
4294 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
4301 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_used_show() local
4302 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, in domains_used_show()
4303 cap_ndoms(iommu->cap))); in domains_used_show()
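dev_to_intel_iommu() at line 4253 feeds all of the sysfs show functions that follow it: the generic object passed in by the core is embedded inside the driver's own structure, and container_of() subtracts the member offset to get back to the enclosing object. A self-contained illustration of that idiom with invented types:

#include <stddef.h>
#include <stdio.h>

/* Userspace copy of the kernel's container_of(): walk back from a pointer
 * to an embedded member to the structure that contains it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_dev { int id; };

struct driver_iommu {
        unsigned long cap;
        struct generic_dev iommu;       /* embedded, like intel_iommu.iommu */
};

int main(void)
{
        struct driver_iommu hw = { .cap = 0x123, .iommu = { .id = 7 } };
        struct generic_dev *d = &hw.iommu;
        struct driver_iommu *back = container_of(d, struct driver_iommu, iommu);

        printf("cap=%#lx id=%d\n", back->cap, d->id);   /* cap=0x123 id=7 */
        return 0;
}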
4363 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
4367 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
4406 struct intel_iommu *iommu; in intel_iommu_init() local
4457 for_each_iommu(iommu, drhd) in intel_iommu_init()
4458 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
4496 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
4504 if (cap_caching_mode(iommu->cap)) { in intel_iommu_init()
4508 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
4510 "%s", iommu->name); in intel_iommu_init()
4511 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
4524 for_each_iommu(iommu, drhd) { in intel_iommu_init()
4525 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
4526 iommu_enable_translation(iommu); in intel_iommu_init()
4528 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
4561 if (!info->iommu || !info->dev || !dev_is_pci(info->dev)) in domain_context_clear()
4571 struct intel_iommu *iommu; in __dmar_remove_one_dev_info() local
4579 iommu = info->iommu; in __dmar_remove_one_dev_info()
4583 if (dev_is_pci(info->dev) && sm_supported(iommu)) in __dmar_remove_one_dev_info()
4584 intel_pasid_tear_down_entry(iommu, info->dev, in __dmar_remove_one_dev_info()
4594 spin_lock_irqsave(&iommu->lock, flags); in __dmar_remove_one_dev_info()
4595 domain_detach_iommu(domain, iommu); in __dmar_remove_one_dev_info()
4596 spin_unlock_irqrestore(&iommu->lock, flags); in __dmar_remove_one_dev_info()
4754 struct intel_iommu *iommu; in aux_domain_add_dev() local
4756 iommu = device_to_iommu(dev, NULL, NULL); in aux_domain_add_dev()
4757 if (!iommu) in aux_domain_add_dev()
4792 spin_lock(&iommu->lock); in aux_domain_add_dev()
4793 ret = domain_attach_iommu(domain, iommu); in aux_domain_add_dev()
4799 ret = domain_setup_first_level(iommu, domain, dev, in aux_domain_add_dev()
4802 ret = intel_pasid_setup_second_level(iommu, domain, dev, in aux_domain_add_dev()
4807 spin_unlock(&iommu->lock); in aux_domain_add_dev()
4814 domain_detach_iommu(domain, iommu); in aux_domain_add_dev()
4816 spin_unlock(&iommu->lock); in aux_domain_add_dev()
4830 struct intel_iommu *iommu; in aux_domain_remove_dev() local
4838 iommu = info->iommu; in aux_domain_remove_dev()
4841 spin_lock(&iommu->lock); in aux_domain_remove_dev()
4842 intel_pasid_tear_down_entry(iommu, dev, in aux_domain_remove_dev()
4844 domain_detach_iommu(domain, iommu); in aux_domain_remove_dev()
4845 spin_unlock(&iommu->lock); in aux_domain_remove_dev()
4858 struct intel_iommu *iommu; in prepare_domain_attach_device() local
4861 iommu = device_to_iommu(dev, NULL, NULL); in prepare_domain_attach_device()
4862 if (!iommu) in prepare_domain_attach_device()
4866 !ecap_nest(iommu->ecap)) { in prepare_domain_attach_device()
4868 iommu->name); in prepare_domain_attach_device()
4873 addr_width = agaw_to_width(iommu->agaw); in prepare_domain_attach_device()
4874 if (addr_width > cap_mgaw(iommu->cap)) in prepare_domain_attach_device()
4875 addr_width = cap_mgaw(iommu->cap); in prepare_domain_attach_device()
4888 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
5012 struct intel_iommu *iommu; in intel_iommu_sva_invalidate() local
5026 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_sva_invalidate()
5027 if (!iommu) in intel_iommu_sva_invalidate()
5034 spin_lock(&iommu->lock); in intel_iommu_sva_invalidate()
5040 did = dmar_domain->iommu_did[iommu->seq_id]; in intel_iommu_sva_invalidate()
5087 qi_flush_piotlb(iommu, did, pasid, in intel_iommu_sva_invalidate()
5117 qi_flush_dev_iotlb_pasid(iommu, sid, in intel_iommu_sva_invalidate()
5131 spin_unlock(&iommu->lock); in intel_iommu_sva_invalidate()
5285 struct intel_iommu *iommu; in intel_iommu_probe_device() local
5287 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_probe_device()
5288 if (!iommu) in intel_iommu_probe_device()
5291 if (translation_pre_enabled(iommu)) in intel_iommu_probe_device()
5294 return &iommu->iommu; in intel_iommu_probe_device()
5299 struct intel_iommu *iommu; in intel_iommu_release_device() local
5301 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_release_device()
5302 if (!iommu) in intel_iommu_release_device()
5373 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) in intel_iommu_enable_pasid() argument
5387 spin_lock(&iommu->lock); in intel_iommu_enable_pasid()
5394 context = iommu_context_addr(iommu, info->bus, info->devfn, 0); in intel_iommu_enable_pasid()
5404 iommu->flush.flush_context(iommu, in intel_iommu_enable_pasid()
5405 domain->iommu_did[iommu->seq_id], in intel_iommu_enable_pasid()
5418 spin_unlock(&iommu->lock); in intel_iommu_enable_pasid()
5434 struct intel_iommu *iommu; in intel_iommu_enable_auxd() local
5438 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_enable_auxd()
5439 if (!iommu || dmar_disabled) in intel_iommu_enable_auxd()
5442 if (!sm_supported(iommu) || !pasid_supported(iommu)) in intel_iommu_enable_auxd()
5445 ret = intel_iommu_enable_pasid(iommu, dev); in intel_iommu_enable_auxd()
5474 struct intel_iommu *iommu; in intel_iommu_enable_sva() local
5480 iommu = info->iommu; in intel_iommu_enable_sva()
5481 if (!iommu) in intel_iommu_enable_sva()
5484 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) in intel_iommu_enable_sva()
5487 if (intel_iommu_enable_pasid(iommu, dev)) in intel_iommu_enable_sva()
5493 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_sva()
5503 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_sva() local
5508 ret = iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_disable_sva()
5626 struct intel_iommu *iommu; in intel_iommu_iotlb_sync_map() local
5630 iommu = g_iommus[iommu_id]; in intel_iommu_iotlb_sync_map()
5631 __mapping_notify_one(iommu, dmar_domain, pfn, pages); in intel_iommu_iotlb_sync_map()