Lines matching refs: iommu — each entry shows the source line number, the matching line, and the enclosing function ("argument"/"local" notes how iommu is bound there).
61 struct iommu *iommu) in domain_iommu_domid() argument
65 nr_dom = cap_ndoms(iommu->cap); in domain_iommu_domid()
66 i = find_first_bit(iommu->domid_bitmap, nr_dom); in domain_iommu_domid()
69 if ( iommu->domid_map[i] == d->domain_id ) in domain_iommu_domid()
72 i = find_next_bit(iommu->domid_bitmap, nr_dom, i+1); in domain_iommu_domid()
77 d->domain_id, iommu->index); in domain_iommu_domid()
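Note: domain_iommu_domid() above resolves a Xen domain to its per-IOMMU domain id by scanning the allocated bitmap slots. A minimal stand-alone sketch of that scan, with hypothetical NR_DOM/domid_bitmap/domid_map holders standing in for the Xen structures (the real code sizes the bitmap from cap_ndoms(iommu->cap)):

    #define NR_DOM 32                        /* real code: cap_ndoms(iommu->cap) */
    static unsigned long domid_bitmap;       /* bit i set => slot i allocated */
    static unsigned short domid_map[NR_DOM]; /* slot -> Xen domain id */

    /* Return the slot already holding domain_id, or -1 if none. */
    static int find_domid_slot(unsigned short domain_id)
    {
        for (unsigned int i = 0; i < NR_DOM; i++)
            if (((domid_bitmap >> i) & 1UL) && domid_map[i] == domain_id)
                return (int)i;
        return -1;
    }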
85 struct iommu *iommu) in context_set_domain_id() argument
90 ASSERT(spin_is_locked(&iommu->lock)); in context_set_domain_id()
92 nr_dom = cap_ndoms(iommu->cap); in context_set_domain_id()
93 i = find_first_bit(iommu->domid_bitmap, nr_dom); in context_set_domain_id()
96 if ( iommu->domid_map[i] == d->domain_id ) in context_set_domain_id()
101 i = find_next_bit(iommu->domid_bitmap, nr_dom, i+1); in context_set_domain_id()
106 i = find_first_zero_bit(iommu->domid_bitmap, nr_dom); in context_set_domain_id()
112 iommu->domid_map[i] = d->domain_id; in context_set_domain_id()
115 set_bit(i, iommu->domid_bitmap); in context_set_domain_id()
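context_set_domain_id() extends the same scan with allocation: reuse the existing slot if the domain already has one, otherwise claim the first free bit. A sketch continuing the hypothetical holders above; the listing shows the real code doing this under iommu->lock:

    /* Find or allocate a slot for domain_id; -1 when all slots are used. */
    static int get_or_alloc_domid_slot(unsigned short domain_id)
    {
        int i = find_domid_slot(domain_id);
        if (i >= 0)
            return i;                        /* already mapped */
        for (unsigned int j = 0; j < NR_DOM; j++)
            if (!((domid_bitmap >> j) & 1UL)) {
                domid_map[j] = domain_id;    /* record the mapping ... */
                domid_bitmap |= 1UL << j;    /* ... then mark the slot used */
                return (int)j;
            }
        return -1;                           /* cap_ndoms() exhausted */
    }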
121 struct iommu *iommu) in context_get_domain_id() argument
126 if ( iommu && context ) in context_get_domain_id()
128 nr_dom = cap_ndoms(iommu->cap); in context_get_domain_id()
132 if ( dom_index < nr_dom && iommu->domid_map ) in context_get_domain_id()
133 domid = iommu->domid_map[dom_index]; in context_get_domain_id()
225 static u64 bus_to_context_maddr(struct iommu *iommu, u8 bus) in bus_to_context_maddr() argument
231 ASSERT(spin_is_locked(&iommu->lock)); in bus_to_context_maddr()
232 root_entries = (struct root_entry *)map_vtd_domain_page(iommu->root_maddr); in bus_to_context_maddr()
236 drhd = iommu_to_drhd(iommu); in bus_to_context_maddr()
319 static void iommu_flush_write_buffer(struct iommu *iommu) in iommu_flush_write_buffer() argument
324 if ( !rwbf_quirk && !cap_rwbf(iommu->cap) ) in iommu_flush_write_buffer()
327 spin_lock_irqsave(&iommu->register_lock, flags); in iommu_flush_write_buffer()
328 val = dmar_readl(iommu->reg, DMAR_GSTS_REG); in iommu_flush_write_buffer()
329 dmar_writel(iommu->reg, DMAR_GCMD_REG, val | DMA_GCMD_WBF); in iommu_flush_write_buffer()
332 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, in iommu_flush_write_buffer()
335 spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_flush_write_buffer()
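iommu_flush_write_buffer() shows the recurring VT-d register protocol: under register_lock, kick a command via DMAR_GCMD_REG, then poll DMAR_GSTS_REG until the matching status bit settles (IOMMU_WAIT_OP adds a timeout that the sketch omits). A hedged, self-contained rendering with raw MMIO accessors; the offsets and the WBF bit follow the VT-d spec:

    #include <stdint.h>

    static inline uint32_t rd32(volatile void *b, unsigned int off)
    { return *(volatile uint32_t *)((volatile char *)b + off); }
    static inline void wr32(volatile void *b, unsigned int off, uint32_t v)
    { *(volatile uint32_t *)((volatile char *)b + off) = v; }

    #define GCMD_REG 0x18            /* global command register */
    #define GSTS_REG 0x1c            /* global status register */
    #define DMA_GCMD_WBF (1u << 27)  /* write-buffer flush command/status */

    /* Flush the IOMMU's internal write buffer; returns once hardware
     * clears the WBF status bit (locking/timeout left to the caller). */
    static void flush_write_buffer(volatile void *reg)
    {
        uint32_t sts = rd32(reg, GSTS_REG);
        wr32(reg, GCMD_REG, sts | DMA_GCMD_WBF);
        while (rd32(reg, GSTS_REG) & DMA_GCMD_WBF)
            /* spin until hardware acknowledges */;
    }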
343 struct iommu *iommu = (struct iommu *) _iommu; in flush_context_reg() local
355 if ( !cap_caching_mode(iommu->cap) ) in flush_context_reg()
379 spin_lock_irqsave(&iommu->register_lock, flags); in flush_context_reg()
380 dmar_writeq(iommu->reg, DMAR_CCMD_REG, val); in flush_context_reg()
383 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq, in flush_context_reg()
386 spin_unlock_irqrestore(&iommu->register_lock, flags); in flush_context_reg()
391 static int __must_check iommu_flush_context_global(struct iommu *iommu, in iommu_flush_context_global() argument
394 struct iommu_flush *flush = iommu_get_flush(iommu); in iommu_flush_context_global()
395 return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, in iommu_flush_context_global()
399 static int __must_check iommu_flush_context_device(struct iommu *iommu, in iommu_flush_context_device() argument
404 struct iommu_flush *flush = iommu_get_flush(iommu); in iommu_flush_context_device()
405 return flush->context(iommu, did, source_id, function_mask, in iommu_flush_context_device()
416 struct iommu *iommu = (struct iommu *) _iommu; in flush_iotlb_reg() local
417 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in flush_iotlb_reg()
429 if ( !cap_caching_mode(iommu->cap) ) in flush_iotlb_reg()
451 if ( cap_read_drain(iommu->cap) ) in flush_iotlb_reg()
453 if ( cap_write_drain(iommu->cap) ) in flush_iotlb_reg()
456 spin_lock_irqsave(&iommu->register_lock, flags); in flush_iotlb_reg()
461 dmar_writeq(iommu->reg, tlb_offset, size_order | addr); in flush_iotlb_reg()
463 dmar_writeq(iommu->reg, tlb_offset + 8, val); in flush_iotlb_reg()
466 IOMMU_WAIT_OP(iommu, (tlb_offset + 8), dmar_readq, in flush_iotlb_reg()
468 spin_unlock_irqrestore(&iommu->register_lock, flags); in flush_iotlb_reg()
478 static int __must_check iommu_flush_iotlb_global(struct iommu *iommu, in iommu_flush_iotlb_global() argument
482 struct iommu_flush *flush = iommu_get_flush(iommu); in iommu_flush_iotlb_global()
486 vtd_ops_preamble_quirk(iommu); in iommu_flush_iotlb_global()
488 status = flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, in iommu_flush_iotlb_global()
492 vtd_ops_postamble_quirk(iommu); in iommu_flush_iotlb_global()
497 static int __must_check iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did, in iommu_flush_iotlb_dsi() argument
501 struct iommu_flush *flush = iommu_get_flush(iommu); in iommu_flush_iotlb_dsi()
505 vtd_ops_preamble_quirk(iommu); in iommu_flush_iotlb_dsi()
507 status = flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, in iommu_flush_iotlb_dsi()
511 vtd_ops_postamble_quirk(iommu); in iommu_flush_iotlb_dsi()
516 static int __must_check iommu_flush_iotlb_psi(struct iommu *iommu, u16 did, in iommu_flush_iotlb_psi() argument
521 struct iommu_flush *flush = iommu_get_flush(iommu); in iommu_flush_iotlb_psi()
527 if ( !cap_pgsel_inv(iommu->cap) ) in iommu_flush_iotlb_psi()
528 return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb); in iommu_flush_iotlb_psi()
531 if ( order > cap_max_amask_val(iommu->cap) ) in iommu_flush_iotlb_psi()
532 return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb); in iommu_flush_iotlb_psi()
538 vtd_ops_preamble_quirk(iommu); in iommu_flush_iotlb_psi()
540 status = flush->iotlb(iommu, did, addr, order, DMA_TLB_PSI_FLUSH, in iommu_flush_iotlb_psi()
544 vtd_ops_postamble_quirk(iommu); in iommu_flush_iotlb_psi()
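iommu_flush_iotlb_psi() degrades gracefully: without page-selective-invalidation capability, or when the requested order exceeds the address mask the hardware can encode, it falls back to a domain-selective flush. The decision from lines 527-532 above, isolated (capability values passed in as plain parameters):

    enum iotlb_flush { FLUSH_PSI, FLUSH_DSI };

    /* Mirror of the two fallback tests in iommu_flush_iotlb_psi(). */
    static enum iotlb_flush pick_flush(int cap_pgsel_inv,
                                       unsigned int cap_max_amask_val,
                                       unsigned int order)
    {
        if (!cap_pgsel_inv)                  /* no PSI support at all */
            return FLUSH_DSI;
        if (order > cap_max_amask_val)       /* range too big for the mask */
            return FLUSH_DSI;
        return FLUSH_PSI;
    }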
552 struct iommu *iommu; in iommu_flush_all() local
561 iommu = drhd->iommu; in iommu_flush_all()
562 context_rc = iommu_flush_context_global(iommu, 0); in iommu_flush_all()
563 flush_dev_iotlb = !!find_ats_dev_drhd(iommu); in iommu_flush_all()
564 iotlb_rc = iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb); in iommu_flush_all()
574 iommu_flush_write_buffer(iommu); in iommu_flush_all()
594 struct iommu *iommu; in iommu_flush_iotlb() local
605 iommu = drhd->iommu; in iommu_flush_iotlb()
607 if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) ) in iommu_flush_iotlb()
610 flush_dev_iotlb = !!find_ats_dev_drhd(iommu); in iommu_flush_iotlb()
611 iommu_domid = domain_iommu_domid(d, iommu); in iommu_flush_iotlb()
616 rc = iommu_flush_iotlb_dsi(iommu, iommu_domid, in iommu_flush_iotlb()
619 rc = iommu_flush_iotlb_psi(iommu, iommu_domid, in iommu_flush_iotlb()
627 iommu_flush_write_buffer(iommu); in iommu_flush_iotlb()
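iommu_flush_iotlb() iterates all DRHDs but skips IOMMUs the domain never had a device behind (the iommu_bitmap test at line 607), then issues a domain-wide (DSI) or page-range (PSI) flush. The exact condition is elided in the listing; this sketch assumes the usual shape, flushing the whole domain unless exactly one page is named, with hypothetical callback types and an INVALID_GFN sentinel:

    #include <stdint.h>

    #define INVALID_GFN UINT64_MAX  /* sentinel: no specific page given */

    /* One IOMMU's flush: whole domain unless a concrete page fits PSI. */
    static int flush_one_iommu(uint64_t gfn, unsigned int page_count,
                               int (*dsi)(void),
                               int (*psi)(uint64_t, unsigned int))
    {
        if (gfn == INVALID_GFN || page_count != 1)
            return dsi();
        return psi(gfn, page_count);
    }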
725 static int iommu_set_root_entry(struct iommu *iommu) in iommu_set_root_entry() argument
730 spin_lock_irqsave(&iommu->register_lock, flags); in iommu_set_root_entry()
731 dmar_writeq(iommu->reg, DMAR_RTADDR_REG, iommu->root_maddr); in iommu_set_root_entry()
733 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); in iommu_set_root_entry()
734 dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_SRTP); in iommu_set_root_entry()
737 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, in iommu_set_root_entry()
739 spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_set_root_entry()
748 struct iommu *iommu = drhd->iommu; in iommu_enable_translation() local
771 vtd_ops_preamble_quirk(iommu); in iommu_enable_translation()
775 iommu->reg); in iommu_enable_translation()
776 spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
777 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); in iommu_enable_translation()
778 dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_TE); in iommu_enable_translation()
781 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, in iommu_enable_translation()
783 spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
786 vtd_ops_postamble_quirk(iommu); in iommu_enable_translation()
789 disable_pmr(iommu); in iommu_enable_translation()
792 static void iommu_disable_translation(struct iommu *iommu) in iommu_disable_translation() argument
798 vtd_ops_preamble_quirk(iommu); in iommu_disable_translation()
800 spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_translation()
801 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); in iommu_disable_translation()
802 dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_TE)); in iommu_disable_translation()
805 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, in iommu_disable_translation()
807 spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_translation()
810 vtd_ops_postamble_quirk(iommu); in iommu_disable_translation()
869 static int iommu_page_fault_do_one(struct iommu *iommu, int type, in iommu_page_fault_do_one() argument
874 u16 seg = iommu->intel->drhd->segment; in iommu_page_fault_do_one()
885 PCI_FUNC(source_id), addr, iommu->reg); in iommu_page_fault_do_one()
893 PCI_FUNC(source_id), addr >> 48, iommu->reg); in iommu_page_fault_do_one()
901 PCI_FUNC(source_id), addr, iommu->reg); in iommu_page_fault_do_one()
910 print_vtd_entries(iommu, PCI_BUS(source_id), PCI_DEVFN2(source_id), in iommu_page_fault_do_one()
935 static void __do_iommu_page_fault(struct iommu *iommu) in __do_iommu_page_fault() argument
941 fault_status = dmar_readl(iommu->reg, DMAR_FSTS_REG); in __do_iommu_page_fault()
950 reg = cap_fault_reg_offset(iommu->cap); in __do_iommu_page_fault()
960 spin_lock_irqsave(&iommu->register_lock, flags); in __do_iommu_page_fault()
961 data = dmar_readl(iommu->reg, reg + in __do_iommu_page_fault()
965 spin_unlock_irqrestore(&iommu->register_lock, flags); in __do_iommu_page_fault()
972 data = dmar_readl(iommu->reg, reg + in __do_iommu_page_fault()
976 guest_addr = dmar_readq(iommu->reg, reg + in __do_iommu_page_fault()
980 dmar_writel(iommu->reg, reg + in __do_iommu_page_fault()
982 spin_unlock_irqrestore(&iommu->register_lock, flags); in __do_iommu_page_fault()
984 iommu_page_fault_do_one(iommu, type, fault_reason, in __do_iommu_page_fault()
987 pci_check_disable_device(iommu->intel->drhd->segment, in __do_iommu_page_fault()
991 if ( fault_index > cap_num_fault_regs(iommu->cap) ) in __do_iommu_page_fault()
996 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in __do_iommu_page_fault()
999 spin_lock_irqsave(&iommu->register_lock, flags); in __do_iommu_page_fault()
1000 dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_PFO); in __do_iommu_page_fault()
1001 spin_unlock_irqrestore(&iommu->register_lock, flags); in __do_iommu_page_fault()
1022 __do_iommu_page_fault(drhd->iommu); in do_iommu_page_fault()
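__do_iommu_page_fault() drains the primary fault recording registers as a ring: start from the index hardware reports, decode and clear each pending record (the F bit is write-1-to-clear), and wrap at the register count (cf. line 991). A compact sketch; the 16-byte record layout and bit position follow the VT-d spec, and the raw MMIO accessors are the same stand-ins as in the earlier sketch:

    #include <stdint.h>

    static inline uint32_t rd32(volatile void *b, unsigned int off)
    { return *(volatile uint32_t *)((volatile char *)b + off); }
    static inline void wr32(volatile void *b, unsigned int off, uint32_t v)
    { *(volatile uint32_t *)((volatile char *)b + off) = v; }

    #define FAULT_REG_LEN 16         /* one 128-bit record per slot */
    #define FAULT_F_BIT   (1u << 31) /* "fault pending" in the top dword */

    static void drain_fault_ring(volatile void *reg, unsigned int base,
                                 unsigned int nr_regs, unsigned int start)
    {
        unsigned int idx = start;
        do {
            uint32_t hi = rd32(reg, base + idx * FAULT_REG_LEN + 12);
            if (!(hi & FAULT_F_BIT))
                break;               /* no further pending records */
            /* ... decode type/reason/source-id, read faulting address ... */
            wr32(reg, base + idx * FAULT_REG_LEN + 12, FAULT_F_BIT);
            if (++idx >= nr_regs)
                idx = 0;             /* ring wrap */
        } while (idx != start);
    }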
1038 struct iommu *iommu = desc->action->dev_id; in dma_msi_unmask() local
1043 spin_lock_irqsave(&iommu->register_lock, flags); in dma_msi_unmask()
1044 sts = dmar_readl(iommu->reg, DMAR_FECTL_REG); in dma_msi_unmask()
1046 dmar_writel(iommu->reg, DMAR_FECTL_REG, sts); in dma_msi_unmask()
1047 spin_unlock_irqrestore(&iommu->register_lock, flags); in dma_msi_unmask()
1048 iommu->msi.msi_attrib.host_masked = 0; in dma_msi_unmask()
1054 struct iommu *iommu = desc->action->dev_id; in dma_msi_mask() local
1058 spin_lock_irqsave(&iommu->register_lock, flags); in dma_msi_mask()
1059 sts = dmar_readl(iommu->reg, DMAR_FECTL_REG); in dma_msi_mask()
1061 dmar_writel(iommu->reg, DMAR_FECTL_REG, sts); in dma_msi_mask()
1062 spin_unlock_irqrestore(&iommu->register_lock, flags); in dma_msi_mask()
1063 iommu->msi.msi_attrib.host_masked = 1; in dma_msi_mask()
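dma_msi_mask()/dma_msi_unmask() are a textbook read-modify-write of the Interrupt Mask bit in the fault-event control register. A sketch reusing the rd32/wr32 helpers from the earlier block; the FECTL offset and IM bit position follow the VT-d spec:

    #define FECTL_REG 0x38           /* fault event control register */
    #define FECTL_IM  (1u << 31)     /* interrupt mask bit */

    static void dma_msi_set_mask(volatile void *reg, int masked)
    {
        uint32_t sts = rd32(reg, FECTL_REG);
        sts = masked ? (sts | FECTL_IM) : (sts & ~FECTL_IM);
        wr32(reg, FECTL_REG, sts);   /* caller holds register_lock */
    }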
1090 struct iommu *iommu = desc->action->dev_id; in dma_msi_set_affinity() local
1104 iommu->msi.msg = msg; in dma_msi_set_affinity()
1106 spin_lock_irqsave(&iommu->register_lock, flags); in dma_msi_set_affinity()
1107 dmar_writel(iommu->reg, DMAR_FEDATA_REG, msg.data); in dma_msi_set_affinity()
1108 dmar_writeq(iommu->reg, DMAR_FEADDR_REG, msg.address); in dma_msi_set_affinity()
1109 spin_unlock_irqrestore(&iommu->register_lock, flags); in dma_msi_set_affinity()
1127 struct iommu *iommu = drhd->iommu; in iommu_set_interrupt() local
1140 ret = request_irq(irq, 0, iommu_page_fault, "dmar", iommu); in iommu_set_interrupt()
1149 iommu->msi.irq = irq; in iommu_set_interrupt()
1150 iommu->msi.msi_attrib.pos = MSI_TYPE_IOMMU; in iommu_set_interrupt()
1151 iommu->msi.msi_attrib.maskbit = 1; in iommu_set_interrupt()
1152 iommu->msi.msi_attrib.is_64 = 1; in iommu_set_interrupt()
1153 desc->msi_desc = &iommu->msi; in iommu_set_interrupt()
1160 struct iommu *iommu; in iommu_alloc() local
1171 iommu = xzalloc(struct iommu); in iommu_alloc()
1172 if ( iommu == NULL ) in iommu_alloc()
1175 iommu->msi.irq = -1; /* No irq assigned yet. */ in iommu_alloc()
1176 INIT_LIST_HEAD(&iommu->ats_devices); in iommu_alloc()
1178 iommu->intel = alloc_intel_iommu(); in iommu_alloc()
1179 if ( iommu->intel == NULL ) in iommu_alloc()
1181 xfree(iommu); in iommu_alloc()
1184 iommu->intel->drhd = drhd; in iommu_alloc()
1185 drhd->iommu = iommu; in iommu_alloc()
1187 if ( !(iommu->root_maddr = alloc_pgtable_maddr(drhd, 1)) ) in iommu_alloc()
1190 iommu->reg = ioremap(drhd->address, PAGE_SIZE); in iommu_alloc()
1191 if ( !iommu->reg ) in iommu_alloc()
1193 iommu->index = nr_iommus++; in iommu_alloc()
1195 iommu->cap = dmar_readq(iommu->reg, DMAR_CAP_REG); in iommu_alloc()
1196 iommu->ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG); in iommu_alloc()
1201 drhd->address, iommu->reg); in iommu_alloc()
1203 iommu->cap, iommu->ecap); in iommu_alloc()
1205 if ( !(iommu->cap + 1) || !(iommu->ecap + 1) ) in iommu_alloc()
1208 if ( cap_fault_reg_offset(iommu->cap) + in iommu_alloc()
1209 cap_num_fault_regs(iommu->cap) * PRIMARY_FAULT_REG_LEN >= PAGE_SIZE || in iommu_alloc()
1210 ecap_iotlb_offset(iommu->ecap) >= PAGE_SIZE ) in iommu_alloc()
1218 sagaw = cap_sagaw(iommu->cap); in iommu_alloc()
1228 iommu->nr_pt_levels = agaw_to_level(agaw); in iommu_alloc()
1230 if ( !ecap_coherent(iommu->ecap) ) in iommu_alloc()
1234 nr_dom = cap_ndoms(iommu->cap); in iommu_alloc()
1235 iommu->domid_bitmap = xzalloc_array(unsigned long, BITS_TO_LONGS(nr_dom)); in iommu_alloc()
1236 if ( !iommu->domid_bitmap ) in iommu_alloc()
1243 if ( cap_caching_mode(iommu->cap) ) in iommu_alloc()
1244 set_bit(0, iommu->domid_bitmap); in iommu_alloc()
1246 iommu->domid_map = xzalloc_array(u16, nr_dom); in iommu_alloc()
1247 if ( !iommu->domid_map ) in iommu_alloc()
1250 spin_lock_init(&iommu->lock); in iommu_alloc()
1251 spin_lock_init(&iommu->register_lock); in iommu_alloc()
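Two details of iommu_alloc() are worth spelling out: the check `!(iommu->cap + 1) || !(iommu->ecap + 1)` at line 1205 rejects the all-ones pattern that reads from a broken or unmapped MMIO range typically return (~0 + 1 overflows to 0), and the fault/IOTLB register offsets are validated against PAGE_SIZE because only a single page was ioremap()ed at line 1190. The first check in an equivalent, more explicit form:

    #include <stdint.h>

    /* Same test as `!(cap + 1) || !(ecap + 1)`, written out. */
    static int caps_plausible(uint64_t cap, uint64_t ecap)
    {
        return cap != UINT64_MAX && ecap != UINT64_MAX;
    }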
1258 struct iommu *iommu = drhd->iommu; in iommu_free() local
1260 if ( iommu == NULL ) in iommu_free()
1263 drhd->iommu = NULL; in iommu_free()
1265 if ( iommu->root_maddr != 0 ) in iommu_free()
1267 free_pgtable_maddr(iommu->root_maddr); in iommu_free()
1268 iommu->root_maddr = 0; in iommu_free()
1271 if ( iommu->reg ) in iommu_free()
1272 iounmap(iommu->reg); in iommu_free()
1274 xfree(iommu->domid_bitmap); in iommu_free()
1275 xfree(iommu->domid_map); in iommu_free()
1277 free_intel_iommu(iommu->intel); in iommu_free()
1278 if ( iommu->msi.irq >= 0 ) in iommu_free()
1279 destroy_irq(iommu->msi.irq); in iommu_free()
1280 xfree(iommu); in iommu_free()
1325 struct iommu *iommu, in domain_context_mapping_one() argument
1331 u16 seg = iommu->intel->drhd->segment; in domain_context_mapping_one()
1336 spin_lock(&iommu->lock); in domain_context_mapping_one()
1337 maddr = bus_to_context_maddr(iommu, bus); in domain_context_mapping_one()
1362 cdomain = context_get_domain_id(context, iommu); in domain_context_mapping_one()
1384 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1391 agaw = level_to_agaw(iommu->nr_pt_levels); in domain_context_mapping_one()
1405 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1414 agaw != level_to_agaw(iommu->nr_pt_levels); in domain_context_mapping_one()
1425 if ( ats_enabled && ecap_dev_iotlb(iommu->ecap) ) in domain_context_mapping_one()
1433 if ( context_set_domain_id(context, domain, iommu) ) in domain_context_mapping_one()
1435 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1444 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1447 rc = iommu_flush_context_device(iommu, 0, PCI_BDF2(bus, devfn), in domain_context_mapping_one()
1449 flush_dev_iotlb = !!find_ats_dev_drhd(iommu); in domain_context_mapping_one()
1450 ret = iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb); in domain_context_mapping_one()
1460 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
1466 set_bit(iommu->index, &hd->arch.iommu_bitmap); in domain_context_mapping_one()
1510 ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn, in domain_context_mapping()
1513 enable_ats_device(pdev, &drhd->iommu->ats_devices); in domain_context_mapping()
1523 ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn, in domain_context_mapping()
1531 ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn, in domain_context_mapping()
1541 ret = domain_context_mapping_one(domain, drhd->iommu, secbus, 0, in domain_context_mapping()
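domain_context_mapping() (lines 1510-1541) programs more than one context entry for some topologies: a PCIe endpoint needs only its own BDF, but a conventional-PCI device behind a PCIe-to-PCI bridge issues DMA tagged with the bridge's requester ID, so the bridge (and, per line 1541, devfn 0 of the secondary bus for some bridges) must be mapped too. A sketch of that dispatch with hypothetical types and callback:

    enum topo { TOPO_PCIE_ENDPOINT, TOPO_PCI_BEHIND_BRIDGE };

    typedef int (*map_fn)(unsigned char bus, unsigned char devfn);

    static int map_for_topology(enum topo t, unsigned char bus,
                                unsigned char devfn, unsigned char bridge_bus,
                                unsigned char bridge_devfn, map_fn map_one)
    {
        int ret = map_one(bus, devfn);               /* the device itself */
        if (!ret && t == TOPO_PCI_BEHIND_BRIDGE)
            ret = map_one(bridge_bus, bridge_devfn); /* requester-ID alias */
        return ret;
    }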
1562 struct iommu *iommu, in domain_context_unmap_one() argument
1571 spin_lock(&iommu->lock); in domain_context_unmap_one()
1573 maddr = bus_to_context_maddr(iommu, bus); in domain_context_unmap_one()
1579 spin_unlock(&iommu->lock); in domain_context_unmap_one()
1588 iommu_domid = domain_iommu_domid(domain, iommu); in domain_context_unmap_one()
1591 spin_unlock(&iommu->lock); in domain_context_unmap_one()
1596 rc = iommu_flush_context_device(iommu, iommu_domid, in domain_context_unmap_one()
1600 flush_dev_iotlb = !!find_ats_dev_drhd(iommu); in domain_context_unmap_one()
1601 ret = iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb); in domain_context_unmap_one()
1611 iommu_flush_write_buffer(iommu); in domain_context_unmap_one()
1617 spin_unlock(&iommu->lock); in domain_context_unmap_one()
1620 if ( !iommu->intel->drhd->segment && !rc ) in domain_context_unmap_one()
1630 struct iommu *iommu; in domain_context_unmap() local
1638 iommu = drhd->iommu; in domain_context_unmap()
1661 ret = domain_context_unmap_one(domain, iommu, bus, devfn); in domain_context_unmap()
1671 ret = domain_context_unmap_one(domain, iommu, bus, devfn); in domain_context_unmap()
1683 ret = domain_context_unmap_one(domain, iommu, tmp_bus, tmp_devfn); in domain_context_unmap()
1687 ret = domain_context_unmap_one(domain, iommu, secbus, 0); in domain_context_unmap()
1690 ret = domain_context_unmap_one(domain, iommu, tmp_bus, tmp_devfn); in domain_context_unmap()
1712 if ( drhd && drhd->iommu == iommu ) in domain_context_unmap()
1723 clear_bit(iommu->index, &dom_iommu(domain)->arch.iommu_bitmap); in domain_context_unmap()
1725 iommu_domid = domain_iommu_domid(domain, iommu); in domain_context_unmap()
1732 clear_bit(iommu_domid, iommu->domid_bitmap); in domain_context_unmap()
1733 iommu->domid_map[iommu_domid] = 0; in domain_context_unmap()
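The tail of domain_context_unmap() (lines 1712-1733) shows the end of the per-IOMMU domain id's lifetime: once no device of the domain remains behind this IOMMU, the bitmap bit and map entry allocated by context_set_domain_id() are recycled. Continuing the hypothetical holders from the first sketch:

    /* Release a slot so the cap_ndoms()-limited ids can be reused. */
    static void free_domid_slot(int slot)
    {
        domid_bitmap &= ~(1UL << slot);
        domid_map[slot] = 0;
    }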
1833 struct iommu *iommu = NULL; in iommu_pte_flush() local
1843 iommu = drhd->iommu; in iommu_pte_flush()
1844 if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) ) in iommu_pte_flush()
1847 flush_dev_iotlb = !!find_ats_dev_drhd(iommu); in iommu_pte_flush()
1848 iommu_domid = domain_iommu_domid(d, iommu); in iommu_pte_flush()
1852 rc = iommu_flush_iotlb_psi(iommu, iommu_domid, in iommu_pte_flush()
1857 iommu_flush_write_buffer(iommu); in iommu_pte_flush()
1876 static int __init vtd_ept_page_compatible(struct iommu *iommu) in vtd_ept_page_compatible() argument
1878 u64 ept_cap, vtd_cap = iommu->cap; in vtd_ept_page_compatible()
2020 ret = enable_ats_device(pdev, &drhd->iommu->ats_devices); in intel_iommu_enable_device()
2056 void clear_fault_bits(struct iommu *iommu) in clear_fault_bits() argument
2061 spin_lock_irqsave(&iommu->register_lock, flags); in clear_fault_bits()
2062 val = dmar_readq(iommu->reg, cap_fault_reg_offset(iommu->cap) + 8); in clear_fault_bits()
2063 dmar_writeq(iommu->reg, cap_fault_reg_offset(iommu->cap) + 8, val); in clear_fault_bits()
2064 dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_FAULTS); in clear_fault_bits()
2065 spin_unlock_irqrestore(&iommu->register_lock, flags); in clear_fault_bits()
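clear_fault_bits() leans on write-1-to-clear semantics twice: the pending fault record is acknowledged by writing back the very value just read (lines 2062-2063), and DMAR_FSTS_REG is cleared with a constant covering all fault status bits. The idiom, isolated (rd32/wr32 as in the earlier sketches):

    /* Acknowledge all pending W1C bits in a status register. */
    static void ack_w1c(volatile void *reg, unsigned int off)
    {
        uint32_t pending = rd32(reg, off);
        wr32(reg, off, pending);  /* set bits clear; zero bits are ignored */
    }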
2078 dma_msi_set_affinity(irq_to_desc(drhd->iommu->msi.irq), cpumask); in adjust_irq_affinity()
2098 struct iommu *iommu; in init_vtd_hw() local
2111 iommu = drhd->iommu; in init_vtd_hw()
2113 clear_fault_bits(iommu); in init_vtd_hw()
2115 spin_lock_irqsave(&iommu->register_lock, flags); in init_vtd_hw()
2116 sts = dmar_readl(iommu->reg, DMAR_FECTL_REG); in init_vtd_hw()
2118 dmar_writel(iommu->reg, DMAR_FECTL_REG, sts); in init_vtd_hw()
2119 spin_unlock_irqrestore(&iommu->register_lock, flags); in init_vtd_hw()
2127 iommu = drhd->iommu; in init_vtd_hw()
2132 if ( enable_qinval(iommu) != 0 ) in init_vtd_hw()
2134 flush = iommu_get_flush(iommu); in init_vtd_hw()
2163 iommu = drhd->iommu; in init_vtd_hw()
2164 if ( enable_intremap(iommu, 0) != 0 ) in init_vtd_hw()
2175 disable_intremap(drhd->iommu); in init_vtd_hw()
2185 iommu = drhd->iommu; in init_vtd_hw()
2186 ret = iommu_set_root_entry(iommu); in init_vtd_hw()
2223 struct iommu *iommu; in intel_vtd_setup() local
2251 iommu = drhd->iommu; in intel_vtd_setup()
2254 iommu->index); in intel_vtd_setup()
2255 if ( cap_sps_2mb(iommu->cap) ) in intel_vtd_setup()
2258 if ( cap_sps_1gb(iommu->cap) ) in intel_vtd_setup()
2263 if ( iommu_snoop && !ecap_snp_ctl(iommu->ecap) ) in intel_vtd_setup()
2266 if ( iommu_passthrough && !ecap_pass_thru(iommu->ecap) ) in intel_vtd_setup()
2269 if ( iommu_qinval && !ecap_queued_inval(iommu->ecap) ) in intel_vtd_setup()
2272 if ( iommu_intremap && !ecap_intr_remap(iommu->ecap) ) in intel_vtd_setup()
2280 if ( !cap_intr_post(iommu->cap) || !cpu_has_cx16 ) in intel_vtd_setup()
2283 if ( !vtd_ept_page_compatible(iommu) ) in intel_vtd_setup()
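intel_vtd_setup() (lines 2251-2283) reduces global features to the lowest common denominator across units: snoop control, pass-through, queued invalidation, interrupt remapping/posting, and EPT page-size compatibility each stay enabled only if every IOMMU supports them. The pattern, reduced to one hypothetical flag checked per unit while iterating the DRHDs:

    /* Demote a global feature if this unit lacks the capability. */
    static void gate_feature(int *enabled, int unit_supports_it)
    {
        if (*enabled && !unit_supports_it)
            *enabled = 0;
    }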
2489 struct iommu *iommu; in vtd_suspend() local
2507 iommu = drhd->iommu; in vtd_suspend()
2508 i = iommu->index; in vtd_suspend()
2511 (u32) dmar_readl(iommu->reg, DMAR_FECTL_REG); in vtd_suspend()
2513 (u32) dmar_readl(iommu->reg, DMAR_FEDATA_REG); in vtd_suspend()
2515 (u32) dmar_readl(iommu->reg, DMAR_FEADDR_REG); in vtd_suspend()
2517 (u32) dmar_readl(iommu->reg, DMAR_FEUADDR_REG); in vtd_suspend()
2523 iommu_disable_translation(iommu); in vtd_suspend()
2530 disable_qinval(iommu); in vtd_suspend()
2539 struct iommu *iommu; in vtd_crash_shutdown() local
2550 iommu = drhd->iommu; in vtd_crash_shutdown()
2551 iommu_disable_translation(iommu); in vtd_crash_shutdown()
2552 disable_intremap(drhd->iommu); in vtd_crash_shutdown()
2553 disable_qinval(drhd->iommu); in vtd_crash_shutdown()
2560 struct iommu *iommu; in vtd_resume() local
2572 iommu = drhd->iommu; in vtd_resume()
2573 i = iommu->index; in vtd_resume()
2575 spin_lock_irqsave(&iommu->register_lock, flags); in vtd_resume()
2576 dmar_writel(iommu->reg, DMAR_FECTL_REG, in vtd_resume()
2578 dmar_writel(iommu->reg, DMAR_FEDATA_REG, in vtd_resume()
2580 dmar_writel(iommu->reg, DMAR_FEADDR_REG, in vtd_resume()
2582 dmar_writel(iommu->reg, DMAR_FEUADDR_REG, in vtd_resume()
2584 spin_unlock_irqrestore(&iommu->register_lock, flags); in vtd_resume()
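vtd_suspend()/vtd_resume() preserve only the four fault-event MSI registers per unit, indexed by iommu->index; everything else is reprogrammed by init_vtd_hw() on resume. A self-contained sketch; the register offsets follow the VT-d spec, while the save area and its size are hypothetical:

    #include <stdint.h>

    static inline uint32_t rd32(volatile void *b, unsigned int off)
    { return *(volatile uint32_t *)((volatile char *)b + off); }
    static inline void wr32(volatile void *b, unsigned int off, uint32_t v)
    { *(volatile uint32_t *)((volatile char *)b + off) = v; }

    #define MAX_IOMMUS 8
    enum { FECTL, FEDATA, FEADDR, FEUADDR, NR_FE };
    static const unsigned int fe_off[NR_FE] = { 0x38, 0x3c, 0x40, 0x44 };
    static uint32_t fe_save[MAX_IOMMUS][NR_FE];

    static void fe_regs_suspend(volatile void *reg, unsigned int idx)
    {
        for (int r = 0; r < NR_FE; r++)
            fe_save[idx][r] = rd32(reg, fe_off[r]);
    }

    static void fe_regs_resume(volatile void *reg, unsigned int idx)
    {
        for (int r = 0; r < NR_FE; r++)
            wr32(reg, fe_off[r], fe_save[idx][r]);
    }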