Lines matching refs: iommu

164         if ( !ecap_queued_inval(drhd->iommu->ecap) ||  in iommu_supports_eim()
165              !ecap_intr_remap(drhd->iommu->ecap) ||  in iommu_supports_eim()
166              !ecap_eim(drhd->iommu->ecap) )  in iommu_supports_eim()
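
The three ecap_* tests above make EIM (x2APIC mode) support conditional on Queued Invalidation, Interrupt Remapping, and Extended Interrupt Mode all being advertised in the IOMMU's extended capability register. A minimal standalone sketch of that bit-test pattern, using the VT-d spec's bit positions (QI=1, IR=3, EIM=4) but hypothetical macro names rather than Xen's ecap_* accessors:

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit positions in the VT-d extended capability register. */
    #define ECAP_QI_BIT   (1ULL << 1)   /* Queued Invalidation supported */
    #define ECAP_IR_BIT   (1ULL << 3)   /* Interrupt Remapping supported */
    #define ECAP_EIM_BIT  (1ULL << 4)   /* Extended Interrupt Mode (x2APIC) */

    /* EIM is only usable when all three features are advertised. */
    static bool supports_eim_sketch(uint64_t ecap)
    {
        return (ecap & ECAP_QI_BIT) &&
               (ecap & ECAP_IR_BIT) &&
               (ecap & ECAP_EIM_BIT);
    }
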
179 static void update_irte(struct iommu *iommu, struct iremap_entry *entry, in update_irte() argument
182 ASSERT(spin_is_locked(&iommu_ir_ctrl(iommu)->iremap_lock)); in update_irte()
220 static void free_remap_entry(struct iommu *iommu, int index) in free_remap_entry() argument
223 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); in free_remap_entry()
233 update_irte(iommu, iremap_entry, &new_ire, false); in free_remap_entry()
235 iommu_flush_iec_index(iommu, 0, index); in free_remap_entry()
245 static unsigned int alloc_remap_entry(struct iommu *iommu, unsigned int nr) in alloc_remap_entry() argument
248 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); in alloc_remap_entry()
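
alloc_remap_entry() (line 245) hands out nr consecutive indexes into the interrupt remap table, and free_remap_entry() (line 220) clears an entry and flushes it from the hardware cache. A simplified sketch of a consecutive-slot allocator over a flat in-use array; Xen's real code scans bookkeeping in struct ir_ctrl under iremap_lock, and the names and table size below are hypothetical:

    #include <string.h>

    #define IREMAP_ENTRY_NR 256                  /* illustrative table size */

    static unsigned char iremap_inuse[IREMAP_ENTRY_NR];   /* toy in-use map */

    /* Return the first index of nr consecutive free IRTEs, or -1 if the
     * table is exhausted; the caller is assumed to hold the table lock. */
    static int alloc_remap_entries_sketch(unsigned int nr)
    {
        unsigned int i, run = 0;

        if ( !nr )
            return -1;

        for ( i = 0; i < IREMAP_ENTRY_NR; i++ )
        {
            run = iremap_inuse[i] ? 0 : run + 1;
            if ( run == nr )
            {
                memset(&iremap_inuse[i - nr + 1], 1, nr);
                return i - nr + 1;
            }
        }
        return -1;
    }
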
283 struct iommu *iommu, int index, struct IO_xAPIC_route_entry *old_rte) in remap_entry_to_ioapic_rte() argument
287 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); in remap_entry_to_ioapic_rte()
325 static int ioapic_rte_to_remap_entry(struct iommu *iommu, in ioapic_rte_to_remap_entry() argument
335 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); in ioapic_rte_to_remap_entry()
344 index = alloc_remap_entry(iommu, 1); in ioapic_rte_to_remap_entry()
405 update_irte(iommu, iremap_entry, &new_ire, !init); in ioapic_rte_to_remap_entry()
407 iommu_flush_iec_index(iommu, 0, index); in ioapic_rte_to_remap_entry()
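
Lines 405-407 show the recurring update-then-flush pattern: update_irte() rewrites the in-memory IRTE, then iommu_flush_iec_index() invalidates the IOMMU's Interrupt Entry Cache so the hardware re-reads the modified entry. A schematic version of that ordering; the types and the flush hook here are stand-ins, not Xen's definitions:

    #include <stdint.h>

    struct irte_sketch { uint64_t lo, hi; };   /* simplified 128-bit IRTE */

    static struct irte_sketch irte_table[256];

    /* Stand-in for iommu_flush_iec_index(): would queue an interrupt entry
     * cache invalidation descriptor and wait for it to complete. */
    static void flush_iec_sketch(unsigned int index) { (void)index; }

    /* Rewrite the in-memory entry first, then invalidate the IOMMU's
     * cached copy so the hardware re-reads the new contents. */
    static void set_irte_sketch(unsigned int index,
                                const struct irte_sketch *new_ire)
    {
        irte_table[index] = *new_ire;
        __atomic_thread_fence(__ATOMIC_RELEASE);  /* order write vs. flush */
        flush_iec_sketch(index);
    }
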
421 struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic)); in io_apic_read_remap_rte() local
422 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); in io_apic_read_remap_rte()
430 if ( remap_entry_to_ioapic_rte(iommu, index, &old_rte) ) in io_apic_read_remap_rte()
446 struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic)); in io_apic_write_remap_rte() local
459 if ( ioapic_rte_to_remap_entry(iommu, apic, ioapic_pin, in io_apic_write_remap_rte()
537 struct iommu *iommu, struct msi_msg *msg, unsigned int index) in remap_entry_to_msi_msg() argument
542 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); in remap_entry_to_msi_msg()
600 struct iommu *iommu, struct pci_dev *pdev, in msi_msg_to_remap_entry() argument
607 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); in msi_msg_to_remap_entry()
620 free_remap_entry(iommu, msi_desc->remap_index + i); in msi_msg_to_remap_entry()
629 index = alloc_remap_entry(iommu, nr); in msi_msg_to_remap_entry()
694 update_irte(iommu, iremap_entry, &new_ire, msi_desc->irte_initialized); in msi_msg_to_remap_entry()
698 iommu_flush_iec_index(iommu, 0, index); in msi_msg_to_remap_entry()
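
msi_msg_to_remap_entry() (line 600) stores the vector and destination in an IRTE and rewrites the device's MSI address into the remappable format, which carries the IRTE handle instead of an APIC destination. A sketch of that address encoding, following the VT-d layout (handle[14:0] in address bits 19:5, handle[15] in bit 2, interrupt-format bit 4 set); the MSI_ADDR_* names are illustrative:

    #include <stdint.h>

    #define MSI_ADDR_BASE      0xfee00000u
    #define MSI_ADDR_REMAP_FMT (1u << 4)   /* interrupt format: remappable */
    #define MSI_ADDR_SHV       (1u << 3)   /* subhandle valid */

    /* Encode a remappable MSI address carrying IRTE handle 'index':
     * handle[14:0] lives in bits 19:5, handle[15] in bit 2. */
    static uint32_t msi_addr_for_irte_sketch(unsigned int index)
    {
        return MSI_ADDR_BASE | MSI_ADDR_REMAP_FMT | MSI_ADDR_SHV |
               ((index & 0x7fffu) << 5) | (((index >> 15) & 1u) << 2);
    }
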
714 remap_entry_to_msi_msg(drhd->iommu, msg, in msi_msg_read_remap_rte()
727 return drhd ? msi_msg_to_remap_entry(drhd->iommu, pdev, msi_desc, msg) in msi_msg_write_remap_rte()
733 struct iommu *iommu = hpet_to_iommu(msi_desc->hpet_id); in intel_setup_hpet_msi() local
734 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu); in intel_setup_hpet_msi()
742 msi_desc->remap_index = alloc_remap_entry(iommu, 1); in intel_setup_hpet_msi()
756 int enable_intremap(struct iommu *iommu, int eim) in enable_intremap() argument
763 ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap); in enable_intremap()
772 ir_ctrl = iommu_ir_ctrl(iommu); in enable_intremap()
773 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); in enable_intremap()
783 " Should not enable interrupt remapping\n", iommu->index); in enable_intremap()
790 " Device pass-through will be insecure\n", iommu->index); in enable_intremap()
794 drhd = iommu_to_drhd(iommu); in enable_intremap()
808 spin_lock_irqsave(&iommu->register_lock, flags); in enable_intremap()
812 dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr); in enable_intremap()
815 gcmd = dmar_readl(iommu->reg, DMAR_GSTS_REG); in enable_intremap()
817 dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd); in enable_intremap()
819 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, in enable_intremap()
821 spin_unlock_irqrestore(&iommu->register_lock, flags); in enable_intremap()
824 iommu_flush_iec_global(iommu); in enable_intremap()
826 spin_lock_irqsave(&iommu->register_lock, flags); in enable_intremap()
829 dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd); in enable_intremap()
831 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, in enable_intremap()
833 spin_unlock_irqrestore(&iommu->register_lock, flags); in enable_intremap()
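
The enable path (lines 808-833) follows the VT-d global-command protocol twice under register_lock: program DMAR_IRTA_REG with the remap table address, issue a command through DMAR_GCMD_REG, then spin via IOMMU_WAIT_OP until DMAR_GSTS_REG reflects it. A self-contained sketch of that write-then-poll pattern; the offsets and helpers are illustrative stand-ins, though IRE really is GCMD bit 25:

    #include <stdint.h>

    #define GCMD_REG  0x18         /* illustrative, modeled on DMAR_GCMD_REG */
    #define GSTS_REG  0x1c         /* ... and DMAR_GSTS_REG */
    #define GCMD_IRE  (1u << 25)   /* interrupt remapping enable */

    static inline uint32_t rd32(volatile void *base, unsigned int off)
    {
        return *(volatile uint32_t *)((volatile char *)base + off);
    }

    static inline void wr32(volatile void *base, unsigned int off, uint32_t v)
    {
        *(volatile uint32_t *)((volatile char *)base + off) = v;
    }

    /* GCMD writes only take effect once the matching GSTS bit reads back
     * set; a real implementation bounds the poll with a timeout. */
    static void enable_ire_sketch(volatile void *reg)
    {
        uint32_t gcmd = rd32(reg, GSTS_REG);  /* GSTS mirrors enabled state */

        wr32(reg, GCMD_REG, gcmd | GCMD_IRE);
        while ( !(rd32(reg, GSTS_REG) & GCMD_IRE) )
            ;   /* spin until the hardware acknowledges */
    }
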
838 void disable_intremap(struct iommu *iommu) in disable_intremap() argument
844 if ( !ecap_intr_remap(iommu->ecap) ) in disable_intremap()
847 spin_lock_irqsave(&iommu->register_lock, flags); in disable_intremap()
848 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); in disable_intremap()
852 dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE)); in disable_intremap()
854 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, in disable_intremap()
864 if ( !ecap_eim(iommu->ecap) ) in disable_intremap()
868 irta = dmar_readl(iommu->reg, DMAR_IRTA_REG); in disable_intremap()
872 dmar_writel(iommu->reg, DMAR_IRTA_REG, irta & ~IRTA_EIME); in disable_intremap()
873 IOMMU_WAIT_OP(iommu, DMAR_IRTA_REG, dmar_readl, in disable_intremap()
877 spin_unlock_irqrestore(&iommu->register_lock, flags); in disable_intremap()
887 struct iommu *iommu; in iommu_enable_x2apic_IR() local
902 iommu = drhd->iommu; in iommu_enable_x2apic_IR()
905 clear_fault_bits(iommu); in iommu_enable_x2apic_IR()
911 disable_intremap(iommu); in iommu_enable_x2apic_IR()
912 disable_qinval(iommu); in iommu_enable_x2apic_IR()
918 iommu = drhd->iommu; in iommu_enable_x2apic_IR()
919 if ( enable_qinval(iommu) != 0 ) in iommu_enable_x2apic_IR()
930 iommu = drhd->iommu; in iommu_enable_x2apic_IR()
931 if ( enable_intremap(iommu, 1) ) in iommu_enable_x2apic_IR()
955 disable_intremap(drhd->iommu); in iommu_disable_x2apic_IR()
958 disable_qinval(drhd->iommu); in iommu_disable_x2apic_IR()
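
iommu_enable_x2apic_IR() (lines 887-931) is deliberately two-phase: it first brings up Queued Invalidation on every IOMMU, and only then enables interrupt remapping with EIM, since IRTE updates are flushed through QI descriptors; iommu_disable_x2apic_IR() unwinds in the same per-feature order. A schematic of that ordering with hypothetical stand-in helpers (the rollback shape is an assumption for illustration, not copied from Xen):

    #include <stddef.h>

    struct iommu_sketch { int id; };

    /* Hypothetical stand-ins for Xen's per-IOMMU enable/disable helpers. */
    static int  qinval_on_sketch(struct iommu_sketch *u)            { (void)u; return 0; }
    static int  intremap_on_sketch(struct iommu_sketch *u, int eim) { (void)u; (void)eim; return 0; }
    static void intremap_off_sketch(struct iommu_sketch *u)         { (void)u; }
    static void qinval_off_sketch(struct iommu_sketch *u)           { (void)u; }

    /* Two passes: QI must work everywhere before IR is enabled anywhere,
     * because IRTE changes are flushed via queued invalidation. */
    static int enable_x2apic_ir_sketch(struct iommu_sketch *units, size_t n)
    {
        size_t i;

        for ( i = 0; i < n; i++ )
            if ( qinval_on_sketch(&units[i]) )
                return -1;

        for ( i = 0; i < n; i++ )
            if ( intremap_on_sketch(&units[i], 1 /* eim */) )
            {
                while ( i-- )                    /* unwind IR first... */
                    intremap_off_sketch(&units[i]);
                for ( i = 0; i < n; i++ )        /* ...then QI */
                    qinval_off_sketch(&units[i]);
                return -1;
            }

        return 0;
    }
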