Lines matching refs: iommu
43 static int iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask) in iommu_has_ht_flag() argument
45 return iommu->ht_flags & mask; in iommu_has_ht_flag()
48 static int __init map_iommu_mmio_region(struct amd_iommu *iommu) in map_iommu_mmio_region() argument
50 iommu->mmio_base = ioremap(iommu->mmio_base_phys, in map_iommu_mmio_region()
52 if ( !iommu->mmio_base ) in map_iommu_mmio_region()
55 memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH); in map_iommu_mmio_region()
60 static void __init unmap_iommu_mmio_region(struct amd_iommu *iommu) in unmap_iommu_mmio_region() argument
62 if ( iommu->mmio_base ) in unmap_iommu_mmio_region()
64 iounmap(iommu->mmio_base); in unmap_iommu_mmio_region()
65 iommu->mmio_base = NULL; in unmap_iommu_mmio_region()
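
map_iommu_mmio_region() and unmap_iommu_mmio_region() bracket the per-IOMMU register window: map the physical MMIO range, zero it, and later unmap it and clear the pointer. A minimal sketch of the same pattern, assuming hypothetical sketch_ioremap()/sketch_iounmap() wrappers and an assumed window size (the real constant is IOMMU_MMIO_REGION_LENGTH):

    #include <stddef.h>
    #include <string.h>

    #define MMIO_REGION_LENGTH 0x4000          /* assumed size for the sketch */

    struct mmio_dev {
        unsigned long mmio_base_phys;          /* physical base of the register window */
        void *mmio_base;                       /* virtual mapping, NULL when unmapped */
    };

    /* hypothetical stand-ins for the hypervisor's ioremap()/iounmap() */
    void *sketch_ioremap(unsigned long phys, size_t len);
    void sketch_iounmap(void *virt);

    static int map_mmio_region(struct mmio_dev *d)
    {
        d->mmio_base = sketch_ioremap(d->mmio_base_phys, MMIO_REGION_LENGTH);
        if ( !d->mmio_base )
            return -1;                         /* mapping failed */

        memset(d->mmio_base, 0, MMIO_REGION_LENGTH);
        return 0;
    }

    static void unmap_mmio_region(struct mmio_dev *d)
    {
        if ( d->mmio_base )
        {
            sketch_iounmap(d->mmio_base);
            d->mmio_base = NULL;               /* makes the unmap idempotent */
        }
    }
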
69 static void set_iommu_ht_flags(struct amd_iommu *iommu) in set_iommu_ht_flags() argument
72 entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_ht_flags()
75 if ( iommu_has_cap(iommu, PCI_CAP_HT_TUNNEL_SHIFT) ) in set_iommu_ht_flags()
76 iommu_has_ht_flag(iommu, ACPI_IVHD_TT_ENABLE) ? in set_iommu_ht_flags()
80 iommu_has_ht_flag(iommu, ACPI_IVHD_RES_PASS_PW) ? in set_iommu_ht_flags()
84 iommu_has_ht_flag(iommu, ACPI_IVHD_ISOC) ? in set_iommu_ht_flags()
88 iommu_has_ht_flag(iommu, ACPI_IVHD_PASS_PW) ? in set_iommu_ht_flags()
95 writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_ht_flags()
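
set_iommu_ht_flags() is a read-modify-write of the control register: each HyperTransport flag from the IVHD entry (tunnel translation, pass posted write, response pass posted write, isochronous) decides whether the matching enable bit is set or cleared, with the tunnel bit only touched when the PCI HT tunnel capability is present, and the result is written back once. A compact sketch of that pattern with made-up bit positions and flag values (the real ones live in the AMD IOMMU and ACPI IVHD headers):

    #include <stdint.h>

    /* illustrative bit masks only; not the real register or IVHD layout */
    #define CTRL_HT_TUN_EN    (1u << 1)
    #define CTRL_PASS_PW      (1u << 8)
    #define CTRL_RES_PASS_PW  (1u << 9)
    #define CTRL_ISOC         (1u << 11)

    #define IVHD_HT_TUN_EN    0x01
    #define IVHD_PASS_PW      0x02
    #define IVHD_RES_PASS_PW  0x04
    #define IVHD_ISOC         0x08

    static inline uint32_t rd32(volatile void *a) { return *(volatile uint32_t *)a; }
    static inline void wr32(volatile void *a, uint32_t v) { *(volatile uint32_t *)a = v; }

    static void apply_ht_flags(volatile void *ctrl_reg, uint8_t ht_flags,
                               int has_ht_tunnel_cap)
    {
        uint32_t entry = rd32(ctrl_reg);

        /* the tunnel bit is only touched when the HT tunnel capability exists
         * (cf. iommu_has_cap() in the excerpt above) */
        if ( has_ht_tunnel_cap )
            entry = (ht_flags & IVHD_HT_TUN_EN)   ? (entry | CTRL_HT_TUN_EN)
                                                  : (entry & ~CTRL_HT_TUN_EN);

        /* each remaining IVHD flag decides whether its control bit is set or cleared */
        entry = (ht_flags & IVHD_PASS_PW)     ? (entry | CTRL_PASS_PW)
                                              : (entry & ~CTRL_PASS_PW);
        entry = (ht_flags & IVHD_RES_PASS_PW) ? (entry | CTRL_RES_PASS_PW)
                                              : (entry & ~CTRL_RES_PASS_PW);
        entry = (ht_flags & IVHD_ISOC)        ? (entry | CTRL_ISOC)
                                              : (entry & ~CTRL_ISOC);

        wr32(ctrl_reg, entry);
    }
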
98 static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu) in register_iommu_dev_table_in_mmio_space() argument
103 ASSERT( iommu->dev_table.buffer ); in register_iommu_dev_table_in_mmio_space()
105 addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer); in register_iommu_dev_table_in_mmio_space()
111 set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1, in register_iommu_dev_table_in_mmio_space()
114 writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET); in register_iommu_dev_table_in_mmio_space()
118 writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET); in register_iommu_dev_table_in_mmio_space()
121 static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu) in register_iommu_cmd_buffer_in_mmio_space() argument
128 ASSERT( iommu->cmd_buffer.buffer ); in register_iommu_cmd_buffer_in_mmio_space()
130 addr_64 = virt_to_maddr(iommu->cmd_buffer.buffer); in register_iommu_cmd_buffer_in_mmio_space()
136 writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET); in register_iommu_cmd_buffer_in_mmio_space()
138 power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) + in register_iommu_cmd_buffer_in_mmio_space()
146 writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET); in register_iommu_cmd_buffer_in_mmio_space()
149 static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu) in register_iommu_event_log_in_mmio_space() argument
156 ASSERT( iommu->event_log.buffer ); in register_iommu_event_log_in_mmio_space()
158 addr_64 = virt_to_maddr(iommu->event_log.buffer); in register_iommu_event_log_in_mmio_space()
164 writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET); in register_iommu_event_log_in_mmio_space()
166 power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) + in register_iommu_event_log_in_mmio_space()
174 writel(entry, iommu->mmio_base+IOMMU_EVENT_LOG_BASE_HIGH_OFFSET); in register_iommu_event_log_in_mmio_space()
177 static void register_iommu_ppr_log_in_mmio_space(struct amd_iommu *iommu) in register_iommu_ppr_log_in_mmio_space() argument
184 ASSERT ( iommu->ppr_log.buffer ); in register_iommu_ppr_log_in_mmio_space()
186 addr_64 = virt_to_maddr(iommu->ppr_log.buffer); in register_iommu_ppr_log_in_mmio_space()
192 writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_LOW_OFFSET); in register_iommu_ppr_log_in_mmio_space()
194 power_of2_entries = get_order_from_bytes(iommu->ppr_log.alloc_size) + in register_iommu_ppr_log_in_mmio_space()
202 writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_HIGH_OFFSET); in register_iommu_ppr_log_in_mmio_space()
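
The four register_*_in_mmio_space() helpers share one shape: take the machine address of the buffer and write it split across the BASE_LOW/BASE_HIGH register pair, together with a size field (a page count for the device table, a power-of-two entry count for the command buffer, event log and PPR log). A minimal sketch of that split, with assumed register offsets and an assumed position for the size field (not the real AMD register layout):

    #include <stdint.h>

    static inline void wr32(volatile void *a, uint32_t v) { *(volatile uint32_t *)a = v; }

    /* assumed layout for the sketch: the high register carries addr[63:32] plus
     * a size/order field in its top bits */
    #define BASE_LOW_OFFSET   0x00
    #define BASE_HIGH_OFFSET  0x04
    #define SIZE_FIELD_SHIFT  24

    static void program_table_base(volatile uint8_t *mmio, uint64_t maddr,
                                   uint32_t size_field)
    {
        wr32(mmio + BASE_LOW_OFFSET, (uint32_t)maddr);            /* addr[31:0]  */
        wr32(mmio + BASE_HIGH_OFFSET,
             (uint32_t)(maddr >> 32) | (size_field << SIZE_FIELD_SHIFT));
    }
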
206 static void set_iommu_translation_control(struct amd_iommu *iommu, in set_iommu_translation_control() argument
211 entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_translation_control()
217 writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_translation_control()
220 static void set_iommu_guest_translation_control(struct amd_iommu *iommu, in set_iommu_guest_translation_control() argument
225 entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_guest_translation_control()
231 writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_guest_translation_control()
237 static void set_iommu_command_buffer_control(struct amd_iommu *iommu, in set_iommu_command_buffer_control() argument
242 entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_command_buffer_control()
247 writeq(0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET); in set_iommu_command_buffer_control()
248 writeq(0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET); in set_iommu_command_buffer_control()
255 writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_command_buffer_control()
258 static void register_iommu_exclusion_range(struct amd_iommu *iommu) in register_iommu_exclusion_range() argument
263 addr_lo = iommu->exclusion_limit; in register_iommu_exclusion_range()
264 addr_hi = iommu->exclusion_limit >> 32; in register_iommu_exclusion_range()
269 writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET); in register_iommu_exclusion_range()
274 writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_LOW_OFFSET); in register_iommu_exclusion_range()
276 addr_lo = iommu->exclusion_base & DMA_32BIT_MASK; in register_iommu_exclusion_range()
277 addr_hi = iommu->exclusion_base >> 32; in register_iommu_exclusion_range()
281 writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_HIGH_OFFSET); in register_iommu_exclusion_range()
286 set_field_in_reg_u32(iommu->exclusion_allow_all, entry, in register_iommu_exclusion_range()
290 set_field_in_reg_u32(iommu->exclusion_enable, entry, in register_iommu_exclusion_range()
293 writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET); in register_iommu_exclusion_range()
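
register_iommu_exclusion_range() programs a base/limit address pair the same way, split into 32-bit low/high writes, with the exclusion_allow_all and exclusion_enable flags folded into the low dword of the base register. A sketch with assumed offsets and bit positions:

    #include <stdint.h>

    static inline void wr32(volatile void *a, uint32_t v) { *(volatile uint32_t *)a = v; }

    /* illustrative offsets and bits only */
    #define EXCL_BASE_LOW    0x20
    #define EXCL_BASE_HIGH   0x24
    #define EXCL_LIMIT_LOW   0x28
    #define EXCL_LIMIT_HIGH  0x2c
    #define EXCL_ENABLE      (1u << 0)
    #define EXCL_ALLOW_ALL   (1u << 1)

    static void program_exclusion_range(volatile uint8_t *mmio, uint64_t base,
                                        uint64_t limit, int allow_all, int enable)
    {
        wr32(mmio + EXCL_LIMIT_HIGH, (uint32_t)(limit >> 32));
        wr32(mmio + EXCL_LIMIT_LOW,  (uint32_t)limit);
        wr32(mmio + EXCL_BASE_HIGH,  (uint32_t)(base >> 32));

        /* assumed: the control bits sit in the low-order bits of the 4K-aligned base */
        wr32(mmio + EXCL_BASE_LOW,
             ((uint32_t)base & ~0xfffu) |
             (allow_all ? EXCL_ALLOW_ALL : 0) |
             (enable ? EXCL_ENABLE : 0));
    }
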
296 static void set_iommu_event_log_control(struct amd_iommu *iommu, in set_iommu_event_log_control() argument
301 entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_event_log_control()
306 writeq(0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET); in set_iommu_event_log_control()
307 writeq(0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET); in set_iommu_event_log_control()
320 writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_event_log_control()
323 static void set_iommu_ppr_log_control(struct amd_iommu *iommu, in set_iommu_ppr_log_control() argument
328 entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_ppr_log_control()
333 writeq(0, iommu->mmio_base + IOMMU_PPR_LOG_HEAD_OFFSET); in set_iommu_ppr_log_control()
334 writeq(0, iommu->mmio_base + IOMMU_PPR_LOG_TAIL_OFFSET); in set_iommu_ppr_log_control()
347 writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in set_iommu_ppr_log_control()
353 static int iommu_read_log(struct amd_iommu *iommu, in iommu_read_log() argument
360 BUG_ON(!iommu || ((log != &iommu->event_log) && (log != &iommu->ppr_log))); in iommu_read_log()
365 tail_offest = ( log == &iommu->event_log ) ? in iommu_read_log()
369 head_offset = ( log == &iommu->event_log ) ? in iommu_read_log()
373 tail = readl(iommu->mmio_base + tail_offest); in iommu_read_log()
381 parse_func(iommu, entry); in iommu_read_log()
389 writel(head, iommu->mmio_base + head_offset); in iommu_read_log()
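
iommu_read_log() is a ring-buffer consumer shared by the event log and the PPR log: read the hardware tail pointer, walk entries from the software head up to the tail, hand each entry to the supplied parse_func, and write the new head back so the hardware can reuse the space. A stripped-down sketch of that loop, assuming fixed-size 16-byte entries and illustrative register offsets:

    #include <stdint.h>
    #include <string.h>

    static inline uint32_t rd32(volatile void *a) { return *(volatile uint32_t *)a; }
    static inline void wr32(volatile void *a, uint32_t v) { *(volatile uint32_t *)a = v; }

    #define LOG_HEAD_OFFSET  0x2010            /* illustrative */
    #define LOG_TAIL_OFFSET  0x2018            /* illustrative */
    #define ENTRY_SIZE       16

    struct ring_log {
        uint8_t *buffer;                       /* log buffer (virtual address) */
        uint32_t entries;                      /* number of entries in the ring */
        uint32_t head;                         /* software head index */
    };

    static void read_log(volatile uint8_t *mmio, struct ring_log *log,
                         void (*parse)(uint32_t entry[4]))
    {
        /* the hardware tail is a byte offset; convert it to an entry index */
        uint32_t tail = rd32(mmio + LOG_TAIL_OFFSET) / ENTRY_SIZE;

        while ( log->head != tail )
        {
            uint32_t entry[4];

            memcpy(entry, log->buffer + log->head * ENTRY_SIZE, sizeof(entry));
            parse(entry);

            if ( ++log->head == log->entries )
                log->head = 0;                 /* wrap around */
        }

        /* publish the new head back to the hardware (again a byte offset) */
        wr32(mmio + LOG_HEAD_OFFSET, log->head * ENTRY_SIZE);
    }
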
398 static void iommu_reset_log(struct amd_iommu *iommu, in iommu_reset_log() argument
400 void (*ctrl_func)(struct amd_iommu *iommu, int)) in iommu_reset_log() argument
406 BUG_ON(!iommu || ((log != &iommu->event_log) && (log != &iommu->ppr_log))); in iommu_reset_log()
408 run_bit = ( log == &iommu->event_log ) ? in iommu_reset_log()
414 entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in iommu_reset_log()
426 ctrl_func(iommu, IOMMU_CONTROL_DISABLED); in iommu_reset_log()
429 writel(log == &iommu->event_log ? IOMMU_STATUS_EVENT_OVERFLOW_MASK in iommu_reset_log()
431 iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in iommu_reset_log()
436 ctrl_func(iommu, IOMMU_CONTROL_ENABLED); in iommu_reset_log()
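
iommu_reset_log() recovers from a log overflow: poll the status register until the log's run bit clears, disable the log through the supplied ctrl_func, acknowledge the overflow bit by writing it back to the status register (the status bits are write-one-to-clear), and re-enable the log. A sketch of that order of operations, with assumed bit masks and a bounded busy-wait:

    #include <stdint.h>

    static inline uint32_t rd32(volatile void *a) { return *(volatile uint32_t *)a; }
    static inline void wr32(volatile void *a, uint32_t v) { *(volatile uint32_t *)a = v; }

    #define STATUS_OFFSET        0x2020        /* illustrative */
    #define STATUS_LOG_RUN       (1u << 3)     /* illustrative */
    #define STATUS_LOG_OVERFLOW  (1u << 0)     /* illustrative */
    #define DISABLED 0
    #define ENABLED  1

    static void reset_log(volatile uint8_t *mmio,
                          void (*ctrl_func)(volatile uint8_t *mmio, int on))
    {
        int loop;

        /* wait (bounded) for the hardware to stop walking the log */
        for ( loop = 0; loop < 1000; loop++ )
            if ( !(rd32(mmio + STATUS_OFFSET) & STATUS_LOG_RUN) )
                break;

        ctrl_func(mmio, DISABLED);

        /* status bits are write-one-to-clear: acknowledge the overflow */
        wr32(mmio + STATUS_OFFSET, STATUS_LOG_OVERFLOW);

        ctrl_func(mmio, ENABLED);
    }
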
439 static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag) in amd_iommu_msi_enable() argument
441 __msi_set_enable(iommu->seg, PCI_BUS(iommu->bdf), PCI_SLOT(iommu->bdf), in amd_iommu_msi_enable()
442 PCI_FUNC(iommu->bdf), iommu->msi.msi_attrib.pos, flag); in amd_iommu_msi_enable()
448 struct amd_iommu *iommu = desc->action->dev_id; in iommu_msi_unmask() local
450 spin_lock_irqsave(&iommu->lock, flags); in iommu_msi_unmask()
451 amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED); in iommu_msi_unmask()
452 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_msi_unmask()
453 iommu->msi.msi_attrib.host_masked = 0; in iommu_msi_unmask()
459 struct amd_iommu *iommu = desc->action->dev_id; in iommu_msi_mask() local
463 spin_lock_irqsave(&iommu->lock, flags); in iommu_msi_mask()
464 amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED); in iommu_msi_mask()
465 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_msi_mask()
466 iommu->msi.msi_attrib.host_masked = 1; in iommu_msi_mask()
524 static void parse_event_log_entry(struct amd_iommu *iommu, u32 entry[]) in parse_event_log_entry() argument
582 if ( get_dma_requestor_id(iommu->seg, bdf) == device_id ) in parse_event_log_entry()
583 pci_check_disable_device(iommu->seg, PCI_BUS(bdf), in parse_event_log_entry()
597 static void iommu_check_event_log(struct amd_iommu *iommu) in iommu_check_event_log() argument
604 iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in iommu_check_event_log()
606 iommu_read_log(iommu, &iommu->event_log, in iommu_check_event_log()
609 spin_lock_irqsave(&iommu->lock, flags); in iommu_check_event_log()
612 entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in iommu_check_event_log()
614 iommu_reset_log(iommu, &iommu->event_log, set_iommu_event_log_control); in iommu_check_event_log()
617 entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in iommu_check_event_log()
621 writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in iommu_check_event_log()
634 entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in iommu_check_event_log()
638 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_check_event_log()
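
iommu_check_event_log() ties the pieces together: acknowledge the event-log interrupt bit in the status register, drain the log via iommu_read_log(), reset the log if the overflow bit is set, re-arm the interrupt enable bit in the control register, and go round again if the status register shows another interrupt arrived while draining. A condensed sketch of that order, with illustrative offsets and hypothetical drain/reset helpers:

    #include <stdint.h>

    static inline uint32_t rd32(volatile void *a) { return *(volatile uint32_t *)a; }
    static inline void wr32(volatile void *a, uint32_t v) { *(volatile uint32_t *)a = v; }

    #define STATUS_OFFSET        0x2020        /* illustrative */
    #define STATUS_LOG_INT       (1u << 1)     /* illustrative */
    #define STATUS_LOG_OVERFLOW  (1u << 0)     /* illustrative */
    #define CTRL_OFFSET          0x18          /* illustrative */
    #define CTRL_LOG_INT_EN      (1u << 2)     /* illustrative */

    void drain_log(volatile uint8_t *mmio);    /* hypothetical: walk head..tail */
    void reset_log_on_overflow(volatile uint8_t *mmio);

    static void check_event_log(volatile uint8_t *mmio)
    {
        int retries = 10;                      /* bounded, as a precaution */

        do {
            /* acknowledge the interrupt (status bits are write-one-to-clear) */
            wr32(mmio + STATUS_OFFSET, STATUS_LOG_INT);

            drain_log(mmio);

            if ( rd32(mmio + STATUS_OFFSET) & STATUS_LOG_OVERFLOW )
                reset_log_on_overflow(mmio);

            /* re-arm the interrupt in the control register */
            wr32(mmio + CTRL_OFFSET, rd32(mmio + CTRL_OFFSET) | CTRL_LOG_INT_EN);

        } while ( (rd32(mmio + STATUS_OFFSET) & STATUS_LOG_INT) && --retries );
    }
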
641 void parse_ppr_log_entry(struct amd_iommu *iommu, u32 entry[]) in parse_ppr_log_entry() argument
677 pdev = pci_get_real_pdev(iommu->seg, bus, devfn); in parse_ppr_log_entry()
686 static void iommu_check_ppr_log(struct amd_iommu *iommu) in iommu_check_ppr_log() argument
693 iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in iommu_check_ppr_log()
695 iommu_read_log(iommu, &iommu->ppr_log, in iommu_check_ppr_log()
698 spin_lock_irqsave(&iommu->lock, flags); in iommu_check_ppr_log()
701 entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in iommu_check_ppr_log()
703 iommu_reset_log(iommu, &iommu->ppr_log, set_iommu_ppr_log_control); in iommu_check_ppr_log()
706 entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in iommu_check_ppr_log()
710 writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in iommu_check_ppr_log()
723 entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in iommu_check_ppr_log()
727 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_check_ppr_log()
732 struct amd_iommu *iommu; in do_amd_iommu_irq() local
745 for_each_amd_iommu ( iommu ) { in do_amd_iommu_irq()
746 iommu_check_event_log(iommu); in do_amd_iommu_irq()
748 if ( iommu->ppr_log.buffer != NULL ) in do_amd_iommu_irq()
749 iommu_check_ppr_log(iommu); in do_amd_iommu_irq()
758 struct amd_iommu *iommu = dev_id; in iommu_interrupt_handler() local
760 spin_lock_irqsave(&iommu->lock, flags); in iommu_interrupt_handler()
766 entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in iommu_interrupt_handler()
769 writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); in iommu_interrupt_handler()
771 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_interrupt_handler()
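
The interrupt handler itself stays minimal: it clears the event-log and PPR-log interrupt-enable bits in the control register so the line stops asserting, and the actual log scanning is done later by do_amd_iommu_irq(), which walks every IOMMU and calls iommu_check_event_log() and, when a PPR log was allocated, iommu_check_ppr_log(). A sketch of that split, with a hypothetical schedule_deferred_work() standing in for the deferral mechanism:

    #include <stdint.h>

    static inline uint32_t rd32(volatile void *a) { return *(volatile uint32_t *)a; }
    static inline void wr32(volatile void *a, uint32_t v) { *(volatile uint32_t *)a = v; }

    #define CTRL_OFFSET          0x18          /* illustrative */
    #define CTRL_EVENT_LOG_INT   (1u << 2)     /* illustrative */
    #define CTRL_PPR_LOG_INT     (1u << 14)    /* illustrative */

    void schedule_deferred_work(void);         /* hypothetical deferral hook */

    static void irq_handler(volatile uint8_t *mmio)
    {
        /* stop the log interrupts from re-asserting while the logs are drained;
         * the excerpt shows this update done under the per-IOMMU lock */
        uint32_t ctrl = rd32(mmio + CTRL_OFFSET);

        ctrl &= ~(CTRL_EVENT_LOG_INT | CTRL_PPR_LOG_INT);
        wr32(mmio + CTRL_OFFSET, ctrl);

        /* the real draining happens later, outside hard-interrupt context */
        schedule_deferred_work();
    }
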
777 static bool_t __init set_iommu_interrupt_handler(struct amd_iommu *iommu) in set_iommu_interrupt_handler() argument
791 iommu->msi.dev = pci_get_pdev(iommu->seg, PCI_BUS(iommu->bdf), in set_iommu_interrupt_handler()
792 PCI_DEVFN2(iommu->bdf)); in set_iommu_interrupt_handler()
794 if ( !iommu->msi.dev ) in set_iommu_interrupt_handler()
797 iommu->seg, PCI_BUS(iommu->bdf), in set_iommu_interrupt_handler()
798 PCI_SLOT(iommu->bdf), PCI_FUNC(iommu->bdf)); in set_iommu_interrupt_handler()
801 control = pci_conf_read16(iommu->seg, PCI_BUS(iommu->bdf), in set_iommu_interrupt_handler()
802 PCI_SLOT(iommu->bdf), PCI_FUNC(iommu->bdf), in set_iommu_interrupt_handler()
803 iommu->msi.msi_attrib.pos + PCI_MSI_FLAGS); in set_iommu_interrupt_handler()
804 iommu->msi.msi.nvec = 1; in set_iommu_interrupt_handler()
807 iommu->msi.msi_attrib.maskbit = 1; in set_iommu_interrupt_handler()
808 iommu->msi.msi.mpos = msi_mask_bits_reg(iommu->msi.msi_attrib.pos, in set_iommu_interrupt_handler()
814 ret = __setup_msi_irq(irq_to_desc(irq), &iommu->msi, handler); in set_iommu_interrupt_handler()
816 ret = request_irq(irq, 0, iommu_interrupt_handler, "amd_iommu", iommu); in set_iommu_interrupt_handler()
824 iommu->msi.irq = irq; in set_iommu_interrupt_handler()
835 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) in amd_iommu_erratum_746_workaround() argument
838 u8 bus = PCI_BUS(iommu->bdf); in amd_iommu_erratum_746_workaround()
839 u8 dev = PCI_SLOT(iommu->bdf); in amd_iommu_erratum_746_workaround()
840 u8 func = PCI_FUNC(iommu->bdf); in amd_iommu_erratum_746_workaround()
847 pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
848 value = pci_conf_read32(iommu->seg, bus, dev, func, 0xf4); in amd_iommu_erratum_746_workaround()
854 pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90 | (1 << 8)); in amd_iommu_erratum_746_workaround()
856 pci_conf_write32(iommu->seg, bus, dev, func, 0xf4, value | (1 << 2)); in amd_iommu_erratum_746_workaround()
859 iommu->seg, bus, dev, func); in amd_iommu_erratum_746_workaround()
862 pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
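
The erratum 746 workaround goes through an indirect register pair in PCI config space: write the index 0x90 to offset 0xf0, read the data at offset 0xf4, and if bit 2 is not yet set, rewrite the index with bit 8 (write enable) set, set bit 2 in the data register, and finally restore the index register with the write-enable bit cleared. A sketch of that read-modify-write over hypothetical config-space accessors (segment/bus/device/function elided):

    #include <stdint.h>

    /* hypothetical config-space accessors for the sketch */
    uint32_t cfg_read32(uint16_t offset);
    void cfg_write32(uint16_t offset, uint32_t value);

    #define INDEX_REG    0xf0
    #define DATA_REG     0xf4
    #define INDEX_WR_EN  (1u << 8)             /* write-enable bit in the index register */

    static void apply_erratum_746(void)
    {
        uint32_t value;

        cfg_write32(INDEX_REG, 0x90);          /* select indirect register 0x90 */
        value = cfg_read32(DATA_REG);

        if ( value & (1u << 2) )
            return;                            /* workaround bit already set */

        /* re-select with write enable, set the bit ... */
        cfg_write32(INDEX_REG, 0x90 | INDEX_WR_EN);
        cfg_write32(DATA_REG, value | (1u << 2));

        /* ... and clear the write-enable bit again */
        cfg_write32(INDEX_REG, 0x90);
    }
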
865 static void enable_iommu(struct amd_iommu *iommu) in enable_iommu() argument
870 spin_lock_irqsave(&iommu->lock, flags); in enable_iommu()
872 if ( iommu->enabled ) in enable_iommu()
874 spin_unlock_irqrestore(&iommu->lock, flags); in enable_iommu()
878 amd_iommu_erratum_746_workaround(iommu); in enable_iommu()
880 register_iommu_dev_table_in_mmio_space(iommu); in enable_iommu()
881 register_iommu_cmd_buffer_in_mmio_space(iommu); in enable_iommu()
882 register_iommu_event_log_in_mmio_space(iommu); in enable_iommu()
883 register_iommu_exclusion_range(iommu); in enable_iommu()
885 if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) ) in enable_iommu()
886 register_iommu_ppr_log_in_mmio_space(iommu); in enable_iommu()
888 desc = irq_to_desc(iommu->msi.irq); in enable_iommu()
893 amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED); in enable_iommu()
895 set_iommu_ht_flags(iommu); in enable_iommu()
896 set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED); in enable_iommu()
897 set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED); in enable_iommu()
899 if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) ) in enable_iommu()
900 set_iommu_ppr_log_control(iommu, IOMMU_CONTROL_ENABLED); in enable_iommu()
902 if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_GTSUP_SHIFT) ) in enable_iommu()
903 set_iommu_guest_translation_control(iommu, IOMMU_CONTROL_ENABLED); in enable_iommu()
905 set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED); in enable_iommu()
907 if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_IASUP_SHIFT) ) in enable_iommu()
908 amd_iommu_flush_all_caches(iommu); in enable_iommu()
910 iommu->enabled = 1; in enable_iommu()
911 spin_unlock_irqrestore(&iommu->lock, flags); in enable_iommu()
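
enable_iommu() fixes the bring-up order: bail out early (under the lock) if the IOMMU is already enabled, apply the erratum 746 workaround, program the device table, command buffer, event log and exclusion range (plus the PPR log when PPRSUP is advertised), set up and enable the MSI, then enable the command buffer, the logs, guest translation when GTSUP is present, and finally DMA translation itself, flushing all caches at the end when the hardware supports the invalidate-all command (IASUP). A condensed sketch of that ordering with hypothetical step helpers:

    /* hypothetical per-step helpers; the point of the sketch is the ordering */
    struct iommu_state {
        int enabled;
        int ppr_supported;                     /* PPRSUP */
        int gt_supported;                      /* GTSUP  */
        int ia_supported;                      /* IASUP  */
    };

    void apply_errata(struct iommu_state *i);
    void program_tables(struct iommu_state *i); /* dev table, cmd buf, logs, exclusion */
    void enable_msi(struct iommu_state *i);
    void enable_cmd_buffer(struct iommu_state *i);
    void enable_logs(struct iommu_state *i);
    void enable_guest_translation(struct iommu_state *i);
    void enable_translation(struct iommu_state *i);
    void flush_all_caches(struct iommu_state *i);

    static void bring_up(struct iommu_state *i)
    {
        if ( i->enabled )
            return;                            /* already up: nothing to do */

        apply_errata(i);
        program_tables(i);                     /* base registers before anything runs */
        enable_msi(i);                         /* interrupt path before the logs fire */
        enable_cmd_buffer(i);
        enable_logs(i);                        /* event log, plus PPR log if supported */

        if ( i->gt_supported )
            enable_guest_translation(i);

        enable_translation(i);                 /* DMA remapping goes live last */

        if ( i->ia_supported )
            flush_all_caches(i);               /* INVALIDATE_IOMMU_ALL when available */

        i->enabled = 1;
    }
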
972 static void * __init allocate_cmd_buffer(struct amd_iommu *iommu) in allocate_cmd_buffer() argument
975 return allocate_ring_buffer(&iommu->cmd_buffer, sizeof(cmd_entry_t), in allocate_cmd_buffer()
980 static void * __init allocate_event_log(struct amd_iommu *iommu) in allocate_event_log() argument
983 return allocate_ring_buffer(&iommu->event_log, sizeof(event_entry_t), in allocate_event_log()
987 static void * __init allocate_ppr_log(struct amd_iommu *iommu) in allocate_ppr_log() argument
990 return allocate_ring_buffer(&iommu->ppr_log, sizeof(ppr_entry_t), in allocate_ppr_log()
994 static int __init amd_iommu_init_one(struct amd_iommu *iommu) in amd_iommu_init_one() argument
996 if ( map_iommu_mmio_region(iommu) != 0 ) in amd_iommu_init_one()
999 get_iommu_features(iommu); in amd_iommu_init_one()
1001 if ( iommu->features ) in amd_iommu_init_one()
1004 if ( allocate_cmd_buffer(iommu) == NULL ) in amd_iommu_init_one()
1007 if ( allocate_event_log(iommu) == NULL ) in amd_iommu_init_one()
1010 if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) ) in amd_iommu_init_one()
1011 if ( allocate_ppr_log(iommu) == NULL ) in amd_iommu_init_one()
1014 if ( !set_iommu_interrupt_handler(iommu) ) in amd_iommu_init_one()
1021 iommu->dev_table.alloc_size = device_table.alloc_size; in amd_iommu_init_one()
1022 iommu->dev_table.entries = device_table.entries; in amd_iommu_init_one()
1023 iommu->dev_table.buffer = device_table.buffer; in amd_iommu_init_one()
1025 enable_iommu(iommu); in amd_iommu_init_one()
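
amd_iommu_init_one() is the per-IOMMU init path: map the MMIO window, read the extended features, allocate the command buffer and event log (and the PPR log only when PPRSUP is set), install the interrupt handler, attach the shared device table, and enable the IOMMU; each step bails out on failure (the error handling itself does not show up in the matches above). A sketch of that shape with hypothetical step functions:

    struct dev;                                /* hypothetical per-IOMMU state */

    int map_mmio(struct dev *d);
    void read_features(struct dev *d);
    int alloc_cmd_buffer(struct dev *d);
    int alloc_event_log(struct dev *d);
    int alloc_ppr_log(struct dev *d);
    int has_ppr(struct dev *d);
    int setup_irq(struct dev *d);
    void attach_shared_device_table(struct dev *d);
    void enable_dev(struct dev *d);

    static int init_one(struct dev *d)
    {
        if ( map_mmio(d) )
            return -1;

        read_features(d);

        if ( alloc_cmd_buffer(d) || alloc_event_log(d) )
            return -1;

        if ( has_ppr(d) && alloc_ppr_log(d) )
            return -1;                         /* PPR log only when PPRSUP is set */

        if ( setup_irq(d) )
            return -1;

        attach_shared_device_table(d);         /* one device table shared by all IOMMUs */
        enable_dev(d);
        return 0;
    }
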
1039 struct amd_iommu *iommu, *next; in amd_iommu_init_cleanup() local
1042 list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list ) in amd_iommu_init_cleanup()
1044 list_del(&iommu->list); in amd_iommu_init_cleanup()
1045 if ( iommu->enabled ) in amd_iommu_init_cleanup()
1047 deallocate_ring_buffer(&iommu->cmd_buffer); in amd_iommu_init_cleanup()
1048 deallocate_ring_buffer(&iommu->event_log); in amd_iommu_init_cleanup()
1049 deallocate_ring_buffer(&iommu->ppr_log); in amd_iommu_init_cleanup()
1050 unmap_iommu_mmio_region(iommu); in amd_iommu_init_cleanup()
1052 xfree(iommu); in amd_iommu_init_cleanup()
1141 ivrs_mappings[bdf].iommu = NULL; in alloc_ivrs_mappings()
1227 struct amd_iommu *iommu; in amd_iommu_init() local
1253 for_each_amd_iommu ( iommu ) in amd_iommu_init()
1255 rc = alloc_ivrs_mappings(iommu->seg); in amd_iommu_init()
1284 for_each_amd_iommu ( iommu ) in amd_iommu_init()
1286 rc = amd_iommu_init_one(iommu); in amd_iommu_init()
1298 static void disable_iommu(struct amd_iommu *iommu) in disable_iommu() argument
1302 spin_lock_irqsave(&iommu->lock, flags); in disable_iommu()
1304 if ( !iommu->enabled ) in disable_iommu()
1306 spin_unlock_irqrestore(&iommu->lock, flags); in disable_iommu()
1310 amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED); in disable_iommu()
1311 set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED); in disable_iommu()
1312 set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED); in disable_iommu()
1314 if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) ) in disable_iommu()
1315 set_iommu_ppr_log_control(iommu, IOMMU_CONTROL_DISABLED); in disable_iommu()
1317 if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_GTSUP_SHIFT) ) in disable_iommu()
1318 set_iommu_guest_translation_control(iommu, IOMMU_CONTROL_DISABLED); in disable_iommu()
1320 set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED); in disable_iommu()
1322 iommu->enabled = 0; in disable_iommu()
1324 spin_unlock_irqrestore(&iommu->lock, flags); in disable_iommu()
1341 struct amd_iommu *iommu; in _invalidate_all_devices() local
1345 iommu = find_iommu_for_device(seg, bdf); in _invalidate_all_devices()
1347 if ( iommu ) in _invalidate_all_devices()
1349 spin_lock_irqsave(&iommu->lock, flags); in _invalidate_all_devices()
1350 amd_iommu_flush_device(iommu, req_id); in _invalidate_all_devices()
1351 amd_iommu_flush_intremap(iommu, req_id); in _invalidate_all_devices()
1352 spin_unlock_irqrestore(&iommu->lock, flags); in _invalidate_all_devices()
1373 struct amd_iommu *iommu; in amd_iommu_crash_shutdown() local
1375 for_each_amd_iommu ( iommu ) in amd_iommu_crash_shutdown()
1376 disable_iommu(iommu); in amd_iommu_crash_shutdown()
1381 struct amd_iommu *iommu; in amd_iommu_resume() local
1383 for_each_amd_iommu ( iommu ) in amd_iommu_resume()
1389 disable_iommu(iommu); in amd_iommu_resume()
1390 enable_iommu(iommu); in amd_iommu_resume()
1394 if ( !amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_IASUP_SHIFT) ) in amd_iommu_resume()
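
amd_iommu_resume() disables and then re-enables each IOMMU so every register is reprogrammed from the saved software state; when the hardware lacks the invalidate-all feature (IASUP), the caches have to be flushed piecewise afterwards, which is what _invalidate_all_devices() does per device (device-table entry plus interrupt remapping, under the per-IOMMU lock). A sketch of the resume flow with hypothetical helpers:

    struct dev;                                /* hypothetical per-IOMMU state */

    void disable_dev(struct dev *d);
    void enable_dev(struct dev *d);
    int has_invalidate_all(struct dev *d);
    void flush_device_table_entries(void);     /* per-device DTE flush */
    void flush_interrupt_remapping(void);
    void flush_domain_pages(void);

    static void resume_one(struct dev *d)
    {
        /* make sure nothing stale is left before re-programming the registers */
        disable_dev(d);
        enable_dev(d);

        if ( !has_invalidate_all(d) )
        {
            /* no INVALIDATE_IOMMU_ALL command: flush the caches piecewise */
            flush_device_table_entries();
            flush_interrupt_remapping();
            flush_domain_pages();
        }
    }
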