Lines matching refs: iommu
232 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in find_dev_data() local
241 if (translation_pre_enabled(iommu)) in find_dev_data()
344 struct amd_iommu *iommu; in iommu_init_device() local
346 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
347 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
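
Throughout this listing, the per-device IOMMU is recovered by indexing amd_iommu_rlookup_table[] with the device's 16-bit PCI device ID, as find_dev_data() and iommu_init_device() do above. A minimal user-space sketch of that reverse-lookup pattern, with illustrative names (fake_iommu, rlookup) standing in for the kernel's types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_DEVID (1 << 16)          /* 16-bit PCI device ID space */

struct fake_iommu { int index; };    /* stand-in for struct amd_iommu */

/* devid -> owning IOMMU; unpopulated slots stay NULL */
static struct fake_iommu *rlookup_table[MAX_DEVID];

static struct fake_iommu *rlookup(uint16_t devid)
{
    return rlookup_table[devid];     /* may be NULL: callers must check */
}

int main(void)
{
    struct fake_iommu unit0 = { .index = 0 };

    rlookup_table[0x0010] = &unit0;  /* pretend devid 0x0010 belongs to unit 0 */

    struct fake_iommu *iommu = rlookup(0x0010);
    if (!iommu)
        return EXIT_FAILURE;
    printf("devid 0x0010 -> iommu index %d\n", iommu->index);
    return 0;
}

Several call sites above (e.g. amd_iommu_detach_device(), modify_irte_ga()) show the same NULL check before the pointer is used.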
524 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
526 struct device *dev = iommu->iommu.dev; in iommu_print_event()
613 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
617 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
618 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
621 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
625 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
628 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) in iommu_handle_ppr_entry() argument
646 static void iommu_poll_ppr_log(struct amd_iommu *iommu) in iommu_poll_ppr_log() argument
650 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
653 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
654 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
661 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
686 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
689 iommu_handle_ppr_entry(iommu, entry); in iommu_poll_ppr_log()
692 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
693 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
708 static void iommu_poll_ga_log(struct amd_iommu *iommu) in iommu_poll_ga_log() argument
712 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
715 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
716 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
722 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
730 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
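
iommu_poll_events(), iommu_poll_ppr_log() and iommu_poll_ga_log() share one consumer pattern: read the hardware's head and tail registers over MMIO, walk the entries from head to tail with wrap-around, then write the advanced head back so the hardware can reuse the space. (The PPR poller additionally re-reads head and tail, lines 692-693, to pick up entries appended while one was being handled.) A hedged sketch with ordinary memory standing in for the MMIO registers and the log buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LOG_SIZE   4096
#define ENTRY_SIZE 16                  /* one log entry, as in the event log */

/* Illustrative stand-ins for the MMIO head/tail registers and log buffer. */
static uint32_t mmio_head, mmio_tail;
static uint8_t  log_buf[LOG_SIZE];

static void handle_entry(const uint8_t *entry)
{
    printf("entry tag 0x%02x\n", entry[0]);
}

static void poll_log(void)
{
    uint32_t head = mmio_head;         /* readl(... MMIO_..._HEAD_OFFSET) */
    uint32_t tail = mmio_tail;         /* readl(... MMIO_..._TAIL_OFFSET) */

    while (head != tail) {
        handle_entry(log_buf + head);
        head = (head + ENTRY_SIZE) % LOG_SIZE;   /* ring wrap-around */
    }
    mmio_head = head;                  /* writel(head, MMIO_..._HEAD_OFFSET) */
}

int main(void)
{
    memset(log_buf, 0xab, ENTRY_SIZE); /* hardware "produced" one entry */
    mmio_tail = ENTRY_SIZE;
    poll_log();
    return 0;
}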
752 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) in amd_iommu_set_pci_msi_domain() argument
758 dev_set_msi_domain(dev, iommu->msi_domain); in amd_iommu_set_pci_msi_domain()
763 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } in amd_iommu_set_pci_msi_domain() argument
773 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_int_thread() local
774 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
779 iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
783 iommu_poll_events(iommu); in amd_iommu_int_thread()
788 iommu_poll_ppr_log(iommu); in amd_iommu_int_thread()
794 iommu_poll_ga_log(iommu); in amd_iommu_int_thread()
811 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
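
amd_iommu_int_thread() reads the MMIO status register, acknowledges the pending bits by writing them back (line 779), dispatches to the matching poller, and re-reads status at the end (line 811) so a bit raised while a log was being drained is not missed. A small sketch of that ack-then-recheck loop; the bit positions are illustrative, not the hardware's, and write-1-to-clear semantics are modeled as a plain clear:

#include <stdint.h>
#include <stdio.h>

#define STATUS_EVT_PENDING  (1u << 1)  /* illustrative bit layout */
#define STATUS_PPR_PENDING  (1u << 6)
#define STATUS_PENDING_MASK (STATUS_EVT_PENDING | STATUS_PPR_PENDING)

static uint32_t mmio_status = STATUS_EVT_PENDING;   /* stand-in register */

static void poll_events(void) { puts("drained event log"); }
static void poll_ppr(void)    { puts("drained PPR log"); }

static void int_thread(void)
{
    uint32_t status = mmio_status;
    while (status & STATUS_PENDING_MASK) {
        mmio_status &= ~status;        /* ack: write-1-to-clear in hardware */
        if (status & STATUS_EVT_PENDING)
            poll_events();
        if (status & STATUS_PPR_PENDING)
            poll_ppr();
        status = mmio_status;          /* recheck: new work may have arrived */
    }
}

int main(void) { int_thread(); return 0; }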
827 static int wait_on_sem(struct amd_iommu *iommu, u64 data) in wait_on_sem() argument
831 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
844 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
851 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
852 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
856 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
859 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
863 struct amd_iommu *iommu, in build_completion_wait() argument
866 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
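
copy_cmd_to_buffer() appends a command at cmd_buf_tail and rings the doorbell by writing the new tail to MMIO_CMD_TAIL_OFFSET; build_completion_wait() builds the COMPLETION_WAIT command, which tells the hardware to store a 64-bit payload to the physical address of iommu->cmd_sem once everything queued before it has retired. A sketch of that command with a deliberately simplified layout (the real struct iommu_cmd packs address and data into 32-bit words with flag bits):

#include <stdint.h>
#include <stdio.h>

/* Two plain u64 fields keep the sketch readable; this is not the
   hardware's command encoding. */
struct fake_cmd { uint64_t sem_paddr; uint64_t data; };

static void build_completion_wait(struct fake_cmd *cmd,
                                  volatile uint64_t *sem, uint64_t data)
{
    cmd->sem_paddr = (uint64_t)(uintptr_t)sem; /* iommu_virt_to_phys(cmd_sem) */
    cmd->data = data;
}

int main(void)
{
    volatile uint64_t sem = 0;         /* models *iommu->cmd_sem */
    struct fake_cmd cmd;

    build_completion_wait(&cmd, &sem, 42);

    /* "Hardware" retires the command by storing the payload to the address: */
    *(volatile uint64_t *)(uintptr_t)cmd.sem_paddr = cmd.data;

    printf("sem = %llu\n", (unsigned long long)sem);
    return 0;
}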
1023 static int __iommu_queue_command_sync(struct amd_iommu *iommu, in __iommu_queue_command_sync() argument
1030 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1032 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1046 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1052 copy_cmd_to_buffer(iommu, cmd); in __iommu_queue_command_sync()
1055 iommu->need_sync = sync; in __iommu_queue_command_sync()
1060 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
1067 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1068 ret = __iommu_queue_command_sync(iommu, cmd, sync); in iommu_queue_command_sync()
1069 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1074 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1076 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
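
__iommu_queue_command_sync() sizes the free space in the command ring with modular arithmetic (lines 1030 and 1032): next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE, then left = (head - next_tail) % CMD_BUFFER_SIZE, where unsigned wrap-around makes the subtraction correct whichever offset is ahead. When the ring looks full, head is re-read from MMIO (line 1046) in case the hardware has consumed more commands in the meantime. A standalone sketch of the arithmetic, with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

#define CMD_BUFFER_SIZE 8192           /* power of two, byte-offset ring */
#define CMD_SIZE        16             /* one command entry */

/* Free bytes between producer tail and consumer head. Zero means full:
   one slot is sacrificed to distinguish full from empty. */
static uint32_t ring_space_left(uint32_t head, uint32_t tail)
{
    uint32_t next_tail = (tail + CMD_SIZE) % CMD_BUFFER_SIZE;
    /* Unsigned wrap-around keeps this correct when head < next_tail. */
    return (head - next_tail) % CMD_BUFFER_SIZE;
}

int main(void)
{
    printf("empty ring:       %u bytes left\n",
           (unsigned)ring_space_left(0, 0));
    printf("one slot queued:  %u bytes left\n",
           (unsigned)ring_space_left(0, CMD_SIZE));
    printf("consumer lagging: %u bytes left\n",
           (unsigned)ring_space_left(CMD_SIZE, CMD_BUFFER_SIZE - CMD_SIZE));
    return 0;
}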
1083 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1090 if (!iommu->need_sync) in iommu_completion_wait()
1093 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1095 data = ++iommu->cmd_sem_val; in iommu_completion_wait()
1096 build_completion_wait(&cmd, iommu, data); in iommu_completion_wait()
1098 ret = __iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
1102 ret = wait_on_sem(iommu, data); in iommu_completion_wait()
1105 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
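
iommu_completion_wait() skips the whole exercise when need_sync is clear; otherwise, under iommu->lock, it takes a fresh sequence number from cmd_sem_val, queues a COMPLETION_WAIT carrying it, and spins in wait_on_sem() (line 831 above) until the hardware stores that value to *iommu->cmd_sem or LOOP_TIMEOUT expires. Using a monotonically increasing value rather than a flag means a stale in-flight wait can never satisfy a newer one. A sketch of the sequencing, with the hardware side faked as a direct store:

#include <stdint.h>
#include <stdio.h>

#define LOOP_TIMEOUT 100000

static volatile uint64_t cmd_sem;      /* models *iommu->cmd_sem */
static uint64_t cmd_sem_val;           /* models iommu->cmd_sem_val */

/* Models the hardware retiring the COMPLETION_WAIT we queued. */
static void hw_complete(uint64_t data) { cmd_sem = data; }

static int wait_on_sem(uint64_t data)
{
    unsigned long i = 0;
    while (cmd_sem != data && i < LOOP_TIMEOUT)
        i++;                           /* the driver also delays per loop */
    return (i == LOOP_TIMEOUT) ? -1 : 0;
}

int main(void)
{
    uint64_t data = ++cmd_sem_val;     /* fresh sequence number per sync */
    hw_complete(data);                 /* pretend the IOMMU finished */
    printf("wait_on_sem: %d\n", wait_on_sem(data));
    return 0;
}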
1110 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1116 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1119 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) in amd_iommu_flush_dte_all() argument
1124 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1126 iommu_completion_wait(iommu); in amd_iommu_flush_dte_all()
1133 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) in amd_iommu_flush_tlb_all() argument
1141 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_all()
1144 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_all()
1147 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) in amd_iommu_flush_tlb_domid() argument
1153 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_domid()
1155 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_domid()
1158 static void amd_iommu_flush_all(struct amd_iommu *iommu) in amd_iommu_flush_all() argument
1164 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_all()
1165 iommu_completion_wait(iommu); in amd_iommu_flush_all()
1168 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1174 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1177 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) in amd_iommu_flush_irt_all() argument
1182 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1184 iommu_completion_wait(iommu); in amd_iommu_flush_irt_all()
1187 void iommu_flush_all_caches(struct amd_iommu *iommu) in iommu_flush_all_caches() argument
1189 if (iommu_feature(iommu, FEATURE_IA)) { in iommu_flush_all_caches()
1190 amd_iommu_flush_all(iommu); in iommu_flush_all_caches()
1192 amd_iommu_flush_dte_all(iommu); in iommu_flush_all_caches()
1193 amd_iommu_flush_irt_all(iommu); in iommu_flush_all_caches()
1194 amd_iommu_flush_tlb_all(iommu); in iommu_flush_all_caches()
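
iommu_flush_all_caches() takes the cheap path when the hardware advertises FEATURE_IA ("invalidate all"): a single INVALIDATE_IOMMU_ALL command replaces the separate device-table, interrupt-table and TLB flushes, with a completion wait either way. A sketch of the capability-gated dispatch; the feature bit position is illustrative:

#include <stdio.h>

#define FEATURE_IA (1u << 6)           /* illustrative bit position */

struct fake_iommu { unsigned int features; };

static void flush_all(void)     { puts("INVALIDATE_IOMMU_ALL"); }
static void flush_dte_all(void) { puts("flush every device table entry"); }
static void flush_irt_all(void) { puts("flush every interrupt remap table"); }
static void flush_tlb_all(void) { puts("flush every domain TLB"); }

static void flush_all_caches(struct fake_iommu *iommu)
{
    if (iommu->features & FEATURE_IA) {
        flush_all();                   /* one command covers everything */
    } else {
        flush_dte_all();
        flush_irt_all();
        flush_tlb_all();
    }
    /* the real driver follows up with iommu_completion_wait() */
}

int main(void)
{
    struct fake_iommu old_hw = { 0 }, new_hw = { FEATURE_IA };
    flush_all_caches(&old_hw);
    flush_all_caches(&new_hw);
    return 0;
}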
1204 struct amd_iommu *iommu; in device_flush_iotlb() local
1209 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1213 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1218 struct amd_iommu *iommu = data; in device_flush_dte_alias() local
1220 return iommu_flush_dte(iommu, alias); in device_flush_dte_alias()
1228 struct amd_iommu *iommu; in device_flush_dte() local
1232 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1236 device_flush_dte_alias, iommu); in device_flush_dte()
1238 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1244 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
1478 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in set_dte_entry() local
1480 if (iommu_feature(iommu, FEATURE_EPHSUP)) in set_dte_entry()
1523 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in set_dte_entry() local
1525 amd_iommu_flush_tlb_domid(iommu, old_domid); in set_dte_entry()
1541 struct amd_iommu *iommu; in do_attach() local
1544 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1552 domain->dev_iommu[iommu->index] += 1; in do_attach()
1566 struct amd_iommu *iommu; in do_detach() local
1568 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
1586 domain->dev_iommu[iommu->index] -= 1; in do_detach()
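
do_attach() and do_detach() maintain a per-IOMMU device count in domain->dev_iommu[iommu->index] (lines 1552 and 1586), so domain-wide flush paths such as __flush_pasid() can skip IOMMUs behind which the domain has no devices. A sketch of that bookkeeping, with illustrative types:

#include <stdio.h>

#define MAX_IOMMUS 32

struct fake_domain { int dev_iommu[MAX_IOMMUS]; };
struct fake_iommu  { int index; };

static void do_attach(struct fake_domain *d, struct fake_iommu *i)
{
    d->dev_iommu[i->index] += 1;       /* one more device behind this unit */
}

static void do_detach(struct fake_domain *d, struct fake_iommu *i)
{
    d->dev_iommu[i->index] -= 1;
}

/* Flush only where the domain actually has devices, as the driver's
   domain-wide flush paths do by testing dev_iommu[] first. */
static void flush_domain(struct fake_domain *d)
{
    for (int i = 0; i < MAX_IOMMUS; i++)
        if (d->dev_iommu[i])
            printf("queue flush on iommu %d\n", i);
}

int main(void)
{
    struct fake_domain dom = { { 0 } };
    struct fake_iommu unit3 = { .index = 3 };

    do_attach(&dom, &unit3);
    flush_domain(&dom);                /* flushes only iommu 3 */
    do_detach(&dom, &unit3);
    return 0;
}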
1745 struct amd_iommu *iommu; in amd_iommu_probe_device() local
1752 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_probe_device()
1755 return &iommu->iommu; in amd_iommu_probe_device()
1764 amd_iommu_set_pci_msi_domain(dev, iommu); in amd_iommu_probe_device()
1765 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
1768 iommu_completion_wait(iommu); in amd_iommu_probe_device()
1783 struct amd_iommu *iommu; in amd_iommu_release_device() local
1788 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_release_device()
1791 iommu_completion_wait(iommu); in amd_iommu_release_device()
2009 struct amd_iommu *iommu; in amd_iommu_detach_device() local
2017 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_detach_device()
2018 if (!iommu) in amd_iommu_detach_device()
2027 iommu_completion_wait(iommu); in amd_iommu_detach_device()
2035 struct amd_iommu *iommu; in amd_iommu_attach_device() local
2044 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2045 if (!iommu) in amd_iommu_attach_device()
2062 iommu_completion_wait(iommu); in amd_iommu_attach_device()
2402 struct amd_iommu *iommu; in __flush_pasid() local
2413 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
2418 ret = iommu_queue_command(iommu, &cmd); in __flush_pasid()
2572 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
2576 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
2581 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
2691 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
2696 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
2716 struct amd_iommu *iommu; in alloc_irq_table() local
2722 iommu = amd_iommu_rlookup_table[devid]; in alloc_irq_table()
2723 if (!iommu) in alloc_irq_table()
2733 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2751 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2762 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2765 set_remap_table_entry(iommu, alias, table); in alloc_irq_table()
2768 iommu_completion_wait(iommu); in alloc_irq_table()
2786 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in alloc_irq_index() local
2788 if (!iommu) in alloc_irq_index()
2803 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
2813 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
2835 struct amd_iommu *iommu; in modify_irte_ga() local
2839 iommu = amd_iommu_rlookup_table[devid]; in modify_irte_ga()
2840 if (iommu == NULL) in modify_irte_ga()
2868 iommu_flush_irt(iommu, devid); in modify_irte_ga()
2869 iommu_completion_wait(iommu); in modify_irte_ga()
2877 struct amd_iommu *iommu; in modify_irte() local
2880 iommu = amd_iommu_rlookup_table[devid]; in modify_irte()
2881 if (iommu == NULL) in modify_irte()
2892 iommu_flush_irt(iommu, devid); in modify_irte()
2893 iommu_completion_wait(iommu); in modify_irte()
2901 struct amd_iommu *iommu; in free_irte() local
2904 iommu = amd_iommu_rlookup_table[devid]; in free_irte()
2905 if (iommu == NULL) in free_irte()
2913 iommu->irte_ops->clear_allocated(table, index); in free_irte()
2916 iommu_flush_irt(iommu, devid); in free_irte()
2917 iommu_completion_wait(iommu); in free_irte()
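
alloc_irq_index() scans a device's remap table for a run of consecutive free slots and marks them allocated through iommu->irte_ops (lines 2803-2813); free_irte() clears the mark and then flushes the IRT. A sketch of the consecutive-slot scan over a plain bitmap, which is illustrative rather than the driver's actual irte_ops bookkeeping:

#include <stdbool.h>
#include <stdio.h>

#define MAX_IRTES 256

static bool allocated[MAX_IRTES];      /* stand-in for irte_ops state */

/* Find `count` consecutive free slots; returns the first index or -1. */
static int alloc_irq_index(int count)
{
    int c = 0;
    for (int index = 0; index < MAX_IRTES; index++) {
        if (allocated[index]) {
            c = 0;                     /* run broken, start counting over */
            continue;
        }
        if (++c == count) {
            int first = index - count + 1;
            for (int i = first; i <= index; i++)
                allocated[i] = true;   /* set_allocated() on each slot */
            return first;
        }
    }
    return -1;                         /* table full or too fragmented */
}

static void free_irte(int index)
{
    allocated[index] = false;          /* clear_allocated(); the real code
                                          then flushes the IRT and waits */
}

int main(void)
{
    allocated[1] = true;               /* fragment the table a little */
    printf("got index %d\n", alloc_irq_index(2));  /* expect 2 */
    free_irte(2);
    return 0;
}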
3091 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_prepare_irte() local
3093 if (!iommu) in irq_remapping_prepare_irte()
3098 iommu->irte_ops->prepare(data->entry, apic->delivery_mode, in irq_remapping_prepare_irte()
3169 struct amd_iommu *iommu; in irq_remapping_alloc() local
3179 iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_alloc()
3181 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3270 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3280 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_activate() local
3283 if (!iommu) in irq_remapping_activate()
3286 iommu->irte_ops->activate(data->entry, irte_info->devid, in irq_remapping_activate()
3288 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); in irq_remapping_activate()
3297 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_deactivate() local
3299 if (iommu) in irq_remapping_deactivate()
3300 iommu->irte_ops->deactivate(data->entry, irte_info->devid, in irq_remapping_deactivate()
3307 struct amd_iommu *iommu; in irq_remapping_select() local
3321 iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_select()
3322 return iommu && iommu->ir_domain == d; in irq_remapping_select()
3393 struct amd_iommu *iommu; in amd_ir_set_vcpu_affinity() local
3420 iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_vcpu_affinity()
3421 if (iommu == NULL) in amd_ir_set_vcpu_affinity()
3447 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, in amd_ir_update_irte() argument
3457 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, in amd_ir_update_irte()
3469 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_affinity() local
3472 if (!iommu) in amd_ir_set_affinity()
3479 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); in amd_ir_set_affinity()
3505 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
3509 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
3512 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
3513 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
3518 iommu->ir_domain->parent = arch_get_ir_parent_domain(); in amd_iommu_create_irq_domain()
3519 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, in amd_iommu_create_irq_domain()
3521 iommu->index); in amd_iommu_create_irq_domain()
3528 struct amd_iommu *iommu; in amd_iommu_update_ga() local
3539 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_update_ga()
3540 if (!iommu) in amd_iommu_update_ga()
3562 iommu_flush_irt(iommu, devid); in amd_iommu_update_ga()
3563 iommu_completion_wait(iommu); in amd_iommu_update_ga()
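
modify_irte_ga(), modify_irte() and amd_iommu_update_ga() all follow the same ordering: edit the in-memory interrupt-remap entry, issue iommu_flush_irt() so the IOMMU drops its cached copy, then iommu_completion_wait() so the caller does not proceed until the invalidation has been processed (lines 3562-3563 above). A sketch of that update-flush-wait sequence, with the hardware's IRTE cache faked as a second variable:

#include <stdint.h>
#include <stdio.h>

struct fake_irte { uint64_t fields; };

static struct fake_irte table_entry;   /* in-memory remap table entry */
static struct fake_irte hw_cache;      /* stand-in for the IOMMU's cached copy */

static void iommu_flush_irt(void)      /* INVALIDATE_INTERRUPT_TABLE */
{
    hw_cache = table_entry;            /* hardware refetches from memory */
}

static void iommu_completion_wait(void)
{
    /* real code queues COMPLETION_WAIT and polls cmd_sem; see the
       sequencing sketch earlier in this listing */
}

static void update_ga(uint64_t new_fields)
{
    table_entry.fields = new_fields;   /* edit the in-memory entry */
    iommu_flush_irt();                 /* invalidate the cached IRTE */
    iommu_completion_wait();           /* don't return until it took effect */
}

int main(void)
{
    update_ga(0x1234);
    printf("hw sees 0x%llx\n", (unsigned long long)hw_cache.fields);
    return 0;
}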