Lines matching refs:devid — cross-reference of the identifier devid in the AMD IOMMU driver (drivers/iommu/amd/iommu.c); each entry shows the source line number, the matching code, and the enclosing function.
78 static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
80 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
138 struct dev_table_entry *ptr = &dev_table[dev_data->devid]; in update_dte256()
146 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
151 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
159 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
168 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
181 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
186 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
206 ptr = &dev_table[dev_data->devid]; in get_dte256()
274 return p1->devid; in get_acpihid_device_id()
310 u32 devid = get_acpihid_device_id(dev, NULL); in get_device_segment() local
312 seg = PCI_SBDF_TO_SEGID(devid); in get_device_segment()
319 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid) in amd_iommu_set_rlookup_table() argument
323 pci_seg->rlookup_table[devid] = iommu; in amd_iommu_set_rlookup_table()
326 static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid) in __rlookup_amd_iommu() argument
332 return pci_seg->rlookup_table[devid]; in __rlookup_amd_iommu()
340 int devid = get_device_sbdf_id(dev); in rlookup_amd_iommu() local
342 if (devid < 0) in rlookup_amd_iommu()
344 return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid)); in rlookup_amd_iommu()
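The lookup path above treats a 32-bit SBDF value as the PCI segment in the high 16 bits and the devid (bus/devfn) in the low 16 bits; line 3814 below performs the same split by hand with (devid >> 16) and (devid & 0xffff). A minimal standalone sketch of that packing, with hypothetical helper names standing in for the kernel's PCI_SBDF_TO_SEGID/PCI_SBDF_TO_DEVID macros:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's PCI_SBDF_TO_SEGID and
 * PCI_SBDF_TO_DEVID: segment in bits 31:16, devid in bits 15:0,
 * matching the (devid >> 16) / (devid & 0xffff) split at line 3814. */
static inline uint16_t sbdf_to_segid(uint32_t sbdf) { return sbdf >> 16; }
static inline uint16_t sbdf_to_devid(uint32_t sbdf) { return sbdf & 0xffff; }

int main(void)
{
	uint32_t sbdf = 0x0001a0f8;	/* segment 0x0001, devid 0xa0f8 */

	printf("seg=0x%04x devid=0x%04x\n",
	       sbdf_to_segid(sbdf), sbdf_to_devid(sbdf));
	return 0;
}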
347 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) in alloc_dev_data() argument
358 dev_data->devid = devid; in alloc_dev_data()
365 struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid) in search_dev_data() argument
376 if (dev_data->devid == devid) in search_dev_data()
388 u16 devid = pci_dev_id(pdev); in clone_alias() local
391 if (devid == alias) in clone_alias()
401 pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid); in clone_alias()
461 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid) in find_dev_data() argument
465 dev_data = search_dev_data(iommu, devid); in find_dev_data()
468 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
485 int devid; in acpihid_device_group() local
487 devid = get_acpihid_device_id(dev, &entry); in acpihid_device_group()
488 if (devid < 0) in acpihid_device_group()
489 return ERR_PTR(devid); in acpihid_device_group()
492 if ((devid == p->devid) && p->group) in acpihid_device_group()
651 int devid, sbdf; in check_device() local
659 devid = PCI_SBDF_TO_DEVID(sbdf); in check_device()
667 if (devid > pci_seg->last_bdf) in check_device()
676 int devid, sbdf; in iommu_init_device() local
685 devid = PCI_SBDF_TO_DEVID(sbdf); in iommu_init_device()
686 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
717 int devid, sbdf; in iommu_ignore_device() local
723 devid = PCI_SBDF_TO_DEVID(sbdf); in iommu_ignore_device()
724 pci_seg->rlookup_table[devid] = NULL; in iommu_ignore_device()
725 memset(&dev_table[devid], 0, sizeof(struct dev_table_entry)); in iommu_ignore_device()
737 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid) in dump_dte_entry() argument
741 struct iommu_dev_data *dev_data = find_dev_data(iommu, devid); in dump_dte_entry()
761 int devid, vmg_tag, flags; in amd_iommu_report_rmp_hw_error() local
765 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in amd_iommu_report_rmp_hw_error()
770 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_hw_error()
771 devid & 0xff); in amd_iommu_report_rmp_hw_error()
782 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_hw_error()
793 int devid, flags_rmp, vmg_tag, flags; in amd_iommu_report_rmp_fault() local
797 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in amd_iommu_report_rmp_fault()
803 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_fault()
804 devid & 0xff); in amd_iommu_report_rmp_fault()
815 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_fault()
830 u16 devid, u16 domain_id, in amd_iommu_report_page_fault() argument
836 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_page_fault()
837 devid & 0xff); in amd_iommu_report_page_fault()
852 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), in amd_iommu_report_page_fault()
853 PCI_FUNC(devid), domain_id); in amd_iommu_report_page_fault()
871 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_page_fault()
883 int type, devid, flags, tag; in iommu_print_event() local
891 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in iommu_print_event()
909 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags); in iommu_print_event()
916 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
919 dump_dte_entry(iommu, devid); in iommu_print_event()
924 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
929 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
942 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
947 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
960 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
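All of the event reports above render devid as bus:slot.func: the high byte of devid is the PCI bus and the low byte is the devfn that PCI_SLOT()/PCI_FUNC() decompose further, which is also why pci_get_domain_bus_and_slot() is handed the raw devfn as devid & 0xff. A self-contained sketch of the decoding, using local macros equivalent to the kernel's PCI_BUS_NUM/PCI_SLOT/PCI_FUNC:

#include <stdint.h>
#include <stdio.h>

/* Same bit layout the event handlers rely on: devid = bus << 8 | devfn,
 * devfn = slot << 3 | func. The masks make it safe to pass the whole
 * devid to SLOT()/FUNC(), exactly as the driver does. */
#define BUS_NUM(devid)	(((devid) >> 8) & 0xff)
#define SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define FUNC(devfn)	((devfn) & 0x07)

int main(void)
{
	uint16_t devid = 0xa0f8;	/* example value */

	/* Prints "a0:1f.0", the format used in the event-log messages. */
	printf("%02x:%02x.%x\n", BUS_NUM(devid), SLOT(devid), FUNC(devid));
	return 0;
}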
1208 static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) in build_inv_dte() argument
1211 cmd->data[0] = devid; in build_inv_dte()
1277 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, in build_inv_iotlb_pages() argument
1285 cmd->data[0] = devid; in build_inv_iotlb_pages()
1287 cmd->data[1] = devid; in build_inv_iotlb_pages()
1299 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid, in build_complete_ppr() argument
1304 cmd->data[0] = devid; in build_complete_ppr()
1321 static void build_inv_irt(struct iommu_cmd *cmd, u16 devid) in build_inv_irt() argument
1324 cmd->data[0] = devid; in build_inv_irt()
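Each build_* helper above starts by writing devid into the low 16 bits of the first command word; the command type then selects how the remaining words are interpreted. A stripped-down sketch of that pattern; the iommu_cmd layout and the opcode value here are assumptions for illustration, not the driver's actual definitions:

#include <stdint.h>
#include <string.h>

/* Assumed layout: a four-word (128-bit) command with the type encoded
 * in the top bits of the second word, mirroring cmd->data[0] = devid
 * in the builders above. */
struct iommu_cmd {
	uint32_t data[4];
};

#define CMD_INV_DEV_ENTRY	0x02	/* assumed opcode value */

void build_inv_dte(struct iommu_cmd *cmd, uint16_t devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;			 /* device table index */
	cmd->data[1] |= CMD_INV_DEV_ENTRY << 28; /* command type */
}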
1434 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1438 build_inv_dte(&cmd, devid); in iommu_flush_dte()
1443 static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte_sync() argument
1447 ret = iommu_flush_dte(iommu, devid); in iommu_flush_dte_sync()
1454 u32 devid; in amd_iommu_flush_dte_all() local
1457 for (devid = 0; devid <= last_bdf; ++devid) in amd_iommu_flush_dte_all()
1458 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1503 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1507 build_inv_irt(&cmd, devid); in iommu_flush_irt()
1514 u32 devid; in amd_iommu_flush_irt_all() local
1520 for (devid = 0; devid <= last_bdf; devid++) in amd_iommu_flush_irt_all()
1521 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1547 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, in device_flush_iotlb()
1578 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1583 alias = pci_seg->alias_table[dev_data->devid]; in device_flush_dte()
1584 if (alias != dev_data->devid) { in device_flush_dte()
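device_flush_dte() at lines 1578-1584 flushes the device's own DTE and then consults the per-segment alias table: a device that issues requests under a second ID has a second device table entry, and that one must be invalidated too. A sketch of the pattern, with a hypothetical flush callback standing in for iommu_flush_dte():

#include <stdint.h>

/* Hypothetical minimal types; the driver keys alias_table by devid
 * and flushes through iommu_flush_dte(). */
struct pci_segment {
	uint16_t alias_table[1 << 16];	/* devid -> alias devid */
};

typedef int (*flush_fn)(void *iommu, uint16_t devid);

int flush_dte_with_alias(void *iommu, struct pci_segment *seg,
			 uint16_t devid, flush_fn flush)
{
	int ret = flush(iommu, devid);

	if (ret)
		return ret;

	/* A device that emits requests under an alias ID has a second
	 * device table entry; invalidate that one as well. */
	if (seg->alias_table[devid] != devid)
		ret = flush(iommu, seg->alias_table[devid]);

	return ret;
}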
1803 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
2031 __func__, dev_data->devid, gcr3_info->glx, in set_dte_gcr3_table()
2061 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; in set_dte_entry()
2102 initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid); in set_dte_entry()
2128 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; in clear_dte_entry()
2868 dte = &get_dev_table(iommu)[dev_data->devid]; in amd_iommu_set_dirty_tracking()
2918 int devid, sbdf; in amd_iommu_get_resv_regions() local
2924 devid = PCI_SBDF_TO_DEVID(sbdf); in amd_iommu_get_resv_regions()
2932 if (devid < entry->devid_start || devid > entry->devid_end) in amd_iommu_get_resv_regions()
3079 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt_and_complete() argument
3089 build_inv_irt(&cmd, devid); in iommu_flush_irt_and_complete()
3112 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid, in set_dte_irq_entry() argument
3116 struct dev_table_entry *dte = &get_dev_table(iommu)[devid]; in set_dte_irq_entry()
3117 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid); in set_dte_irq_entry()
3134 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid) in get_irq_table() argument
3139 if (WARN_ONCE(!pci_seg->rlookup_table[devid], in get_irq_table()
3141 __func__, pci_seg->id, devid)) in get_irq_table()
3144 table = pci_seg->irq_lookup_table[devid]; in get_irq_table()
3146 __func__, pci_seg->id, devid)) in get_irq_table()
3171 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
3176 pci_seg->irq_lookup_table[devid] = table; in set_remap_table_entry()
3177 set_dte_irq_entry(iommu, devid, table); in set_remap_table_entry()
3178 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
3208 u16 devid, struct pci_dev *pdev, in alloc_irq_table() argument
3221 table = pci_seg->irq_lookup_table[devid]; in alloc_irq_table()
3225 alias = pci_seg->alias_table[devid]; in alloc_irq_table()
3228 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3240 table = pci_seg->irq_lookup_table[devid]; in alloc_irq_table()
3246 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3257 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3259 if (devid != alias) in alloc_irq_table()
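alloc_irq_table() at lines 3208-3259 shares a single remap table between a device and its alias: if either ID already owns a table, the other inherits it, and set_remap_table_entry() is invoked for both IDs so both DTEs point at the same table. A sketch of that sharing logic under assumed minimal types (locking and the driver's allocation details are omitted):

#include <stdint.h>
#include <stdlib.h>

/* Assumed minimal types; both lookup tables are keyed by 16-bit devid. */
struct irq_remap_table { int dummy; /* ... */ };

struct seg_irq_tables {
	uint16_t alias_table[1 << 16];
	struct irq_remap_table *irq_lookup_table[1 << 16];
};

struct irq_remap_table *get_or_share_irq_table(struct seg_irq_tables *seg,
					       uint16_t devid)
{
	uint16_t alias = seg->alias_table[devid];
	struct irq_remap_table *table = seg->irq_lookup_table[devid];

	if (table)
		return table;

	/* Reuse the alias's table if one exists already... */
	table = seg->irq_lookup_table[alias];
	if (!table) {
		/* ...otherwise allocate a fresh one. */
		table = calloc(1, sizeof(*table));
		if (!table)
			return NULL;
	}

	/* From now on both IDs resolve to the same remap table. */
	seg->irq_lookup_table[devid] = table;
	if (alias != devid)
		seg->irq_lookup_table[alias] = table;
	return table;
}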
3275 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count, in alloc_irq_index() argument
3283 table = alloc_irq_table(iommu, devid, pdev, max_irqs); in alloc_irq_index()
3322 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in __modify_irte_ga() argument
3330 table = get_irq_table(iommu, devid); in __modify_irte_ga()
3353 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in modify_irte_ga() argument
3358 ret = __modify_irte_ga(iommu, devid, index, irte); in modify_irte_ga()
3362 iommu_flush_irt_and_complete(iommu, devid); in modify_irte_ga()
3368 u16 devid, int index, union irte *irte) in modify_irte() argument
3373 table = get_irq_table(iommu, devid); in modify_irte()
3381 iommu_flush_irt_and_complete(iommu, devid); in modify_irte()
3386 static void free_irte(struct amd_iommu *iommu, u16 devid, int index) in free_irte() argument
3391 table = get_irq_table(iommu, devid); in free_irte()
3399 iommu_flush_irt_and_complete(iommu, devid); in free_irte()
3404 u8 vector, u32 dest_apicid, int devid) in irte_prepare() argument
3418 u8 vector, u32 dest_apicid, int devid) in irte_ga_prepare() argument
3432 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_activate() argument
3437 modify_irte(iommu, devid, index, irte); in irte_activate()
3440 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_activate() argument
3445 modify_irte_ga(iommu, devid, index, irte); in irte_ga_activate()
3448 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_deactivate() argument
3453 modify_irte(iommu, devid, index, irte); in irte_deactivate()
3456 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_deactivate() argument
3461 modify_irte_ga(iommu, devid, index, irte); in irte_ga_deactivate()
3464 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_set_affinity() argument
3471 modify_irte(iommu, devid, index, irte); in irte_set_affinity()
3474 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_ga_set_affinity() argument
3485 modify_irte_ga(iommu, devid, index, irte); in irte_ga_set_affinity()
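The functions at lines 3432-3485 come in legacy/GA pairs, and the callers at lines 3780, 3794 and 3997 reach them only through iommu->irte_ops, so the IRTE format (32-bit legacy vs. 128-bit guest-APIC) is chosen once per IOMMU rather than tested at every call site. A minimal sketch of that vtable dispatch with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical vtable mirroring irte_ops: one implementation per IRTE
 * format, selected once, so callers never test the format inline. */
struct irte_ops {
	void (*activate)(void *iommu, void *entry, uint16_t devid, uint16_t index);
	void (*deactivate)(void *iommu, void *entry, uint16_t devid, uint16_t index);
};

static void legacy_activate(void *iommu, void *entry, uint16_t devid, uint16_t index)
{
	printf("legacy activate devid=0x%04x index=%u\n", devid, index);
}

static void legacy_deactivate(void *iommu, void *entry, uint16_t devid, uint16_t index)
{
	printf("legacy deactivate devid=0x%04x index=%u\n", devid, index);
}

const struct irte_ops legacy_irte_ops = {
	.activate   = legacy_activate,
	.deactivate = legacy_deactivate,
};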
3539 return get_ioapic_devid(info->devid); in get_devid()
3541 return get_hpet_devid(info->devid); in get_devid()
3578 int devid, int index, int sub_handle) in irq_remapping_prepare_irte() argument
3586 data->irq_2_irte.devid = devid; in irq_remapping_prepare_irte()
3590 irq_cfg->dest_apicid, devid); in irq_remapping_prepare_irte()
3636 int i, ret, devid, seg, sbdf; in irq_remapping_alloc() local
3649 devid = PCI_SBDF_TO_DEVID(sbdf); in irq_remapping_alloc()
3650 iommu = __rlookup_amd_iommu(seg, devid); in irq_remapping_alloc()
3654 dev_data = search_dev_data(iommu, devid); in irq_remapping_alloc()
3664 table = alloc_irq_table(iommu, devid, NULL, max_irqs); in irq_remapping_alloc()
3684 index = alloc_irq_index(iommu, devid, nr_irqs, align, in irq_remapping_alloc()
3688 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL, in irq_remapping_alloc()
3722 irq_data->hwirq = (devid << 16) + i; in irq_remapping_alloc()
3725 irq_remapping_prepare_irte(data, cfg, info, devid, index, i); in irq_remapping_alloc()
3737 free_irte(iommu, devid, index + i); in irq_remapping_alloc()
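irq_remapping_alloc() encodes both halves of the mapping into the hwirq at line 3722: devid in the high 16 bits, the per-device IRTE index in the low 16. A small sketch of the round-trip:

#include <assert.h>
#include <stdint.h>

/* hwirq packing from line 3722: devid in the high 16 bits, the
 * per-device IRTE index in the low 16 bits. */
static inline uint32_t make_hwirq(uint16_t devid, uint16_t index)
{
	return ((uint32_t)devid << 16) + index;
}

int main(void)
{
	uint32_t hwirq = make_hwirq(0xa0f8, 3);

	assert((hwirq >> 16) == 0xa0f8);	/* recover devid */
	assert((hwirq & 0xffff) == 3);		/* recover index */
	return 0;
}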
3756 free_irte(data->iommu, irte_info->devid, irte_info->index); in irq_remapping_free()
3780 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid, in irq_remapping_activate()
3794 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid, in irq_remapping_deactivate()
3802 int devid = -1; in irq_remapping_select() local
3808 devid = get_ioapic_devid(fwspec->param[0]); in irq_remapping_select()
3810 devid = get_hpet_devid(fwspec->param[0]); in irq_remapping_select()
3812 if (devid < 0) in irq_remapping_select()
3814 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff)); in irq_remapping_select()
3876 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_update_ga()
3906 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_activate_guest_mode()
3938 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_deactivate_guest_mode()
3957 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3997 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, in amd_ir_update_irte()