Lines matching refs: dev_data

76 			  struct iommu_dev_data *dev_data);
133 static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data, in update_dte256() argument
138 struct dev_table_entry *ptr = &dev_table[dev_data->devid]; in update_dte256()
140 spin_lock_irqsave(&dev_data->dte_lock, flags); in update_dte256()
146 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
151 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
159 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
168 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
181 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
186 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
196 spin_unlock_irqrestore(&dev_data->dte_lock, flags); in update_dte256()
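update_dte256() above takes dev_data->dte_lock around the whole 256-bit DTE update and issues iommu_flush_dte_sync() after individual stores (lines 146-186), presumably so the hardware never observes a partially updated entry. Below is a minimal sketch of that locking pattern, using trimmed-down stand-in types and a hypothetical flush_dte_sync() helper rather than the real driver structures; the real function is more careful about the order in which the quadwords and the valid bit are written.

```c
#include <linux/spinlock.h>
#include <linux/types.h>

/* Simplified stand-ins; the real structures live in the AMD IOMMU driver. */
struct dev_table_entry {
	u64 data[4];			/* 256-bit device table entry */
};

struct iommu_dev_data {
	spinlock_t dte_lock;		/* serializes DTE updates for this device */
	u16 devid;
};

/* Hypothetical flush hook standing in for iommu_flush_dte_sync(). */
void flush_dte_sync(u16 devid);

/*
 * Sketch: update a 256-bit DTE one quadword at a time, flushing after each
 * store so the IOMMU does not keep using a stale or half-written entry.
 * The spinlock keeps concurrent updaters from interleaving their writes.
 */
static void update_dte256_sketch(struct iommu_dev_data *dev_data,
				 struct dev_table_entry *ptr,
				 struct dev_table_entry *new)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_data->dte_lock, flags);

	for (i = 0; i < 4; i++) {
		ptr->data[i] = new->data[i];
		flush_dte_sync(dev_data->devid);
	}

	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
}
```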
199 static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data, in get_dte256() argument
206 ptr = &dev_table[dev_data->devid]; in get_dte256()
208 spin_lock_irqsave(&dev_data->dte_lock, flags); in get_dte256()
211 spin_unlock_irqrestore(&dev_data->dte_lock, flags); in get_dte256()
349 struct iommu_dev_data *dev_data; in alloc_dev_data() local
352 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
353 if (!dev_data) in alloc_dev_data()
356 mutex_init(&dev_data->mutex); in alloc_dev_data()
357 spin_lock_init(&dev_data->dte_lock); in alloc_dev_data()
358 dev_data->devid = devid; in alloc_dev_data()
359 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
361 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list); in alloc_dev_data()
362 return dev_data; in alloc_dev_data()
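alloc_dev_data() (lines 349-362) allocates the per-device tracking structure, initializes its mutex, DTE spinlock and ratelimit state, and publishes it on the per-PCI-segment lock-less list. A condensed sketch of the same allocation pattern; the struct below keeps only the fields visible in the listing, and everything else about it is illustrative.

```c
#include <linux/slab.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>

struct iommu_dev_data {
	struct llist_node dev_data_list;	/* entry in the per-segment list */
	struct mutex mutex;			/* serializes attach/detach */
	spinlock_t dte_lock;			/* protects the 256-bit DTE */
	struct ratelimit_state rs;		/* rate-limits fault logging */
	u16 devid;
};

/* Sketch of alloc_dev_data(): allocate, init, publish on a lock-less list. */
static struct iommu_dev_data *alloc_dev_data_sketch(struct llist_head *seg_list,
						    u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	mutex_init(&dev_data->mutex);
	spin_lock_init(&dev_data->dte_lock);
	dev_data->devid = devid;
	ratelimit_default_init(&dev_data->rs);

	/* llist_add() is safe against concurrent readers walking the list. */
	llist_add(&dev_data->dev_data_list, seg_list);
	return dev_data;
}
```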
367 struct iommu_dev_data *dev_data; in search_dev_data() local
375 llist_for_each_entry(dev_data, node, dev_data_list) { in search_dev_data()
376 if (dev_data->devid == devid) in search_dev_data()
377 return dev_data; in search_dev_data()
387 struct iommu_dev_data *dev_data, *alias_data; in clone_alias() local
399 dev_data = dev_iommu_priv_get(&pdev->dev); in clone_alias()
400 if (!dev_data) { in clone_alias()
405 get_dte256(iommu, dev_data, &new); in clone_alias()
463 struct iommu_dev_data *dev_data; in find_dev_data() local
465 dev_data = search_dev_data(iommu, devid); in find_dev_data()
467 if (dev_data == NULL) { in find_dev_data()
468 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
469 if (!dev_data) in find_dev_data()
473 dev_data->defer_attach = true; in find_dev_data()
476 return dev_data; in find_dev_data()
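search_dev_data() walks that lock-less list looking for a matching devid, and find_dev_data() allocates a new entry when the search misses, optionally marking it for deferred attach (line 473; the condition guarding that assignment is not visible in the listing). A sketch of the lookup-or-allocate pattern, with a hypothetical allocator standing in for alloc_dev_data():

```c
#include <linux/llist.h>
#include <linux/types.h>

struct iommu_dev_data {
	struct llist_node dev_data_list;
	bool defer_attach;
	u16 devid;
};

/* Assumed allocator, as in the alloc_dev_data() sketch above. */
struct iommu_dev_data *alloc_dev_data_sketch(struct llist_head *seg_list, u16 devid);

/* Sketch of search_dev_data(): walk the per-segment lock-less list. */
static struct iommu_dev_data *search_dev_data_sketch(struct llist_head *seg_list,
						     u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct llist_node *node = seg_list->first;

	llist_for_each_entry(dev_data, node, dev_data_list) {
		if (dev_data->devid == devid)
			return dev_data;
	}
	return NULL;
}

/* Sketch of find_dev_data(): look up first, allocate on a miss. */
static struct iommu_dev_data *find_dev_data_sketch(struct llist_head *seg_list,
						   u16 devid, bool defer)
{
	struct iommu_dev_data *dev_data = search_dev_data_sketch(seg_list, devid);

	if (!dev_data) {
		dev_data = alloc_dev_data_sketch(seg_list, devid);
		if (!dev_data)
			return NULL;
		if (defer)	/* caller-supplied condition; not shown in the listing */
			dev_data->defer_attach = true;
	}
	return dev_data;
}
```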
504 static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data) in pdev_pasid_supported() argument
506 return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP); in pdev_pasid_supported()
536 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_ats() local
539 if (dev_data->ats_enabled) in pdev_enable_cap_ats()
543 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) { in pdev_enable_cap_ats()
546 dev_data->ats_enabled = 1; in pdev_enable_cap_ats()
547 dev_data->ats_qdep = pci_ats_queue_depth(pdev); in pdev_enable_cap_ats()
556 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_ats() local
558 if (dev_data->ats_enabled) { in pdev_disable_cap_ats()
560 dev_data->ats_enabled = 0; in pdev_disable_cap_ats()
566 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_pri() local
569 if (dev_data->pri_enabled) in pdev_enable_cap_pri()
572 if (!dev_data->ats_enabled) in pdev_enable_cap_pri()
575 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) { in pdev_enable_cap_pri()
581 dev_data->pri_enabled = 1; in pdev_enable_cap_pri()
582 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in pdev_enable_cap_pri()
593 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_pri() local
595 if (dev_data->pri_enabled) { in pdev_disable_cap_pri()
597 dev_data->pri_enabled = 0; in pdev_disable_cap_pri()
603 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_pasid() local
606 if (dev_data->pasid_enabled) in pdev_enable_cap_pasid()
609 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) { in pdev_enable_cap_pasid()
613 dev_data->pasid_enabled = 1; in pdev_enable_cap_pasid()
621 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_pasid() local
623 if (dev_data->pasid_enabled) { in pdev_disable_cap_pasid()
625 dev_data->pasid_enabled = 0; in pdev_disable_cap_pasid()
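The pdev_enable_cap_ats()/pri()/pasid() helpers and their disable counterparts (lines 536-625) cache the enablement state and ATS queue depth in dev_data so later paths can consult them without re-reading PCI config space; note that PRI is only enabled once ATS is (line 572). A sketch of the ATS pair, using the in-tree PCI helpers pci_enable_ats(), pci_ats_queue_depth() and pci_disable_ats(); the flag bit and struct layout are simplified stand-ins:

```c
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bits.h>

#define FLAG_ATS_SUP	BIT(0)	/* stand-in for AMD_IOMMU_DEVICE_FLAG_ATS_SUP */

struct iommu_dev_data {
	u32 flags;			/* capability bits probed at init time */
	u8  ats_enabled : 1;
	int ats_qdep;			/* invalidation queue depth for ATS flushes */
};

/* Sketch of pdev_enable_cap_ats(): enable ATS and cache the queue depth. */
static void pdev_enable_cap_ats_sketch(struct pci_dev *pdev,
				       struct iommu_dev_data *dev_data)
{
	if (dev_data->ats_enabled)
		return;

	if ((dev_data->flags & FLAG_ATS_SUP) &&
	    !pci_enable_ats(pdev, PAGE_SHIFT)) {
		dev_data->ats_enabled = 1;
		dev_data->ats_qdep    = pci_ats_queue_depth(pdev);
	}
}

/* Sketch of pdev_disable_cap_ats(): mirror image of the enable path. */
static void pdev_disable_cap_ats_sketch(struct pci_dev *pdev,
					struct iommu_dev_data *dev_data)
{
	if (dev_data->ats_enabled) {
		pci_disable_ats(pdev);
		dev_data->ats_enabled = 0;
	}
}
```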
675 struct iommu_dev_data *dev_data; in iommu_init_device() local
686 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
687 if (!dev_data) in iommu_init_device()
690 dev_data->dev = dev; in iommu_init_device()
696 dev_iommu_priv_set(dev, dev_data); in iommu_init_device()
707 dev_data->flags = pdev_get_caps(to_pci_dev(dev)); in iommu_init_device()
741 struct iommu_dev_data *dev_data = find_dev_data(iommu, devid); in dump_dte_entry() local
743 get_dte256(iommu, dev_data, &dte); in dump_dte_entry()
760 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_hw_error() local
773 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
775 if (dev_data) { in amd_iommu_report_rmp_hw_error()
776 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
792 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_fault() local
806 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
808 if (dev_data) { in amd_iommu_report_rmp_fault()
809 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
833 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_page_fault() local
839 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
841 if (dev_data) { in amd_iommu_report_page_fault()
849 if (dev_data->domain == NULL) { in amd_iommu_report_page_fault()
857 if (!report_iommu_fault(&dev_data->domain->domain, in amd_iommu_report_page_fault()
865 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
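The three fault-report helpers above share one shape: resolve dev_data from the struct device, and rate-limit what reaches the log through dev_data->rs. A sketch of that shape; dev_iommu_priv_get() is the real accessor, while the message format and the fallback path are illustrative:

```c
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/ratelimit.h>

struct iommu_dev_data {
	struct ratelimit_state rs;	/* per-device log rate limiting */
};

/*
 * Sketch of the shared fault-report shape: per-device rate limiting keeps a
 * misbehaving device from flooding the kernel log.
 */
static void report_fault_sketch(struct device *dev, u64 address, u32 flags)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);

	if (dev_data) {
		if (__ratelimit(&dev_data->rs))
			dev_err(dev, "page fault: addr=0x%llx flags=0x%x\n",
				(unsigned long long)address, flags);
	} else {
		/* No per-device state yet: fall back to an unthrottled print. */
		pr_err("page fault: addr=0x%llx flags=0x%x\n",
		       (unsigned long long)address, flags);
	}
}
```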
1540 static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address, in device_flush_iotlb() argument
1543 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_iotlb()
1545 int qdep = dev_data->ats_qdep; in device_flush_iotlb()
1547 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, in device_flush_iotlb()
1563 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1565 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_dte()
1571 if (dev_is_pci(dev_data->dev)) in device_flush_dte()
1572 pdev = to_pci_dev(dev_data->dev); in device_flush_dte()
1578 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1583 alias = pci_seg->alias_table[dev_data->devid]; in device_flush_dte()
1584 if (alias != dev_data->devid) { in device_flush_dte()
1590 if (dev_data->ats_enabled) { in device_flush_dte()
1592 ret = device_flush_iotlb(dev_data, 0, ~0UL, in device_flush_dte()
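device_flush_dte() invalidates the cached DTE for the device and, when the PCI alias differs (lines 1583-1584), for the alias as well, then flushes the device's ATS IOTLB using the cached ats_qdep. A sketch of the control flow with the command building hidden behind hypothetical flush_dte()/flush_iotlb() helpers:

```c
#include <linux/types.h>

struct iommu_dev_data {
	u16 devid;
	u8  ats_enabled : 1;
	int ats_qdep;
};

/* Hypothetical stand-ins for the driver's command helpers. */
int flush_dte(u16 devid);
int flush_iotlb(u16 devid, int qdep, u64 address, size_t size);

/* Sketch of device_flush_dte(): DTE first, alias next, then the ATS IOTLB. */
static int device_flush_dte_sketch(struct iommu_dev_data *dev_data, u16 alias)
{
	int ret;

	ret = flush_dte(dev_data->devid);
	if (ret)
		return ret;

	/* A PCI alias (e.g. behind a legacy bridge) gets its own DTE flush. */
	if (alias != dev_data->devid) {
		ret = flush_dte(alias);
		if (ret)
			return ret;
	}

	/* With ATS enabled the device caches translations; flush them all. */
	if (dev_data->ats_enabled)
		ret = flush_iotlb(dev_data->devid, dev_data->ats_qdep, 0, ~0UL);

	return ret;
}
```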
1602 struct iommu_dev_data *dev_data; in domain_flush_pages_v2() local
1607 list_for_each_entry(dev_data, &pdom->dev_list, list) { in domain_flush_pages_v2()
1608 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in domain_flush_pages_v2()
1609 u16 domid = dev_data->gcr3_info.domid; in domain_flush_pages_v2()
1651 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1665 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1667 if (!dev_data->ats_enabled) in __domain_flush_pages()
1670 ret |= device_flush_iotlb(dev_data, address, size, pasid, gn); in __domain_flush_pages()
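__domain_flush_pages() walks the domain's dev_list and issues per-device IOTLB invalidations only for devices with ats_enabled set (line 1667), since only those hold translations in their own cache. A sketch of that filter, again with a hypothetical per-device flush helper:

```c
#include <linux/list.h>
#include <linux/types.h>

struct iommu_dev_data {
	struct list_head list;		/* entry in the domain's dev_list */
	u8  ats_enabled : 1;
	int ats_qdep;
	u16 devid;
};

/* Hypothetical per-device ATS invalidation, standing in for device_flush_iotlb(). */
int flush_device_iotlb(struct iommu_dev_data *dev_data, u64 address, size_t size);

/* Sketch: after the IOMMU-side TLB flush, only ATS-capable devices need more. */
static int flush_domain_device_iotlbs_sketch(struct list_head *dev_list,
					     u64 address, size_t size)
{
	struct iommu_dev_data *dev_data;
	int ret = 0;

	list_for_each_entry(dev_data, dev_list, list) {
		if (!dev_data->ats_enabled)
			continue;	/* device has no translation cache to flush */

		ret |= flush_device_iotlb(dev_data, address, size);
	}
	return ret;
}
```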
1735 void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data, in amd_iommu_dev_flush_pasid_pages() argument
1739 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in amd_iommu_dev_flush_pasid_pages()
1742 dev_data->gcr3_info.domid, pasid, true); in amd_iommu_dev_flush_pasid_pages()
1745 if (dev_data->ats_enabled) in amd_iommu_dev_flush_pasid_pages()
1746 device_flush_iotlb(dev_data, address, size, pasid, true); in amd_iommu_dev_flush_pasid_pages()
1751 static void dev_flush_pasid_all(struct iommu_dev_data *dev_data, in dev_flush_pasid_all() argument
1754 amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0, in dev_flush_pasid_all()
1777 struct iommu_dev_data *dev_data; in amd_iommu_update_and_flush_device_table() local
1781 list_for_each_entry(dev_data, &domain->dev_list, list) { in amd_iommu_update_and_flush_device_table()
1782 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in amd_iommu_update_and_flush_device_table()
1784 set_dte_entry(iommu, dev_data); in amd_iommu_update_and_flush_device_table()
1785 clone_aliases(iommu, dev_data->dev); in amd_iommu_update_and_flush_device_table()
1788 list_for_each_entry(dev_data, &domain->dev_list, list) in amd_iommu_update_and_flush_device_table()
1789 device_flush_dte(dev_data); in amd_iommu_update_and_flush_device_table()
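amd_iommu_update_and_flush_device_table() makes two passes over dev_list: rewrite every DTE (and clone it to aliases) first, then flush the DTE cache for each device. A sketch of the two-pass pattern with the per-device work abstracted behind hypothetical write_dte()/flush_dte() hooks:

```c
#include <linux/list.h>
#include <linux/types.h>

struct iommu_dev_data {
	struct list_head list;		/* entry in the domain's dev_list */
	u16 devid;
};

/* Hypothetical per-device hooks standing in for set_dte_entry()/device_flush_dte(). */
void write_dte(struct iommu_dev_data *dev_data);
void flush_dte(struct iommu_dev_data *dev_data);

/*
 * Sketch: update every DTE in the domain first, then flush, so the IOMMU's
 * DTE cache is not refilled from an entry that is still being rewritten.
 */
static void update_and_flush_device_table_sketch(struct list_head *dev_list)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, dev_list, list)
		write_dte(dev_data);

	list_for_each_entry(dev_data, dev_list, list)
		flush_dte(dev_data);
}
```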
1796 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
1800 dev_data = dev_iommu_priv_get(dev); in amd_iommu_complete_ppr()
1803 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
1804 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
1958 static int update_gcr3(struct iommu_dev_data *dev_data, in update_gcr3() argument
1961 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in update_gcr3()
1973 dev_flush_pasid_all(dev_data, pasid); in update_gcr3()
1977 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid, in amd_iommu_set_gcr3() argument
1980 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in amd_iommu_set_gcr3()
1983 iommu_group_mutex_assert(dev_data->dev); in amd_iommu_set_gcr3()
1985 ret = update_gcr3(dev_data, pasid, gcr3, true); in amd_iommu_set_gcr3()
1993 int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid) in amd_iommu_clear_gcr3() argument
1995 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in amd_iommu_clear_gcr3()
1998 iommu_group_mutex_assert(dev_data->dev); in amd_iommu_clear_gcr3()
2000 ret = update_gcr3(dev_data, pasid, 0, false); in amd_iommu_clear_gcr3()
2008 static void make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry *ptr, in make_clear_dte() argument
2021 struct iommu_dev_data *dev_data, in set_dte_gcr3_table() argument
2024 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in set_dte_gcr3_table()
2031 __func__, dev_data->devid, gcr3_info->glx, in set_dte_gcr3_table()
2039 if (pdom_is_v2_pgtbl_mode(dev_data->domain)) in set_dte_gcr3_table()
2053 struct iommu_dev_data *dev_data) in set_dte_entry() argument
2059 struct protection_domain *domain = dev_data->domain; in set_dte_entry()
2060 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in set_dte_entry()
2061 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; in set_dte_entry()
2064 domid = dev_data->gcr3_info.domid; in set_dte_entry()
2068 make_clear_dte(dev_data, dte, &new); in set_dte_entry()
2086 if (dev_data->ppr) in set_dte_entry()
2092 if (dev_data->ats_enabled) in set_dte_entry()
2102 initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid); in set_dte_entry()
2108 set_dte_gcr3_table(iommu, dev_data, &new); in set_dte_entry()
2110 update_dte256(iommu, dev_data, &new); in set_dte_entry()
2125 static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data) in clear_dte_entry() argument
2128 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; in clear_dte_entry()
2130 make_clear_dte(dev_data, dte, &new); in clear_dte_entry()
2131 update_dte256(iommu, dev_data, &new); in clear_dte_entry()
2135 static void dev_update_dte(struct iommu_dev_data *dev_data, bool set) in dev_update_dte() argument
2137 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in dev_update_dte()
2140 set_dte_entry(iommu, dev_data); in dev_update_dte()
2142 clear_dte_entry(iommu, dev_data); in dev_update_dte()
2144 clone_aliases(iommu, dev_data->dev); in dev_update_dte()
2145 device_flush_dte(dev_data); in dev_update_dte()
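dev_update_dte() is the single point that either installs or clears a device's DTE and then propagates the result to PCI aliases and flushes. A small sketch, with the four helpers it calls reduced to hypothetical prototypes:

```c
#include <linux/types.h>

struct iommu_dev_data { u16 devid; };

/* Hypothetical stand-ins for the helpers named in the listing. */
void set_dte(struct iommu_dev_data *dev_data);
void clear_dte(struct iommu_dev_data *dev_data);
void clone_to_aliases(struct iommu_dev_data *dev_data);
void flush_dte(struct iommu_dev_data *dev_data);

/* Sketch of dev_update_dte(): one helper for both install and teardown. */
static void dev_update_dte_sketch(struct iommu_dev_data *dev_data, bool set)
{
	if (set)
		set_dte(dev_data);
	else
		clear_dte(dev_data);

	clone_to_aliases(dev_data);	/* keep PCI alias DTEs in sync */
	flush_dte(dev_data);		/* invalidate the IOMMU's cached copy */
}
```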
2153 static int init_gcr3_table(struct iommu_dev_data *dev_data, in init_gcr3_table() argument
2156 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in init_gcr3_table()
2157 int max_pasids = dev_data->max_pasids; in init_gcr3_table()
2164 if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data)) in init_gcr3_table()
2171 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, in init_gcr3_table()
2180 ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true); in init_gcr3_table()
2182 free_gcr3_table(&dev_data->gcr3_info); in init_gcr3_table()
2187 static void destroy_gcr3_table(struct iommu_dev_data *dev_data, in destroy_gcr3_table() argument
2190 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in destroy_gcr3_table()
2193 update_gcr3(dev_data, 0, 0, false); in destroy_gcr3_table()
2268 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in attach_device() local
2269 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in attach_device()
2274 mutex_lock(&dev_data->mutex); in attach_device()
2276 if (dev_data->domain != NULL) { in attach_device()
2288 ret = init_gcr3_table(dev_data, domain); in attach_device()
2295 pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; in attach_device()
2304 if (amd_iommu_iopf_add_device(iommu, dev_data)) in attach_device()
2311 dev_data->domain = domain; in attach_device()
2313 list_add(&dev_data->list, &domain->dev_list); in attach_device()
2317 dev_update_dte(dev_data, true); in attach_device()
2320 mutex_unlock(&dev_data->mutex); in attach_device()
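attach_device() runs under dev_data->mutex, rejects a device that already has a domain (line 2276), sets up the GCR3/PASID state and the I/O page-fault path, links the device onto the domain's dev_list and only then writes the DTE. A condensed sketch of that ordering; every step is a hypothetical helper, and the error unwinding of the real function is omitted:

```c
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/errno.h>

struct protection_domain {
	struct list_head dev_list;	/* devices attached to this domain */
};

struct iommu_dev_data {
	struct mutex mutex;		/* serializes attach/detach for this device */
	struct list_head list;
	struct protection_domain *domain;
};

/* Hypothetical stand-ins for GCR3 setup, IOPF registration and dev_update_dte(). */
int  setup_pasid_tables(struct iommu_dev_data *dev_data, struct protection_domain *dom);
int  register_iopf(struct iommu_dev_data *dev_data);
void write_and_flush_dte(struct iommu_dev_data *dev_data);

/* Sketch of attach_device(): bind under the per-device mutex, DTE write last. */
static int attach_device_sketch(struct iommu_dev_data *dev_data,
				struct protection_domain *domain)
{
	int ret = 0;

	mutex_lock(&dev_data->mutex);

	if (dev_data->domain) {		/* already attached elsewhere */
		ret = -EBUSY;
		goto out;
	}

	ret = setup_pasid_tables(dev_data, domain);
	if (ret)
		goto out;

	ret = register_iopf(dev_data);
	if (ret)
		goto out;

	dev_data->domain = domain;
	list_add(&dev_data->list, &domain->dev_list);

	/* Only once all state is in place does the DTE make it visible to HW. */
	write_and_flush_dte(dev_data);

out:
	mutex_unlock(&dev_data->mutex);
	return ret;
}
```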
2330 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in detach_device() local
2331 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in detach_device()
2332 struct protection_domain *domain = dev_data->domain; in detach_device()
2335 mutex_lock(&dev_data->mutex); in detach_device()
2343 if (WARN_ON(!dev_data->domain)) in detach_device()
2347 if (dev_data->ppr) { in detach_device()
2349 amd_iommu_iopf_remove_device(iommu, dev_data); in detach_device()
2356 dev_update_dte(dev_data, false); in detach_device()
2361 list_del(&dev_data->list); in detach_device()
2366 destroy_gcr3_table(dev_data, domain); in detach_device()
2369 dev_data->domain = NULL; in detach_device()
2375 mutex_unlock(&dev_data->mutex); in detach_device()
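detach_device() reverses the sequence under the same mutex: tear down the I/O page-fault registration if PPR was in use, clear and flush the DTE so the hardware stops translating for the device, then unlink it from the domain and destroy the GCR3 table. A sketch of the teardown order, with the same kind of hypothetical helpers:

```c
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/bug.h>
#include <linux/types.h>

struct protection_domain;

struct iommu_dev_data {
	struct mutex mutex;
	struct list_head list;
	struct protection_domain *domain;
	bool ppr;			/* page-request interface was in use */
};

/* Hypothetical stand-ins mirroring the helpers in the listing. */
void unregister_iopf(struct iommu_dev_data *dev_data);
void clear_and_flush_dte(struct iommu_dev_data *dev_data);
void teardown_pasid_tables(struct iommu_dev_data *dev_data);

/* Sketch of detach_device(): stop hardware translation before freeing state. */
static void detach_device_sketch(struct iommu_dev_data *dev_data)
{
	mutex_lock(&dev_data->mutex);

	if (WARN_ON(!dev_data->domain))
		goto out;

	if (dev_data->ppr)
		unregister_iopf(dev_data);

	clear_and_flush_dte(dev_data);		/* hardware stops using the domain */

	list_del(&dev_data->list);
	teardown_pasid_tables(dev_data);
	dev_data->domain = NULL;

out:
	mutex_unlock(&dev_data->mutex);
}
```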
2382 struct iommu_dev_data *dev_data; in amd_iommu_probe_device() local
2414 dev_data = dev_iommu_priv_get(dev); in amd_iommu_probe_device()
2416 pdev_pasid_supported(dev_data)) { in amd_iommu_probe_device()
2417 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids, in amd_iommu_probe_device()
2433 dev_data->max_irqs = MAX_IRQS_PER_TABLE_2K; in amd_iommu_probe_device()
2435 dev_data->max_irqs = MAX_IRQS_PER_TABLE_512; in amd_iommu_probe_device()
2445 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_release_device() local
2447 WARN_ON(dev_data->domain); in amd_iommu_release_device()
2639 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in blocked_domain_attach_device() local
2641 if (dev_data->domain) in blocked_domain_attach_device()
2645 mutex_lock(&dev_data->mutex); in blocked_domain_attach_device()
2646 dev_update_dte(dev_data, false); in blocked_domain_attach_device()
2647 mutex_unlock(&dev_data->mutex); in blocked_domain_attach_device()
2698 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_attach_device() local
2707 if (dev_data->domain == domain) in amd_iommu_attach_device()
2710 dev_data->defer_attach = false; in amd_iommu_attach_device()
2719 if (dev_data->domain) in amd_iommu_attach_device()
2727 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2729 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2853 struct iommu_dev_data *dev_data; in amd_iommu_set_dirty_tracking() local
2865 list_for_each_entry(dev_data, &pdomain->dev_list, list) { in amd_iommu_set_dirty_tracking()
2866 spin_lock(&dev_data->dte_lock); in amd_iommu_set_dirty_tracking()
2867 iommu = get_amd_iommu_from_dev_data(dev_data); in amd_iommu_set_dirty_tracking()
2868 dte = &get_dev_table(iommu)[dev_data->devid]; in amd_iommu_set_dirty_tracking()
2872 spin_unlock(&dev_data->dte_lock); in amd_iommu_set_dirty_tracking()
2875 device_flush_dte(dev_data); in amd_iommu_set_dirty_tracking()
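amd_iommu_set_dirty_tracking() walks the domain's device list and toggles the dirty-tracking control bit in each DTE in place, holding dev_data->dte_lock around the read-modify-write and flushing each DTE afterwards. A sketch of the per-device toggle; the bit position used below is purely hypothetical:

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DTE_FLAG_HD	BIT_ULL(7)	/* hypothetical dirty-tracking bit position */

struct dev_table_entry { u64 data[4]; };

struct iommu_dev_data {
	struct list_head list;
	spinlock_t dte_lock;
	struct dev_table_entry *dte;	/* this device's slot in the device table */
};

void flush_dte(struct iommu_dev_data *dev_data);	/* hypothetical */

/* Sketch: flip dirty tracking for every device attached to the domain. */
static void set_dirty_tracking_sketch(struct list_head *dev_list, bool enable)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, dev_list, list) {
		spin_lock(&dev_data->dte_lock);
		if (enable)
			dev_data->dte->data[0] |= DTE_FLAG_HD;
		else
			dev_data->dte->data[0] &= ~DTE_FLAG_HD;
		spin_unlock(&dev_data->dte_lock);

		flush_dte(dev_data);
	}
}
```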
2975 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_is_attach_deferred() local
2977 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
3004 struct iommu_dev_data *dev_data; in amd_iommu_def_domain_type() local
3006 dev_data = dev_iommu_priv_get(dev); in amd_iommu_def_domain_type()
3007 if (!dev_data) in amd_iommu_def_domain_type()
3021 if (pdev_pasid_supported(dev_data) && in amd_iommu_def_domain_type()
3105 static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data) in iommu_get_int_tablen() argument
3107 if (dev_data && dev_data->max_irqs == MAX_IRQS_PER_TABLE_2K) in iommu_get_int_tablen()
3117 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid); in set_dte_irq_entry() local
3119 if (dev_data) in set_dte_irq_entry()
3120 spin_lock(&dev_data->dte_lock); in set_dte_irq_entry()
3126 new |= iommu_get_int_tablen(dev_data); in set_dte_irq_entry()
3130 if (dev_data) in set_dte_irq_entry()
3131 spin_unlock(&dev_data->dte_lock); in set_dte_irq_entry()
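set_dte_irq_entry() may run before a dev_data exists for the devid (search_dev_data() can return NULL), so it takes dte_lock only when one was found, and derives the interrupt-table length from dev_data->max_irqs via iommu_get_int_tablen(), which itself tolerates a NULL dev_data (line 3107). A sketch of that lock-if-present pattern with hypothetical accessors:

```c
#include <linux/spinlock.h>
#include <linux/types.h>

struct iommu_dev_data {
	spinlock_t dte_lock;
	u32 max_irqs;
};

/* Hypothetical DTE accessor and IRQ-table-length encoding (NULL-tolerant). */
u64 *dte_irq_word(u16 devid);
u64  int_tablen_bits(struct iommu_dev_data *dev_data);

/* Sketch: dev_data is optional here, so lock only when it exists. */
static void set_dte_irq_entry_sketch(u16 devid, struct iommu_dev_data *dev_data,
				     u64 irt_root)
{
	u64 *word = dte_irq_word(devid);
	u64 new = irt_root | int_tablen_bits(dev_data);

	if (dev_data)
		spin_lock(&dev_data->dte_lock);

	*word = new;

	if (dev_data)
		spin_unlock(&dev_data->dte_lock);
}
```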
3634 struct iommu_dev_data *dev_data; in irq_remapping_alloc() local
3654 dev_data = search_dev_data(iommu, devid); in irq_remapping_alloc()
3655 max_irqs = dev_data ? dev_data->max_irqs : MAX_IRQS_PER_TABLE_512; in irq_remapping_alloc()
3949 struct iommu_dev_data *dev_data; in amd_ir_set_vcpu_affinity() local
3957 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3963 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()