Lines matching refs: dev_data
Each entry below is a source line number, the matching line, and the enclosing function; "local" marks the line that declares dev_data.
187 struct iommu_dev_data *dev_data; in alloc_dev_data() local
190 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
191 if (!dev_data) in alloc_dev_data()
194 spin_lock_init(&dev_data->lock); in alloc_dev_data()
195 dev_data->devid = devid; in alloc_dev_data()
196 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
198 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list); in alloc_dev_data()
199 return dev_data; in alloc_dev_data()
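
Taken together, the alloc_dev_data() matches reconstruct into a small constructor. A sketch, with the pci_seg local and the failure return filled in as assumptions:

static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;   /* assumed */

        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return NULL;                                  /* assumed */

        spin_lock_init(&dev_data->lock);
        dev_data->devid = devid;
        ratelimit_default_init(&dev_data->rs);

        /* Publish on the per-PCI-segment lock-free list */
        llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
        return dev_data;
}

llist_add() needs no lock, which is why search_dev_data() below can walk the list without taking one.
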
204 struct iommu_dev_data *dev_data; in search_dev_data() local
212 llist_for_each_entry(dev_data, node, dev_data_list) { in search_dev_data()
213 if (dev_data->devid == devid) in search_dev_data()
214 return dev_data; in search_dev_data()
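
search_dev_data() walks that same llist. A sketch; the empty-list check and the node setup are assumptions around the loop shown above:

static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct llist_node *node;
        struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;   /* assumed */

        if (llist_empty(&pci_seg->dev_data_list))             /* assumed */
                return NULL;

        node = pci_seg->dev_data_list.first;
        llist_for_each_entry(dev_data, node, dev_data_list) {
                if (dev_data->devid == devid)
                        return dev_data;
        }

        return NULL;
}
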
284 struct iommu_dev_data *dev_data; in find_dev_data() local
286 dev_data = search_dev_data(iommu, devid); in find_dev_data()
288 if (dev_data == NULL) { in find_dev_data()
289 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
290 if (!dev_data) in find_dev_data()
294 dev_data->defer_attach = true; in find_dev_data()
297 return dev_data; in find_dev_data()
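
find_dev_data() composes the two: look up, and allocate on a miss. Only the defer_attach assignment is visible in the matches; the translation_pre_enabled() guard around it is an assumption based on the driver's handling of firmware-pre-enabled translation:

static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{
        struct iommu_dev_data *dev_data;

        dev_data = search_dev_data(iommu, devid);

        if (dev_data == NULL) {
                dev_data = alloc_dev_data(iommu, devid);
                if (!dev_data)
                        return NULL;

                /* Guard assumed: defer attach while the IOMMU still runs
                 * with translation enabled by firmware. */
                if (translation_pre_enabled(iommu))
                        dev_data->defer_attach = true;
        }

        return dev_data;
}
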
377 struct iommu_dev_data *dev_data; in iommu_init_device() local
388 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
389 if (!dev_data) in iommu_init_device()
392 dev_data->dev = dev; in iommu_init_device()
403 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
406 dev_iommu_priv_set(dev, dev_data); in iommu_init_device()
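
iommu_init_device() is where dev_data gets bound to a struct device: the pointer lands in dev->iommu via dev_iommu_priv_set(), so every later callback can recover it with dev_iommu_priv_get(). A condensed sketch of the dev_data handling; the devid lookup helper and the error codes are assumptions:

static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{
        struct iommu_dev_data *dev_data;
        int devid;

        devid = get_device_sbdf_id(dev);        /* assumed helper */
        if (devid < 0)
                return devid;

        dev_data = find_dev_data(iommu, devid);
        if (!dev_data)
                return -ENOMEM;                 /* assumed */

        dev_data->dev = dev;
        /* Remember whether this IOMMU supports v2 (PASID) features */
        dev_data->iommu_v2 = iommu->is_iommu_v2;

        dev_iommu_priv_set(dev, dev_data);
        return 0;
}
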
430 struct iommu_dev_data *dev_data; in amd_iommu_uninit_device() local
432 dev_data = dev_iommu_priv_get(dev); in amd_iommu_uninit_device()
433 if (!dev_data) in amd_iommu_uninit_device()
436 if (dev_data->domain) in amd_iommu_uninit_device()
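
amd_iommu_uninit_device() deliberately does not free the structure. A sketch; the detach_device() call is inferred from the dev_data->domain check:

static void amd_iommu_uninit_device(struct device *dev)
{
        struct iommu_dev_data *dev_data;

        dev_data = dev_iommu_priv_get(dev);
        if (!dev_data)
                return;

        if (dev_data->domain)
                detach_device(dev);             /* inferred */

        /*
         * dev_data stays on the pci_seg list so a re-plugged device
         * reuses it; freeing it here would race with lookups.
         */
}
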
473 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_hw_error() local
486 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
488 if (dev_data) { in amd_iommu_report_rmp_hw_error()
489 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
505 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_fault() local
519 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
521 if (dev_data) { in amd_iommu_report_rmp_fault()
522 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
546 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_page_fault() local
552 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
554 if (dev_data) { in amd_iommu_report_page_fault()
562 if (dev_data->domain == NULL) { in amd_iommu_report_page_fault()
570 if (!report_iommu_fault(&dev_data->domain->domain, in amd_iommu_report_page_fault()
578 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
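
The three reporters above (amd_iommu_report_rmp_hw_error(), amd_iommu_report_rmp_fault(), amd_iommu_report_page_fault()) share one shape: fetch dev_data for the faulting PCI device and throttle logging through the per-device ratelimit state initialized in alloc_dev_data(). The page-fault path additionally offers the event to report_iommu_fault() on the attached domain before logging. A sketch of the common tail; the message text is illustrative, not the driver's exact wording:

        dev_data = dev_iommu_priv_get(&pdev->dev);

        if (dev_data) {
                /* Per-device ratelimit: one noisy device cannot flood the log */
                if (__ratelimit(&dev_data->rs))
                        pci_err(pdev, "Event logged [... devid=%#x flags=%#x]\n",
                                devid, flags);
        } else {
                /* No per-device state yet: global ratelimit as a fallback */
                pr_err_ratelimited("Event logged [... devid=%#x flags=%#x]\n",
                                   devid, flags);
        }
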
1289 static int device_flush_iotlb(struct iommu_dev_data *dev_data, in device_flush_iotlb() argument
1296 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1297 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_iotlb()
1301 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
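
device_flush_iotlb() issues an ATS invalidation to one device, using the queue depth cached at attach time. A sketch; the error return and the final queueing call are filled in from context:

static int device_flush_iotlb(struct iommu_dev_data *dev_data,
                              u64 address, size_t size)
{
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;
        int qdep;

        qdep  = dev_data->ats.qdep;
        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
                return -EINVAL;                 /* assumed */

        build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

        return iommu_queue_command(iommu, &cmd);
}
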
1316 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1324 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_dte()
1328 if (dev_is_pci(dev_data->dev)) in device_flush_dte()
1329 pdev = to_pci_dev(dev_data->dev); in device_flush_dte()
1335 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1340 alias = pci_seg->alias_table[dev_data->devid]; in device_flush_dte()
1341 if (alias != dev_data->devid) { in device_flush_dte()
1347 if (dev_data->ats.enabled) in device_flush_dte()
1348 ret = device_flush_iotlb(dev_data, 0, ~0UL); in device_flush_dte()
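
device_flush_dte() invalidates the device table entry for the device itself, for any PCI alias recorded in the segment's alias_table, and, when ATS is enabled, the device's entire IOTLB (address 0, length ~0UL). A sketch; the DMA-alias walk for PCI devices is an assumption suggested by the pdev handling above:

static int device_flush_dte(struct iommu_dev_data *dev_data)
{
        struct amd_iommu_pci_seg *pci_seg;
        struct pci_dev *pdev = NULL;
        struct amd_iommu *iommu;
        u16 alias;
        int ret;

        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
                return -EINVAL;

        if (dev_is_pci(dev_data->dev))
                pdev = to_pci_dev(dev_data->dev);

        if (pdev)       /* assumed: flush the DTE of every DMA alias */
                ret = pci_for_each_dma_alias(pdev, device_flush_dte_alias,
                                             iommu);
        else
                ret = iommu_flush_dte(iommu, dev_data->devid);
        if (ret)
                return ret;

        pci_seg = iommu->pci_seg;
        alias = pci_seg->alias_table[dev_data->devid];
        if (alias != dev_data->devid) {
                ret = iommu_flush_dte(iommu, alias);
                if (ret)
                        return ret;
        }

        if (dev_data->ats.enabled)
                /* Invalidate the whole IOTLB of the device */
                ret = device_flush_iotlb(dev_data, 0, ~0UL);

        return ret;
}
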
1361 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1378 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1380 if (!dev_data->ats.enabled) in __domain_flush_pages()
1383 ret |= device_flush_iotlb(dev_data, address, size); in __domain_flush_pages()
1474 struct iommu_dev_data *dev_data; in domain_flush_devices() local
1476 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1477 device_flush_dte(dev_data); in domain_flush_devices()
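
The two domain-wide loops above reuse those helpers across every device on domain->dev_list: __domain_flush_pages() targets only ATS-enabled devices, while domain_flush_devices() re-flushes every DTE. The loop bodies, extracted:

        /* __domain_flush_pages(): IOTLB invalidation, ATS devices only */
        list_for_each_entry(dev_data, &domain->dev_list, list) {
                if (!dev_data->ats.enabled)
                        continue;
                ret |= device_flush_iotlb(dev_data, address, size);
        }

        /* domain_flush_devices(): DTE invalidation for every device */
        list_for_each_entry(dev_data, &domain->dev_list, list)
                device_flush_dte(dev_data);
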
1650 static void do_attach(struct iommu_dev_data *dev_data, in do_attach() argument
1656 iommu = rlookup_amd_iommu(dev_data->dev); in do_attach()
1659 ats = dev_data->ats.enabled; in do_attach()
1662 dev_data->domain = domain; in do_attach()
1663 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1674 set_dte_entry(iommu, dev_data->devid, domain, in do_attach()
1675 ats, dev_data->iommu_v2); in do_attach()
1676 clone_aliases(iommu, dev_data->dev); in do_attach()
1678 device_flush_dte(dev_data); in do_attach()
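
do_attach() links dev_data into the domain and makes the hardware see it: set the back-pointer, join dev_list, write the DTE, copy it to aliases, flush. The reference-count updates are an assumption mirrored from the detach path:

static void do_attach(struct iommu_dev_data *dev_data,
                      struct protection_domain *domain)
{
        struct amd_iommu *iommu;
        bool ats;

        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
                return;
        ats = dev_data->ats.enabled;

        /* Update data structures */
        dev_data->domain = domain;
        list_add(&dev_data->list, &domain->dev_list);

        /* Assumed: per-IOMMU and per-domain reference counting */
        domain->dev_iommu[iommu->index] += 1;
        domain->dev_cnt                 += 1;

        /* Update device table and make the hardware see it */
        set_dte_entry(iommu, dev_data->devid, domain,
                      ats, dev_data->iommu_v2);
        clone_aliases(iommu, dev_data->dev);

        device_flush_dte(dev_data);
}
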
1681 static void do_detach(struct iommu_dev_data *dev_data) in do_detach() argument
1683 struct protection_domain *domain = dev_data->domain; in do_detach()
1686 iommu = rlookup_amd_iommu(dev_data->dev); in do_detach()
1691 dev_data->domain = NULL; in do_detach()
1692 list_del(&dev_data->list); in do_detach()
1693 clear_dte_entry(iommu, dev_data->devid); in do_detach()
1694 clone_aliases(iommu, dev_data->dev); in do_detach()
1697 device_flush_dte(dev_data); in do_detach()
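
do_detach() is the mirror image: unlink, clear the DTE, propagate to aliases, flush. The domain TLB flush and the reference-count drop after the DTE flush are assumptions consistent with the ordering the driver needs (no stale translations may remain before the counts go down):

static void do_detach(struct iommu_dev_data *dev_data)
{
        struct protection_domain *domain = dev_data->domain;
        struct amd_iommu *iommu;

        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
                return;

        /* Update data structures */
        dev_data->domain = NULL;
        list_del(&dev_data->list);
        clear_dte_entry(iommu, dev_data->devid);
        clone_aliases(iommu, dev_data->dev);

        /* Flush the DTE entry */
        device_flush_dte(dev_data);

        /* Assumed tail: flush the domain TLB, wait for completion,
         * then drop the references taken in do_attach(). */
        amd_iommu_domain_flush_tlb_pde(domain);
        amd_iommu_domain_flush_complete(domain);
        domain->dev_iommu[iommu->index] -= 1;
        domain->dev_cnt                 -= 1;
}
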
1759 struct iommu_dev_data *dev_data; in attach_device() local
1766 dev_data = dev_iommu_priv_get(dev); in attach_device()
1768 spin_lock(&dev_data->lock); in attach_device()
1771 if (dev_data->domain != NULL) in attach_device()
1793 if (dev_data->iommu_v2) { in attach_device()
1797 dev_data->ats.enabled = true; in attach_device()
1798 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
1799 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in attach_device()
1803 dev_data->ats.enabled = true; in attach_device()
1804 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
1810 do_attach(dev_data, domain); in attach_device()
1822 spin_unlock(&dev_data->lock); in attach_device()
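
attach_device() wraps do_attach() with the per-device lock and feature enablement: v2-capable devices get PASID/PRI/ATS as a bundle, others get plain ATS when the IOMMU supports IOTLB flushing. A sketch of the locked section; the helper name pdev_pri_ats_enable() and the amd_iommu_iotlb_sup test are plausible assumptions, not shown in the matches:

        spin_lock(&dev_data->lock);

        if (dev_data->domain != NULL)
                goto out;                       /* already attached */

        if (dev_data->iommu_v2) {
                /* Assumed helper: enables PASID, PRI and ATS together */
                if (pdev_pri_ats_enable(pdev) == 0) {
                        dev_data->ats.enabled = true;
                        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
                        dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
                }
        } else if (amd_iommu_iotlb_sup &&       /* assumed guard */
                   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
                dev_data->ats.enabled = true;
                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
        }

        do_attach(dev_data, domain);

out:
        spin_unlock(&dev_data->lock);
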
1835 struct iommu_dev_data *dev_data; in detach_device() local
1838 dev_data = dev_iommu_priv_get(dev); in detach_device()
1839 domain = dev_data->domain; in detach_device()
1843 spin_lock(&dev_data->lock); in detach_device()
1851 if (WARN_ON(!dev_data->domain)) in detach_device()
1854 do_detach(dev_data); in detach_device()
1859 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
1861 else if (dev_data->ats.enabled) in detach_device()
1864 dev_data->ats.enabled = false; in detach_device()
1867 spin_unlock(&dev_data->lock); in detach_device()
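
detach_device() undoes that under the same lock. The WARN_ON documents that a double detach is a driver bug, not an expected state; the disable helpers on the two branches are assumptions matching the enables above:

        spin_lock(&dev_data->lock);

        if (WARN_ON(!dev_data->domain))
                goto out;

        do_detach(dev_data);

        if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
                pdev_iommuv2_disable(to_pci_dev(dev));  /* assumed */
        else if (dev_data->ats.enabled)
                pci_disable_ats(to_pci_dev(dev));       /* assumed */

        dev_data->ats.enabled = false;

out:
        spin_unlock(&dev_data->lock);
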
1946 struct iommu_dev_data *dev_data; in update_device_table() local
1948 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
1949 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in update_device_table()
1953 set_dte_entry(iommu, dev_data->devid, domain, in update_device_table()
1954 dev_data->ats.enabled, dev_data->iommu_v2); in update_device_table()
1955 clone_aliases(iommu, dev_data->dev); in update_device_table()
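
update_device_table() regenerates the DTE for every attached device, for example after the domain's page-table root changes, using each device's current ATS and v2 state. This one reconstructs almost verbatim from the matches:

static void update_device_table(struct protection_domain *domain)
{
        struct iommu_dev_data *dev_data;

        list_for_each_entry(dev_data, &domain->dev_list, list) {
                struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);

                if (!iommu)
                        continue;               /* assumed */
                set_dte_entry(iommu, dev_data->devid, domain,
                              dev_data->ats.enabled, dev_data->iommu_v2);
                clone_aliases(iommu, dev_data->dev);
        }
}
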
2157 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_attach_device() local
2166 if (dev_data->domain == domain) in amd_iommu_attach_device()
2169 dev_data->defer_attach = false; in amd_iommu_attach_device()
2171 if (dev_data->domain) in amd_iommu_attach_device()
2179 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2181 dev_data->use_vapic = 0; in amd_iommu_attach_device()
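
amd_iommu_attach_device() is the iommu-ops entry point on top of attach_device(): short-circuit if already on this domain, clear defer_attach, detach from any old domain, then record whether the device may use vAPIC (guest) interrupt delivery. A sketch of the flow, where dom is the generic iommu_domain argument; the guest-IR guard around use_vapic is an assumption:

        if (dev_data->domain == domain)
                return 0;                       /* nothing to do */

        dev_data->defer_attach = false;

        if (dev_data->domain)
                detach_device(dev);

        ret = attach_device(dev, domain);

        /* Assumed guard: only meaningful when guest vAPIC IR is enabled */
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
                if (dom->type == IOMMU_DOMAIN_UNMANAGED)
                        dev_data->use_vapic = 1;
                else
                        dev_data->use_vapic = 0;
        }
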
2357 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_is_attach_deferred() local
2359 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
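
The two lines above are effectively the whole callback; the deferral decision made in find_dev_data() surfaces to the IOMMU core here:

static bool amd_iommu_is_attach_deferred(struct device *dev)
{
        struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);

        return dev_data->defer_attach;
}
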
2388 struct iommu_dev_data *dev_data; in amd_iommu_def_domain_type() local
2390 dev_data = dev_iommu_priv_get(dev); in amd_iommu_def_domain_type()
2391 if (!dev_data) in amd_iommu_def_domain_type()
2401 if (dev_data->iommu_v2 && in amd_iommu_def_domain_type()
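
amd_iommu_def_domain_type() uses the cached iommu_v2 flag to pick the default domain: v2-capable devices want an identity mapping so they can do PASID (v2) DMA. The second operand of the condition is cut off in the match; the memory-encryption check is an assumption about what completes it:

static int amd_iommu_def_domain_type(struct device *dev)
{
        struct iommu_dev_data *dev_data;

        dev_data = dev_iommu_priv_get(dev);
        if (!dev_data)
                return 0;

        /* Completion of the condition assumed: encrypted memory forces
         * remapping, so identity-map only when it is not active. */
        if (dev_data->iommu_v2 &&
            !cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                return IOMMU_DOMAIN_IDENTITY;

        return 0;
}
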
2530 struct iommu_dev_data *dev_data; in __flush_pasid() local
2556 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2564 if (!dev_data->ats.enabled) in __flush_pasid()
2567 qdep = dev_data->ats.qdep; in __flush_pasid()
2568 iommu = rlookup_amd_iommu(dev_data->dev); in __flush_pasid()
2571 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
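
__flush_pasid() extends the domain flush to PASID granularity: after invalidating the IOMMU-side TLB it must also reach every ATS-enabled device holding IOTLB state for that PASID. A sketch of that loop; the argument order of build_inv_iotlb_pasid() and the error handling are assumptions:

        list_for_each_entry(dev_data, &domain->dev_list, list) {
                struct amd_iommu *iommu;
                int qdep;

                /* Devices without ATS cache no translations themselves */
                if (!dev_data->ats.enabled)
                        continue;

                qdep  = dev_data->ats.qdep;
                iommu = rlookup_amd_iommu(dev_data->dev);
                if (!iommu)
                        continue;               /* assumed */

                build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
                                      qdep, address, size);  /* order assumed */

                ret = iommu_queue_command(iommu, &cmd);
                if (ret != 0)
                        goto out;               /* assumed */
        }
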
2727 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
2731 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_complete_ppr()
2736 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
2737 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
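
amd_iommu_complete_ppr() replies to a peripheral page request; the completion carries dev_data->pri_tlp, the PRG-response-PASID setting captured from the device in attach_device(). A sketch of the tail; the iommu lookup and the queueing call are assumed from the surrounding command-queue pattern:

        dev_data = dev_iommu_priv_get(&pdev->dev);
        iommu    = rlookup_amd_iommu(&pdev->dev);       /* assumed */
        if (!iommu)
                return -ENODEV;                         /* assumed */

        build_complete_ppr(&cmd, dev_data->devid, pasid, status,
                           tag, dev_data->pri_tlp);

        return iommu_queue_command(iommu, &cmd);
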
3545 struct iommu_dev_data *dev_data; in amd_ir_set_vcpu_affinity() local
3550 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3556 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()
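
Finally, interrupt remapping consults dev_data too: amd_ir_set_vcpu_affinity() refuses to switch an IRTE into guest (vAPIC) mode unless amd_iommu_attach_device() marked the device with use_vapic. The guard, with the bail-out value assumed:

        dev_data = search_dev_data(ir_data->iommu, irte_info->devid);

        /*
         * Device never set up for guest mode: leave the IRTE in
         * legacy remapping format.
         */
        if (!dev_data || !dev_data->use_vapic)
                return 0;                       /* assumed */
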