Lines matching refs: dev_data

144 struct iommu_dev_data *dev_data; in alloc_dev_data() local
146 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
147 if (!dev_data) in alloc_dev_data()
150 spin_lock_init(&dev_data->lock); in alloc_dev_data()
151 dev_data->devid = devid; in alloc_dev_data()
152 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
154 llist_add(&dev_data->dev_data_list, &dev_data_list); in alloc_dev_data()
155 return dev_data; in alloc_dev_data()
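The allocator stores per-device state keyed by PCI device ID. The index omits non-matching lines (the failure return around line 148); a minimal reconstruction of the whole function, with the elided lines filled in as assumptions from context:

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;

        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return NULL;    /* elided in the index; assumed failure path */

        spin_lock_init(&dev_data->lock);
        dev_data->devid = devid;
        ratelimit_default_init(&dev_data->rs);

        /* publish on the global lock-free list so search_dev_data() sees it */
        llist_add(&dev_data->dev_data_list, &dev_data_list);
        return dev_data;
}

llist_add() is lock-free, which is why no lock is taken around the list insertion.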
160 struct iommu_dev_data *dev_data; in search_dev_data() local
167 llist_for_each_entry(dev_data, node, dev_data_list) { in search_dev_data()
168 if (dev_data->devid == devid) in search_dev_data()
169 return dev_data; in search_dev_data()
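search_dev_data() walks that same lock-free list. The loop header at line 167 implies a struct llist_node cursor initialized from dev_data_list.first in the elided lines; a hedged reconstruction:

static struct iommu_dev_data *search_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct llist_node *node;        /* assumed from llist_for_each_entry() usage */

        if (llist_empty(&dev_data_list))
                return NULL;

        node = dev_data_list.first;
        llist_for_each_entry(dev_data, node, dev_data_list) {
                if (dev_data->devid == devid)
                        return dev_data;
        }

        return NULL;
}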
231 struct iommu_dev_data *dev_data; in find_dev_data() local
234 dev_data = search_dev_data(devid); in find_dev_data()
236 if (dev_data == NULL) { in find_dev_data()
237 dev_data = alloc_dev_data(devid); in find_dev_data()
238 if (!dev_data) in find_dev_data()
242 dev_data->defer_attach = true; in find_dev_data()
245 return dev_data; in find_dev_data()
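find_dev_data() is the lookup-or-create wrapper over the two helpers above. The condition guarding line 242 is elided; in the AMD driver it checks whether translation was already enabled by the firmware or a previous (crash) kernel, in which case the real attach is deferred. A sketch under that assumption:

static struct iommu_dev_data *find_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        dev_data = search_dev_data(devid);

        if (dev_data == NULL) {
                dev_data = alloc_dev_data(devid);
                if (!dev_data)
                        return NULL;

                /* assumed guard: defer attach while pre-boot translation is live */
                if (translation_pre_enabled(iommu))
                        dev_data->defer_attach = true;
        }

        return dev_data;
}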
320 struct iommu_dev_data *dev_data; in iommu_init_device() local
330 dev_data = find_dev_data(devid); in iommu_init_device()
331 if (!dev_data) in iommu_init_device()
334 dev_data->pdev = setup_aliases(dev); in iommu_init_device()
346 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
347 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
350 dev_iommu_priv_set(dev, dev_data); in iommu_init_device()
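iommu_init_device() wires the dev_data into the generic IOMMU core through the per-device private pointer. The lines between 334 and 346 are elided; they are assumed to gate the iommu_v2 probe on the device being a v2-capable PCI device. Sketching just the visible dev_data flow, with the assumed pieces marked:

static int iommu_init_device(struct device *dev)
{
        struct iommu_dev_data *dev_data;
        int devid = get_device_id(dev); /* assumed helper; devid lookup is elided */

        dev_data = find_dev_data(devid);
        if (!dev_data)
                return -ENOMEM;

        dev_data->pdev = setup_aliases(dev);

        if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
                struct amd_iommu *iommu;

                /* assumed guard: only v2-capable PCI devices reach here */
                iommu = amd_iommu_rlookup_table[dev_data->devid];
                dev_data->iommu_v2 = iommu->is_iommu_v2;
        }

        dev_iommu_priv_set(dev, dev_data);

        return 0;
}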
371 struct iommu_dev_data *dev_data; in amd_iommu_uninit_device() local
373 dev_data = dev_iommu_priv_get(dev); in amd_iommu_uninit_device()
374 if (!dev_data) in amd_iommu_uninit_device()
377 if (dev_data->domain) in amd_iommu_uninit_device()
414 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_hw_error() local
427 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
429 if (dev_data) { in amd_iommu_report_rmp_hw_error()
430 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
446 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_fault() local
460 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
462 if (dev_data) { in amd_iommu_report_rmp_fault()
463 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
486 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_page_fault() local
492 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
494 if (dev_data) { in amd_iommu_report_page_fault()
501 if (!report_iommu_fault(&dev_data->domain->domain, in amd_iommu_report_page_fault()
509 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
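All three event reporters share one pattern: resolve the faulting devid to a pci_dev, fetch its dev_data, and use the per-device ratelimit state (dev_data->rs, initialized in alloc_dev_data()) so one noisy device cannot drown out others. A condensed sketch of the shared shape, with the message details elided; report_event is a hypothetical name for the common pattern:

static void report_event(u16 devid, u64 address, int flags)
{
        struct iommu_dev_data *dev_data = NULL;
        struct pci_dev *pdev;

        pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
                                           devid & 0xff);
        if (pdev)
                dev_data = dev_iommu_priv_get(&pdev->dev);

        if (dev_data) {
                if (__ratelimit(&dev_data->rs))
                        pci_err(pdev, "Event logged [...]\n");  /* per-device ratelimit */
        } else {
                pr_err_ratelimited("Event logged [...]\n");     /* global fallback, assumed */
        }

        if (pdev)
                pci_dev_put(pdev);
}

amd_iommu_report_page_fault() additionally forwards DMA faults to the device driver via report_iommu_fault() on dev_data->domain before printing (line 501).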
1201 static int device_flush_iotlb(struct iommu_dev_data *dev_data, in device_flush_iotlb() argument
1208 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1209 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1211 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
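device_flush_iotlb() builds an IOTLB-pages invalidation command against the device's ATS queue depth and posts it to the IOMMU that owns the device. The function is short enough to reconstruct almost entirely from the index; the command-queue submission at the end is an assumption:

static int device_flush_iotlb(struct iommu_dev_data *dev_data,
                              u64 address, size_t size)
{
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;
        int qdep;

        qdep  = dev_data->ats.qdep;
        iommu = amd_iommu_rlookup_table[dev_data->devid];

        build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

        return iommu_queue_command(iommu, &cmd);        /* assumed submission */
}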
1226 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1232 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1234 if (dev_data->pdev) in device_flush_dte()
1235 ret = pci_for_each_dma_alias(dev_data->pdev, in device_flush_dte()
1238 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1242 alias = amd_iommu_alias_table[dev_data->devid]; in device_flush_dte()
1243 if (alias != dev_data->devid) { in device_flush_dte()
1249 if (dev_data->ats.enabled) in device_flush_dte()
1250 ret = device_flush_iotlb(dev_data, 0, ~0UL); in device_flush_dte()
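device_flush_dte() invalidates the device-table entry on the hardware, covering every DMA alias of the device plus any legacy alias from amd_iommu_alias_table, and then flushes the device IOTLB if ATS is on, since a stale DTE may have let translations into the device's ATC. A hedged reconstruction, with the error-propagation lines filled in by assumption:

static int device_flush_dte(struct iommu_dev_data *dev_data)
{
        struct amd_iommu *iommu;
        u16 alias;
        int ret;

        iommu = amd_iommu_rlookup_table[dev_data->devid];

        if (dev_data->pdev)
                ret = pci_for_each_dma_alias(dev_data->pdev,
                                             device_flush_dte_alias, iommu);    /* assumed callback name */
        else
                ret = iommu_flush_dte(iommu, dev_data->devid);
        if (ret)
                return ret;

        alias = amd_iommu_alias_table[dev_data->devid];
        if (alias != dev_data->devid) {
                ret = iommu_flush_dte(iommu, alias);
                if (ret)
                        return ret;
        }

        if (dev_data->ats.enabled)
                ret = device_flush_iotlb(dev_data, 0, ~0UL);    /* full-range flush */

        return ret;
}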
1263 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1280 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1282 if (!dev_data->ats.enabled) in __domain_flush_pages()
1285 ret |= device_flush_iotlb(dev_data, address, size); in __domain_flush_pages()
1376 struct iommu_dev_data *dev_data; in domain_flush_devices() local
1378 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1379 device_flush_dte(dev_data); in domain_flush_devices()
1538 static void do_attach(struct iommu_dev_data *dev_data, in do_attach() argument
1544 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1545 ats = dev_data->ats.enabled; in do_attach()
1548 dev_data->domain = domain; in do_attach()
1549 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1556 set_dte_entry(dev_data->devid, domain, in do_attach()
1557 ats, dev_data->iommu_v2); in do_attach()
1558 clone_aliases(dev_data->pdev); in do_attach()
1560 device_flush_dte(dev_data); in do_attach()
1563 static void do_detach(struct iommu_dev_data *dev_data) in do_detach() argument
1565 struct protection_domain *domain = dev_data->domain; in do_detach()
1568 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
1571 dev_data->domain = NULL; in do_detach()
1572 list_del(&dev_data->list); in do_detach()
1573 clear_dte_entry(dev_data->devid); in do_detach()
1574 clone_aliases(dev_data->pdev); in do_detach()
1577 device_flush_dte(dev_data); in do_detach()
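do_attach()/do_detach() are the mirror-image core of domain binding: both update dev_data->domain and the domain's dev_list, rewrite or clear the DTE (propagating it to aliases via clone_aliases()), and finish with device_flush_dte(). A sketch of do_attach(); the elided lines 1551-1553 are assumed to bump the domain's per-IOMMU reference counters:

static void do_attach(struct iommu_dev_data *dev_data,
                      struct protection_domain *domain)
{
        struct amd_iommu *iommu;
        bool ats;

        iommu = amd_iommu_rlookup_table[dev_data->devid];
        ats   = dev_data->ats.enabled;

        /* update software state */
        dev_data->domain = domain;
        list_add(&dev_data->list, &domain->dev_list);

        /* assumed: domain->dev_iommu[iommu->index] and domain->dev_cnt are
         * incremented here; the matching decrements live in do_detach() */

        /* update hardware state */
        set_dte_entry(dev_data->devid, domain,
                      ats, dev_data->iommu_v2);
        clone_aliases(dev_data->pdev);

        device_flush_dte(dev_data);
}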
1637 struct iommu_dev_data *dev_data; in attach_device() local
1644 dev_data = dev_iommu_priv_get(dev); in attach_device()
1646 spin_lock(&dev_data->lock); in attach_device()
1649 if (dev_data->domain != NULL) in attach_device()
1663 if (dev_data->iommu_v2) { in attach_device()
1667 dev_data->ats.enabled = true; in attach_device()
1668 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
1669 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in attach_device()
1673 dev_data->ats.enabled = true; in attach_device()
1674 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
1680 do_attach(dev_data, domain); in attach_device()
1692 spin_unlock(&dev_data->lock); in attach_device()
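attach_device() is the locked wrapper around do_attach(): it takes domain->lock, then dev_data->lock, bails out if the device is already bound (line 1649), and enables ATS first, either through the full IOMMUv2 path (ATS + PRI + PASID, recording whether PRI responses need a PASID TLP prefix) or plain pci_enable_ats(). A condensed sketch of the ATS branches, with the surrounding locking elided and the v2-enable helper name an assumption:

        if (domain->flags & PD_IOMMUV2_MASK) {
                if (dev_data->iommu_v2) {
                        /* assumed helper enabling ATS, PRI and PASID together */
                        if (pdev_iommuv2_enable(pdev) != 0)
                                goto out;

                        dev_data->ats.enabled = true;
                        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
                        dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
                }
        } else if (pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
                dev_data->ats.enabled = true;
                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
        }

        do_attach(dev_data, domain);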
1705 struct iommu_dev_data *dev_data; in detach_device() local
1708 dev_data = dev_iommu_priv_get(dev); in detach_device()
1709 domain = dev_data->domain; in detach_device()
1713 spin_lock(&dev_data->lock); in detach_device()
1721 if (WARN_ON(!dev_data->domain)) in detach_device()
1724 do_detach(dev_data); in detach_device()
1729 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
1731 else if (dev_data->ats.enabled) in detach_device()
1734 dev_data->ats.enabled = false; in detach_device()
1737 spin_unlock(&dev_data->lock); in detach_device()
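detach_device() reverses the same steps under the same lock order (domain->lock outer, dev_data->lock inner) and tolerates a double detach from the generic alias-handling path via the WARN_ON at line 1721. A hedged skeleton:

static void detach_device(struct device *dev)
{
        struct protection_domain *domain;
        struct iommu_dev_data *dev_data;
        unsigned long flags;

        dev_data = dev_iommu_priv_get(dev);
        domain   = dev_data->domain;

        spin_lock_irqsave(&domain->lock, flags);
        spin_lock(&dev_data->lock);

        if (WARN_ON(!dev_data->domain)) /* may already be detached via alias handling */
                goto out;

        do_detach(dev_data);

        if (!dev_is_pci(dev))
                goto out;

        /* undo whichever ATS variant attach_device() enabled */
        if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
                pdev_iommuv2_disable(to_pci_dev(dev));  /* assumed counterpart helper */
        else if (dev_data->ats.enabled)
                pci_disable_ats(to_pci_dev(dev));

        dev_data->ats.enabled = false;

out:
        spin_unlock(&dev_data->lock);
        spin_unlock_irqrestore(&domain->lock, flags);
}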
1810 struct iommu_dev_data *dev_data; in update_device_table() local
1812 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
1813 set_dte_entry(dev_data->devid, domain, in update_device_table()
1814 dev_data->ats.enabled, dev_data->iommu_v2); in update_device_table()
1815 clone_aliases(dev_data->pdev); in update_device_table()
2007 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_detach_device() local
2014 if (dev_data->domain != NULL) in amd_iommu_detach_device()
2024 dev_data->use_vapic = 0; in amd_iommu_detach_device()
2034 struct iommu_dev_data *dev_data; in amd_iommu_attach_device() local
2041 dev_data = dev_iommu_priv_get(dev); in amd_iommu_attach_device()
2042 dev_data->defer_attach = false; in amd_iommu_attach_device()
2044 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2048 if (dev_data->domain) in amd_iommu_attach_device()
2056 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2058 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2221 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_is_attach_deferred() local
2223 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2252 struct iommu_dev_data *dev_data; in amd_iommu_def_domain_type() local
2254 dev_data = dev_iommu_priv_get(dev); in amd_iommu_def_domain_type()
2255 if (!dev_data) in amd_iommu_def_domain_type()
2263 if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2) in amd_iommu_def_domain_type()
2375 struct iommu_dev_data *dev_data; in __flush_pasid() local
2401 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2409 if (!dev_data->ats.enabled) in __flush_pasid()
2412 qdep = dev_data->ats.qdep; in __flush_pasid()
2413 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
2415 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
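__flush_pasid() extends a PASID-scoped TLB flush out to the devices: after invalidating the IOMMU's own TLB, it walks domain->dev_list and queues a PASID-tagged IOTLB invalidation to each ATS-enabled device's IOMMU; non-ATS devices can sit in a v2 domain and are skipped (line 2409). A sketch of that loop, with the submission and error handling assumed:

        list_for_each_entry(dev_data, &domain->dev_list, list) {
                struct amd_iommu *iommu;
                int qdep;

                /* v2 domains may also contain non-ATS devices; skip them */
                if (!dev_data->ats.enabled)
                        continue;

                qdep  = dev_data->ats.qdep;
                iommu = amd_iommu_rlookup_table[dev_data->devid];

                build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
                                      qdep, address, size);

                ret = iommu_queue_command(iommu, &cmd); /* assumed submission */
                if (ret != 0)
                        goto out;
        }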
2571 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
2575 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_complete_ppr()
2576 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
2578 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
2579 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
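amd_iommu_complete_ppr() shows why pri_tlp was cached at attach time: the PPR-completion command must say whether the device expects a PASID TLP prefix on the response. A near-complete reconstruction (the signature and the final submission are assumptions):

int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
                           int status, int tag)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;

        dev_data = dev_iommu_priv_get(&pdev->dev);
        iommu    = amd_iommu_rlookup_table[dev_data->devid];

        build_complete_ppr(&cmd, dev_data->devid, pasid, status,
                           tag, dev_data->pri_tlp);

        return iommu_queue_command(iommu, &cmd);        /* assumed submission */
}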
3398 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); in amd_ir_set_vcpu_affinity() local
3404 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()