Lines matching refs: iommu (Intel VT-d SVM, drivers/iommu/intel/svm.c)
67 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
76 iommu->name); in intel_svm_enable_prq()
79 iommu->prq = page_address(pages); in intel_svm_enable_prq()
81 irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
84 iommu->name); in intel_svm_enable_prq()
88 iommu->pr_irq = irq; in intel_svm_enable_prq()
90 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_svm_enable_prq()
91 "dmar%d-iopfq", iommu->seq_id); in intel_svm_enable_prq()
92 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_svm_enable_prq()
94 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_svm_enable_prq()
98 iommu->iopf_queue = iopfq; in intel_svm_enable_prq()
100 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
103 iommu->prq_name, iommu); in intel_svm_enable_prq()
106 iommu->name); in intel_svm_enable_prq()
109 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
110 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
111 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
113 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
118 iopf_queue_free(iommu->iopf_queue); in intel_svm_enable_prq()
119 iommu->iopf_queue = NULL; in intel_svm_enable_prq()
122 iommu->pr_irq = 0; in intel_svm_enable_prq()
124 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
125 iommu->prq = NULL; in intel_svm_enable_prq()
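Read together, the intel_svm_enable_prq() fragments above follow a fixed order: allocate the page request queue (PRQ) memory, set up the PRQ interrupt and the I/O page fault queue, register the threaded handler, and only then program PQH/PQT/PQA so the hardware starts using the queue. A condensed sketch of that order (kernel context assumed, warning messages dropped; the page allocation call, the IRQ flags and the error-label names are not visible in the listing and are assumptions):

int intel_svm_enable_prq(struct intel_iommu *iommu)
{
    struct iopf_queue *iopfq;
    struct page *pages;
    int irq, ret;

    /* Backing pages for the page request queue, zeroed, on the IOMMU's node. */
    pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
    if (!pages)
        return -ENOMEM;
    iommu->prq = page_address(pages);

    /* Dedicated interrupt for page request events on this IOMMU. */
    irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id,
                           iommu->node, iommu);
    if (irq <= 0) {
        ret = -EINVAL;
        goto free_prq;
    }
    iommu->pr_irq = irq;

    /* I/O page fault queue that feeds recoverable faults to their handlers. */
    snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
             "dmar%d-iopfq", iommu->seq_id);
    iopfq = iopf_queue_alloc(iommu->iopfq_name);
    if (!iopfq) {
        ret = -ENOMEM;
        goto free_hwirq;
    }
    iommu->iopf_queue = iopfq;

    snprintf(iommu->prq_name, sizeof(iommu->prq_name),
             "dmar%d-prq", iommu->seq_id);
    ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
                               iommu->prq_name, iommu);
    if (ret)
        goto free_iopfq;

    /* Publish the queue to hardware only after the handler is in place. */
    dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
    dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
    dmar_writeq(iommu->reg + DMAR_PQA_REG,
                virt_to_phys(iommu->prq) | PRQ_ORDER);

    init_completion(&iommu->prq_complete);
    return 0;

free_iopfq:
    iopf_queue_free(iommu->iopf_queue);
    iommu->iopf_queue = NULL;
free_hwirq:
    dmar_free_hwirq(irq);
    iommu->pr_irq = 0;
free_prq:
    free_pages((unsigned long)iommu->prq, PRQ_ORDER);
    iommu->prq = NULL;
    return ret;
}

intel_svm_finish_prq() below undoes this roughly in reverse: PQH/PQT/PQA are cleared first so the hardware stops posting requests, and only then are the interrupt, the iopf queue and the queue pages released.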
130 int intel_svm_finish_prq(struct intel_iommu *iommu) in intel_svm_finish_prq() argument
132 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
133 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
134 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
136 if (iommu->pr_irq) { in intel_svm_finish_prq()
137 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
138 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
139 iommu->pr_irq = 0; in intel_svm_finish_prq()
142 if (iommu->iopf_queue) { in intel_svm_finish_prq()
143 iopf_queue_free(iommu->iopf_queue); in intel_svm_finish_prq()
144 iommu->iopf_queue = NULL; in intel_svm_finish_prq()
147 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
148 iommu->prq = NULL; in intel_svm_finish_prq()
153 void intel_svm_check(struct intel_iommu *iommu) in intel_svm_check() argument
155 if (!pasid_supported(iommu)) in intel_svm_check()
159 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
161 iommu->name); in intel_svm_check()
166 !cap_fl5lp_support(iommu->cap)) { in intel_svm_check()
168 iommu->name); in intel_svm_check()
172 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
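The intel_svm_check() fragments gate SVM on PASID support plus first-level paging capabilities that have to match how the CPU page tables are configured, since SVM shares the mm's page tables with the device. A minimal sketch of that gating; the cpu_feature_enabled() checks and the message text are assumptions filled in around the listed capability tests:

void intel_svm_check(struct intel_iommu *iommu)
{
    if (!pasid_supported(iommu))
        return;

    /* The CPU may build 1GiB mappings; first-level paging must support them. */
    if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
        !cap_fl1gp_support(iommu->cap)) {
        pr_err("%s: SVM disabled, no first-level 1GB page support\n",
               iommu->name);    /* message text illustrative */
        return;
    }

    /* Likewise, 5-level (LA57) CPU paging needs 5-level first-level support. */
    if (cpu_feature_enabled(X86_FEATURE_LA57) &&
        !cap_fl5lp_support(iommu->cap)) {
        pr_err("%s: SVM disabled, no 5-level paging support\n",
               iommu->name);    /* message text illustrative */
        return;
    }

    iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}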
185 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih); in __flush_svm_range_dev()
187 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in __flush_svm_range_dev()
252 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev, in intel_mm_release()
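__flush_svm_range_dev() shows the two invalidation scopes used when the bound mm's mappings change: a PASID-tagged first-level IOTLB flush through the invalidation queue, followed by a device-TLB flush when the device caches translations via ATS. A hedged sketch of that per-device flush; the device_domain_info lookup, the ats_enabled test and the trailing qi_flush_dev_iotlb_pasid() arguments are not in the listing and are assumptions:

static void __flush_svm_range_dev(struct intel_svm *svm,
                                  struct intel_svm_dev *sdev,
                                  unsigned long address,
                                  unsigned long pages, int ih)
{
    struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);

    /* Drop the PASID-tagged first-level IOTLB entries for this range. */
    qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);

    /* If the device has ATS enabled, its own device TLB must be flushed too. */
    if (info->ats_enabled)
        qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
                                 svm->pasid, info->ats_qdep, address,
                                 order_base_2(pages));
}

intel_mm_release() (line 252 above) handles the terminal case: when the mm goes away, the whole PASID entry is torn down with intel_pasid_tear_down_entry() rather than flushed range by range.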
301 static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev, in intel_svm_bind_mm() argument
342 sdev->iommu = iommu; in intel_svm_bind_mm()
354 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid, in intel_svm_bind_mm()
379 struct intel_iommu *iommu; in intel_svm_unbind_mm() local
384 iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_unbind_mm()
385 if (!iommu) in intel_svm_unbind_mm()
404 intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false); in intel_svm_unbind_mm()
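intel_svm_bind_mm() is where a device gets attached to an mm's PASID: the per-device state remembers which IOMMU serves the device (sdev->iommu = iommu, line 342), and the device's PASID entry is then pointed at the mm's page tables with intel_pasid_setup_first_level(). A heavily condensed sketch of that attach step; the svm/sdev bookkeeping, the mmu_notifier registration and the FLPT_DEFAULT_DID / PASID_FLAG_FL5LP arguments are paraphrased assumptions rather than listing content:

static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
                             struct mm_struct *mm)
{
    struct intel_svm_dev *sdev;
    unsigned long sflags;
    int ret;

    /* ... find or allocate the struct intel_svm for mm->pasid and register
     * an mmu_notifier so CPU-side invalidations reach the IOMMU ... */

    sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
    if (!sdev)
        return -ENOMEM;
    sdev->dev = dev;
    sdev->iommu = iommu;    /* used later for per-device IOTLB flushes */

    /* Point the PASID entry at the mm's page tables; request 5-level
     * first-level paging when the CPU itself runs with LA57. */
    sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
    ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
                                        FLPT_DEFAULT_DID, sflags);
    if (ret)
        kfree(sdev);
    return ret;
}

intel_svm_unbind_mm() reverses this: it resolves the device's IOMMU with device_to_iommu() and removes the translation with intel_pasid_tear_down_entry(), as lines 384 and 404 show.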
482 struct intel_iommu *iommu; in intel_svm_drain_prq() local
496 iommu = info->iommu; in intel_svm_drain_prq()
500 did = domain_id_iommu(domain, iommu); in intel_svm_drain_prq()
508 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
509 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
510 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
514 req = &iommu->prq[head / sizeof(*req)]; in intel_svm_drain_prq()
520 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
556 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
557 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); in intel_svm_drain_prq()
558 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_svm_drain_prq()
559 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
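intel_svm_drain_prq() has to let in-flight page requests for a PASID finish before its PASID entry can be reused: it snapshots PQT and PQH, walks the descriptors between head and tail looking for the target PASID, sleeps on iommu->prq_complete while matching entries remain, and then submits invalidation descriptors with QI_OPT_WAIT_DRAIN, waiting once more if the overflow bit (DMA_PRS_PRO) is still set. The walk uses the usual power-of-two ring arithmetic, indexing with head / sizeof(*req) and wrapping with PRQ_RING_MASK. A small self-contained model of just that scan; prq_desc, ring_has_pasid and RING_MASK are illustrative names, not kernel identifiers:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative 16-byte descriptor standing in for a hardware PRQ entry. */
struct prq_desc {
    uint64_t qw_0;    /* assume the PASID sits in bits 63:32 of qw_0 here */
    uint64_t qw_1;
};

#define RING_BYTES 4096u              /* power-of-two ring size in bytes */
#define RING_MASK  (RING_BYTES - 1u)  /* wraps byte offsets around the ring */

/*
 * Return true if any descriptor between byte offsets head and tail
 * (tail exclusive) carries the given PASID. Mirrors the head/tail walk
 * done by intel_svm_drain_prq() and prq_event_thread().
 */
static bool ring_has_pasid(const struct prq_desc *ring,
                           uint32_t head, uint32_t tail, uint32_t pasid)
{
    while (head != tail) {
        const struct prq_desc *req = &ring[head / sizeof(*req)];

        if ((uint32_t)(req->qw_0 >> 32) == pasid)
            return true;

        /* Advance by one descriptor; the mask handles wrap-around. */
        head = (head + sizeof(*req)) & RING_MASK;
    }
    return false;
}

In the kernel the scan runs in a loop: while entries for the draining PASID remain, the caller waits on iommu->prq_complete (completed by prq_event_thread() once it has advanced PQH) and re-reads the registers before checking again.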
580 static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, in intel_svm_prq_report() argument
613 } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) { in intel_svm_prq_report()
624 static void handle_bad_prq_event(struct intel_iommu *iommu, in handle_bad_prq_event() argument
630 iommu->name, ((unsigned long long *)req)[0], in handle_bad_prq_event()
660 qi_submit_sync(iommu, &desc, 1, 0); in handle_bad_prq_event()
665 struct intel_iommu *iommu = d; in prq_event_thread() local
675 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
677 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
678 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
681 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
686 iommu->name); in prq_event_thread()
688 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
694 iommu->name); in prq_event_thread()
700 iommu->name); in prq_event_thread()
706 iommu->name); in prq_event_thread()
714 pdev = pci_get_domain_bus_and_slot(iommu->segment, in prq_event_thread()
724 if (intel_svm_prq_report(iommu, &pdev->dev, req)) in prq_event_thread()
725 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
727 trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1, in prq_event_thread()
729 iommu->prq_seq_number++); in prq_event_thread()
735 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
741 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
743 iommu->name); in prq_event_thread()
744 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
745 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
747 iopf_queue_discard_partial(iommu->iopf_queue); in prq_event_thread()
748 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
750 iommu->name); in prq_event_thread()
754 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
755 complete(&iommu->prq_complete); in prq_event_thread()
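prq_event_thread() is the threaded half of the PRQ interrupt and ties the pieces above together: acknowledge the pending-report bit, walk the ring from PQH to PQT, validate each request and hand it to the I/O page fault layer (answering bad requests with QI_RESP_INVALID), advance PQH, clear a latched overflow once the ring is empty, and wake any drain waiter. A skeleton reconstructed from the fragments; is_valid_request() and report_to_device() are placeholders for the field checks and the pci_get_domain_bus_and_slot() + intel_svm_prq_report() path, and the return-value handling is simplified:

static bool is_valid_request(struct page_req_dsc *req);                       /* placeholder */
static bool report_to_device(struct intel_iommu *iommu, struct page_req_dsc *req); /* placeholder */

static irqreturn_t prq_event_thread(int irq, void *d)
{
    struct intel_iommu *iommu = d;
    struct page_req_dsc *req;
    u64 head, tail;

    /* Ack "pending page request" so the hardware can signal again later. */
    writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

    tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
    head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;

    while (head != tail) {
        req = &iommu->prq[head / sizeof(*req)];

        if (!is_valid_request(req)) {
            /* Malformed request: answer it so the device is not left hanging. */
            handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
        } else if (report_to_device(iommu, req)) {
            /* Could not route the fault to an iopf handler; fail it instead. */
            handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
        }

        head = (head + sizeof(*req)) & PRQ_RING_MASK;
    }

    /* Tell the hardware how far we have consumed. */
    dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

    /* If the queue overflowed while we were busy, drop the partially queued
     * faults and clear the overflow bit once the ring is empty again. */
    if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        if (head == tail) {
            iopf_queue_discard_partial(iommu->iopf_queue);
            writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
        }
    }

    /* Wake intel_svm_drain_prq() if it is waiting for the queue to empty. */
    if (!completion_done(&iommu->prq_complete))
        complete(&iommu->prq_complete);

    return IRQ_HANDLED;
}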
765 struct intel_iommu *iommu; in intel_svm_page_response() local
776 iommu = device_to_iommu(dev, &bus, &devfn); in intel_svm_page_response()
777 if (!iommu) in intel_svm_page_response()
821 dmar_latency_update(iommu, DMAR_LATENCY_PRQ, in intel_svm_page_response()
825 qi_submit_sync(iommu, &desc, 1, 0); in intel_svm_page_response()
842 struct intel_iommu *iommu = info->iommu; in intel_svm_set_dev_pasid() local
847 ret = intel_svm_bind_mm(iommu, dev, mm); in intel_svm_set_dev_pasid()