Lines matching refs:iommu in the Intel VT-d SVM driver (drivers/iommu/intel/svm.c). Each entry gives the source line number, the matched line, and the enclosing function; the trailing argument/local tag records how iommu is declared at that site.

85 int intel_svm_enable_prq(struct intel_iommu *iommu)  in intel_svm_enable_prq()  argument
94 iommu->name); in intel_svm_enable_prq()
97 iommu->prq = page_address(pages); in intel_svm_enable_prq()
99 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
102 iommu->name); in intel_svm_enable_prq()
106 iommu->pr_irq = irq; in intel_svm_enable_prq()
108 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_svm_enable_prq()
109 "dmar%d-iopfq", iommu->seq_id); in intel_svm_enable_prq()
110 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_svm_enable_prq()
112 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_svm_enable_prq()
116 iommu->iopf_queue = iopfq; in intel_svm_enable_prq()
118 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
121 iommu->prq_name, iommu); in intel_svm_enable_prq()
124 iommu->name); in intel_svm_enable_prq()
127 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
128 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
129 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
131 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
136 iopf_queue_free(iommu->iopf_queue); in intel_svm_enable_prq()
137 iommu->iopf_queue = NULL; in intel_svm_enable_prq()
140 iommu->pr_irq = 0; in intel_svm_enable_prq()
142 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
143 iommu->prq = NULL; in intel_svm_enable_prq()
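
The intel_svm_enable_prq() matches above trace one setup sequence: allocate the page request queue backing pages, wire up the PRQ interrupt and the I/O page fault queue, program the queue registers, and arm the drain completion. A minimal reconstruction sketch follows (kernel-internal context assumed; the alloc_pages_node() call, the IRQF_ONESHOT threaded IRQ, and the unwind labels are assumptions, while the remaining calls appear verbatim in the matches; names ending in _sketch are hypothetical):

    static int enable_prq_sketch(struct intel_iommu *iommu)
    {
        struct iopf_queue *iopfq;
        struct page *pages;
        int irq, ret;

        /* Backing store for the page request queue (2^PRQ_ORDER pages). */
        pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
        if (!pages)
            return -ENOMEM;
        iommu->prq = page_address(pages);

        /* PRQ interrupt vectors sit above the per-unit fault vectors. */
        irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id,
                               iommu->node, iommu);
        if (irq <= 0) {
            ret = -EINVAL;
            goto free_prq;
        }
        iommu->pr_irq = irq;

        /* I/O page fault queue feeding the generic iommu fault path. */
        snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
                 "dmar%d-iopfq", iommu->seq_id);
        iopfq = iopf_queue_alloc(iommu->iopfq_name);
        if (!iopfq) {
            ret = -ENOMEM;
            goto free_hwirq;
        }
        iommu->iopf_queue = iopfq;

        snprintf(iommu->prq_name, sizeof(iommu->prq_name),
                 "dmar%d-prq", iommu->seq_id);
        ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
                                   iommu->prq_name, iommu);
        if (ret)
            goto free_iopfq;

        /* Point the hardware at the (empty) queue: head == tail == 0. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG,
                    virt_to_phys(iommu->prq) | PRQ_ORDER);

        init_completion(&iommu->prq_complete);
        return 0;

    free_iopfq:
        iopf_queue_free(iommu->iopf_queue);
        iommu->iopf_queue = NULL;
    free_hwirq:
        dmar_free_hwirq(irq);
        iommu->pr_irq = 0;
    free_prq:
        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;
        return ret;
    }
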
148 int intel_svm_finish_prq(struct intel_iommu *iommu) in intel_svm_finish_prq() argument
150 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
151 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
152 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
154 if (iommu->pr_irq) { in intel_svm_finish_prq()
155 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
156 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
157 iommu->pr_irq = 0; in intel_svm_finish_prq()
160 if (iommu->iopf_queue) { in intel_svm_finish_prq()
161 iopf_queue_free(iommu->iopf_queue); in intel_svm_finish_prq()
162 iommu->iopf_queue = NULL; in intel_svm_finish_prq()
165 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
166 iommu->prq = NULL; in intel_svm_finish_prq()
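
intel_svm_finish_prq() is the mirror-image teardown, and nearly every line is already visible in the matches: quiesce the queue registers first, then release the IRQ, the iopf queue, and finally the queue pages. A sketch with that ordering:

    static int finish_prq_sketch(struct intel_iommu *iommu)
    {
        /* Quiesce the hardware queue before freeing memory behind it. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

        if (iommu->pr_irq) {
            free_irq(iommu->pr_irq, iommu);
            dmar_free_hwirq(iommu->pr_irq);
            iommu->pr_irq = 0;
        }

        if (iommu->iopf_queue) {
            iopf_queue_free(iommu->iopf_queue);
            iommu->iopf_queue = NULL;
        }

        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;

        return 0;
    }
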
171 static inline bool intel_svm_capable(struct intel_iommu *iommu) in intel_svm_capable() argument
173 return iommu->flags & VTD_FLAG_SVM_CAPABLE; in intel_svm_capable()
176 void intel_svm_check(struct intel_iommu *iommu) in intel_svm_check() argument
178 if (!pasid_supported(iommu)) in intel_svm_check()
182 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
184 iommu->name); in intel_svm_check()
189 !cap_5lp_support(iommu->cap)) { in intel_svm_check()
191 iommu->name); in intel_svm_check()
195 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
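
intel_svm_capable() is fully visible in the matches; intel_svm_check() gates the VTD_FLAG_SVM_CAPABLE flag on first-level paging features the CPU may hand to the IOMMU. A sketch; the cpu_feature_enabled() conditions and the message text are assumptions inferred from the cap_fl1gp_support()/cap_5lp_support() tests:

    static inline bool intel_svm_capable(struct intel_iommu *iommu)
    {
        return iommu->flags & VTD_FLAG_SVM_CAPABLE;
    }

    void svm_check_sketch(struct intel_iommu *iommu)
    {
        if (!pasid_supported(iommu))
            return;

        /* The CPU may create 1GB mappings that first-level walks must honor. */
        if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
            !cap_fl1gp_support(iommu->cap)) {
            pr_err("%s SVM disabled, incompatible 1GB page capability\n",
                   iommu->name);
            return;
        }

        /* Likewise for 5-level paging (LA57). */
        if (cpu_feature_enabled(X86_FEATURE_LA57) &&
            !cap_5lp_support(iommu->cap)) {
            pr_err("%s SVM disabled, incompatible paging mode\n",
                   iommu->name);
            return;
        }

        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
    }
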
208 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih); in __flush_svm_range_dev()
210 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in __flush_svm_range_dev()
272 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev, in intel_mm_release()
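
These three matches are the invalidation side: __flush_svm_range_dev() pairs a PASID-granular IOTLB flush with a device-IOTLB (ATS) flush, and intel_mm_release() is the mmu_notifier release callback that clears every bound device's PASID entry when the address space exits. A hedged sketch; fields of intel_svm/intel_svm_dev beyond those in the matches, and the get_domain_info()/order_base_2() usage, are assumptions:

    static void flush_range_dev_sketch(struct intel_svm *svm,
                                       struct intel_svm_dev *sdev,
                                       unsigned long address,
                                       unsigned long pages, int ih)
    {
        struct device_domain_info *info = get_domain_info(sdev->dev);

        /* First the IOMMU TLB for this PASID... */
        qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
        /* ...then any translations the device cached through ATS. */
        if (info->ats_enabled)
            qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
                                     svm->pasid, sdev->qdep, address,
                                     order_base_2(pages));
    }

    static void mm_release_sketch(struct mmu_notifier *mn, struct mm_struct *mm)
    {
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
        struct intel_svm_dev *sdev;

        /* exit_mmap(): stop DMA by clearing each device's PASID entry. */
        rcu_read_lock();
        list_for_each_entry_rcu(sdev, &svm->devs, list)
            intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
                                        svm->pasid, true);
        rcu_read_unlock();
    }
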
324 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_bind_gpasid() local
332 if (WARN_ON(!iommu) || !data) in intel_svm_bind_gpasid()
412 sdev->iommu = iommu; in intel_svm_bind_gpasid()
419 ret = intel_iommu_enable_pasid(iommu, sdev->dev); in intel_svm_bind_gpasid()
431 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_gpasid()
432 ret = intel_pasid_setup_nested(iommu, dev, in intel_svm_bind_gpasid()
436 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_gpasid()
465 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_unbind_gpasid() local
470 if (WARN_ON(!iommu)) in intel_svm_unbind_gpasid()
483 intel_pasid_tear_down_entry(iommu, dev, in intel_svm_unbind_gpasid()
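
The guest-PASID pair binds a guest page table through a nested PASID entry: first level walks the guest table, second level stays with the host domain. A compressed sketch of the control flow in the matches; validation, list management, and especially the intel_pasid_setup_nested() argument layout are assumptions:

    int bind_gpasid_sketch(struct iommu_domain *domain, struct device *dev,
                           struct iommu_gpasid_bind_data *data)
    {
        struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        unsigned long iflags;
        int ret;

        if (WARN_ON(!iommu) || !data)
            return -EINVAL;

        /* The device must have PASID enabled before a guest PASID binds. */
        ret = intel_iommu_enable_pasid(iommu, dev);
        if (ret)
            return ret;

        /* Install the nested translation under the iommu lock. */
        spin_lock_irqsave(&iommu->lock, iflags);
        ret = intel_pasid_setup_nested(iommu, dev,
                                       (pgd_t *)(uintptr_t)data->gpgd,
                                       data->hpasid, &data->vendor.vtd,
                                       dmar_domain, data->addr_width);
        spin_unlock_irqrestore(&iommu->lock, iflags);

        return ret;
    }

    int unbind_gpasid_sketch(struct device *dev, u32 pasid)
    {
        struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);

        if (WARN_ON(!iommu))
            return -EINVAL;

        /* Drop the nested entry; faults stay reported (fault_ignore = false). */
        intel_pasid_tear_down_entry(iommu, dev, pasid, false);
        return 0;
    }
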
522 static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, in intel_svm_bind_mm() argument
576 sdev->iommu = iommu; in intel_svm_bind_mm()
594 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_mm()
595 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid, in intel_svm_bind_mm()
597 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_mm()
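
intel_svm_bind_mm() is where a native SVA bind installs the CPU page table root as a first-level PASID translation. A sketch of just that core step (allocation, reference counting, and mmu_notifier registration are compressed away; FLPT_DEFAULT_DID and the 5-level-paging flag are assumptions consistent with the setup call in the matches):

    static int bind_mm_core_sketch(struct intel_iommu *iommu, struct device *dev,
                                   struct mm_struct *mm,
                                   struct intel_svm_dev *sdev)
    {
        unsigned long iflags;
        int ret;

        sdev->iommu = iommu;

        /*
         * Share mm->pgd with the IOMMU: first-level translation for this
         * PASID walks the CPU page tables directly.
         */
        spin_lock_irqsave(&iommu->lock, iflags);
        ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
                                            FLPT_DEFAULT_DID,
                                            cpu_feature_enabled(X86_FEATURE_LA57) ?
                                            PASID_FLAG_FL5LP : 0);
        spin_unlock_irqrestore(&iommu->lock, iflags);

        return ret;
    }
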
623 struct intel_iommu *iommu; in intel_svm_unbind_mm() local
628 iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_unbind_mm()
629 if (!iommu) in intel_svm_unbind_mm()
648 intel_pasid_tear_down_entry(iommu, dev, in intel_svm_unbind_mm()
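
intel_svm_unbind_mm() reverses the bind: look the IOMMU up from the device and clear the PASID entry. In the full function the PASID is also drained of in-flight page requests (see intel_svm_drain_prq() below) before it can be reused; the sketch keeps only the teardown visible in the matches:

    static void unbind_mm_core_sketch(struct device *dev, u32 pasid)
    {
        struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);

        if (!iommu)
            return;

        /* Clear the first-level PASID entry for this device. */
        intel_pasid_tear_down_entry(iommu, dev, pasid, false);
    }
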
728 struct intel_iommu *iommu; in intel_svm_drain_prq() local
742 iommu = info->iommu; in intel_svm_drain_prq()
746 did = domain->iommu_did[iommu->seq_id]; in intel_svm_drain_prq()
754 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
755 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
756 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
760 req = &iommu->prq[head / sizeof(*req)]; in intel_svm_drain_prq()
766 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
802 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
803 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); in intel_svm_drain_prq()
804 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_svm_drain_prq()
805 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
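
The intel_svm_drain_prq() matches show a two-step drain that must finish before a PASID is reused. Step 1 rescans the ring until no entry between PQH and PQT targets the PASID, sleeping on prq_complete whenever one is found so the event thread can consume it. Step 2 submits invalidation-wait descriptors with QI_OPT_WAIT_DRAIN and, while a page request overflow (DMA_PRS_PRO) is pending, waits and retries. A sketch of that loop structure, with the three descriptors assumed prebuilt since their assembly is not in the matches:

    static void drain_prq_sketch(struct intel_iommu *iommu, u32 pasid,
                                 struct qi_desc *desc /* 3 prebuilt descriptors */)
    {
        struct page_req_dsc *req;
        u64 head, tail;

        /* Step 1: wait until no pending PRQ entry targets this PASID. */
    prq_retry:
        reinit_completion(&iommu->prq_complete);
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
            req = &iommu->prq[head / sizeof(*req)];
            if (req->pasid_present && req->pasid == pasid) {
                /* Let the event thread consume it, then rescan. */
                wait_for_completion(&iommu->prq_complete);
                goto prq_retry;
            }
            head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

        /* Step 2: drain requests already pulled into the QI pipeline. */
    qi_retry:
        reinit_completion(&iommu->prq_complete);
        qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
            /* Overflow pending: wait for the thread to clear it, retry. */
            wait_for_completion(&iommu->prq_complete);
            goto qi_retry;
        }
    }
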
826 static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, in intel_svm_prq_report() argument
859 } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) { in intel_svm_prq_report()
870 static void handle_bad_prq_event(struct intel_iommu *iommu, in handle_bad_prq_event() argument
876 iommu->name, ((unsigned long long *)req)[0], in handle_bad_prq_event()
906 qi_submit_sync(iommu, &desc, 1, 0); in handle_bad_prq_event()
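
intel_svm_prq_report() hands a request to the generic fault reporting path and, when PRQ latency sampling is enabled (the dmar_latency_enabled() match), stamps the submission time into the fault's private data so intel_svm_page_response() can later compute the round trip. handle_bad_prq_event() logs the raw descriptor and, when the device expects a reply, posts a page group response carrying a failure code. A reconstruction sketch of the bad-event path; only the log line and qi_submit_sync() are confirmed by the matches, the QI_PGRP_* assembly is an assumption:

    static void bad_prq_event_sketch(struct intel_iommu *iommu,
                                     struct page_req_dsc *req, int result)
    {
        struct qi_desc desc;

        pr_err("%s: Invalid page request: %08llx %08llx\n",
               iommu->name, ((unsigned long long *)req)[0],
               ((unsigned long long *)req)[1]);

        /*
         * A response is only owed if the request is the last page in
         * its group (lpig) or carries private data (VT-d extension).
         */
        if (!req->lpig && !req->priv_data_present)
            return;

        desc.qw0 = QI_PGRP_PASID(req->pasid) |
                   QI_PGRP_DID(req->rid) |
                   QI_PGRP_PASID_P(req->pasid_present) |
                   QI_PGRP_PDP(req->priv_data_present) |
                   QI_PGRP_RESP_CODE(result) |
                   QI_PGRP_RESP_TYPE;
        desc.qw1 = QI_PGRP_IDX(req->prg_index) |
                   QI_PGRP_LPIG(req->lpig);
        /* Echo private data back if the device supplied any. */
        desc.qw2 = req->priv_data_present ? req->priv_data[0] : 0;
        desc.qw3 = req->priv_data_present ? req->priv_data[1] : 0;

        qi_submit_sync(iommu, &desc, 1, 0);
    }
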
912 struct intel_iommu *iommu = d; in prq_event_thread() local
922 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
924 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
925 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
928 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
933 iommu->name); in prq_event_thread()
937 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
943 iommu->name); in prq_event_thread()
949 iommu->name); in prq_event_thread()
955 iommu->name); in prq_event_thread()
981 if (intel_svm_prq_report(iommu, sdev->dev, req)) in prq_event_thread()
982 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
984 trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1, in prq_event_thread()
991 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
997 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
999 iommu->name); in prq_event_thread()
1000 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
1001 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
1003 iopf_queue_discard_partial(iommu->iopf_queue); in prq_event_thread()
1004 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
1006 iommu->name); in prq_event_thread()
1010 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
1011 complete(&iommu->prq_complete); in prq_event_thread()
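
prq_event_thread() is the threaded handler wired up in intel_svm_enable_prq(): acknowledge the pending interrupt (DMA_PRS_PPR), consume descriptors from PQH to PQT, report valid ones and fail malformed ones, advance the head, then clear any overflow (DMA_PRS_PRO) and wake intel_svm_drain_prq() waiters. A control-flow sketch; the per-request validation and the rid-to-device lookup are compressed into prq_lookup_device(), a hypothetical helper:

    static irqreturn_t prq_event_thread_sketch(int irq, void *d)
    {
        struct intel_iommu *iommu = d;
        struct page_req_dsc *req;
        struct device *dev;
        u64 head, tail;

        /* Ack "pending page request" so new arrivals re-raise the IRQ. */
        writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
            req = &iommu->prq[head / sizeof(*req)];
            /* Validate the descriptor and resolve req->rid to a device. */
            dev = prq_lookup_device(iommu, req);    /* hypothetical helper */
            if (!dev || intel_svm_prq_report(iommu, dev, req))
                handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
            head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

        /* Pop everything consumed in one go. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

        /* Queue overflowed while draining: discard partial faults, ack. */
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
            iopf_queue_discard_partial(iommu->iopf_queue);
            writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
        }

        /* Let intel_svm_drain_prq() re-check its conditions. */
        if (!completion_done(&iommu->prq_complete))
            complete(&iommu->prq_complete);

        return IRQ_HANDLED;
    }
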
1018 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_bind() local
1027 if (!ecap_srs(iommu->ecap)) { in intel_svm_bind()
1029 iommu->name); in intel_svm_bind()
1035 iommu->name); in intel_svm_bind()
1049 sva = intel_svm_bind_mm(iommu, dev, mm, flags); in intel_svm_bind()
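
intel_svm_bind() is the entry point behind iommu_sva_bind_device(); the ecap_srs() test gates supervisor-mode PASIDs (SRS, Supervisor Request Support), which accounts for both error messages in the matches before the call into intel_svm_bind_mm(). A sketch; the SVM_FLAG_SUPERVISOR_MODE handling, the init_mm substitution, and the omitted pasid_mutex/PASID allocation are assumptions consistent with those checks:

    static struct iommu_sva *svm_bind_sketch(struct device *dev,
                                             struct mm_struct *mm, void *drvdata)
    {
        struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
        unsigned int flags = drvdata ? *(unsigned int *)drvdata : 0;

        if (flags & SVM_FLAG_SUPERVISOR_MODE) {
            /* Supervisor PASIDs need hardware SRS support... */
            if (!ecap_srs(iommu->ecap)) {
                dev_err(dev, "%s: Supervisor PASID not supported\n",
                        iommu->name);
                return ERR_PTR(-EOPNOTSUPP);
            }
            /* ...and bind the kernel address space, not a user mm. */
            mm = &init_mm;
        }

        /* The real function allocates the PASID under pasid_mutex first. */
        return intel_svm_bind_mm(iommu, dev, mm, flags);
    }
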
1086 struct intel_iommu *iommu; in intel_svm_page_response() local
1097 iommu = device_to_iommu(dev, &bus, &devfn); in intel_svm_page_response()
1098 if (!iommu) in intel_svm_page_response()
1172 dmar_latency_update(iommu, DMAR_LATENCY_PRQ, in intel_svm_page_response()
1176 qi_submit_sync(iommu, &desc, 1, 0); in intel_svm_page_response()
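
intel_svm_page_response() completes a previously reported fault: look up the IOMMU for the device, translate the iommu_page_response into a page-group-response QI descriptor, close the latency measurement opened in intel_svm_prq_report(), and post the descriptor. A compressed sketch; only device_to_iommu(), dmar_latency_update(), and qi_submit_sync() appear in the matches, so the prm field plumbing and descriptor assembly are assumptions:

    static int page_response_sketch(struct device *dev,
                                    struct iommu_fault_event *evt,
                                    struct iommu_page_response *msg)
    {
        u8 bus, devfn;
        struct intel_iommu *iommu = device_to_iommu(dev, &bus, &devfn);
        struct iommu_fault_page_request *prm = &evt->fault.prm;
        struct qi_desc desc;

        if (!iommu)
            return -ENODEV;

        /* Echo PASID, requester ID, and group index back to the device. */
        desc.qw0 = QI_PGRP_PASID(prm->pasid) |
                   QI_PGRP_DID(PCI_DEVID(bus, devfn)) |
                   QI_PGRP_PASID_P(!!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID)) |
                   QI_PGRP_RESP_CODE(msg->code) |
                   QI_PGRP_RESP_TYPE;
        desc.qw1 = QI_PGRP_IDX(prm->grpid) |
                   QI_PGRP_LPIG(!!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE));
        desc.qw2 = 0;
        desc.qw3 = 0;

        /* Round-trip latency: now minus the stamp taken at report time. */
        if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ))
            dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
                                ktime_to_ns(ktime_get()) - prm->private_data[0]);

        return qi_submit_sync(iommu, &desc, 1, 0);
    }
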