Lines Matching refs:iommu
34 static int __must_check invalidate_sync(struct iommu *iommu);
36 static void print_qi_regs(struct iommu *iommu) in print_qi_regs() argument
40 val = dmar_readq(iommu->reg, DMAR_IQA_REG); in print_qi_regs()
43 val = dmar_readq(iommu->reg, DMAR_IQH_REG); in print_qi_regs()
46 val = dmar_readq(iommu->reg, DMAR_IQT_REG); in print_qi_regs()
50 static unsigned int qinval_next_index(struct iommu *iommu) in qinval_next_index() argument
54 tail = dmar_readq(iommu->reg, DMAR_IQT_REG); in qinval_next_index()
59 ( dmar_readq(iommu->reg, DMAR_IQH_REG) >> QINVAL_INDEX_SHIFT ) ) in qinval_next_index()
65 static void qinval_update_qtail(struct iommu *iommu, unsigned int index) in qinval_update_qtail() argument
70 ASSERT( spin_is_locked(&iommu->register_lock) ); in qinval_update_qtail()
72 dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT)); in qinval_update_qtail()
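The references above to qinval_next_index() and qinval_update_qtail() show the invalidation queue being managed as a ring: the IQH/IQT registers hold the entry index shifted left by QINVAL_INDEX_SHIFT, and the queue counts as full when advancing the tail would collide with the hardware head. Below is a minimal, self-contained sketch of that index arithmetic, with the register reads replaced by plain variables; the shift value, ring size, and the "+1 and wrap" tail advance are illustrative assumptions, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define QINVAL_INDEX_SHIFT_SKETCH 4     /* assumed: IQH/IQT hold (index << shift) */
#define QINVAL_ENTRY_NR_SKETCH    256   /* assumed number of ring entries */

/* Returns the free tail slot, or ~0u if the ring is currently full. */
static unsigned int next_index_sketch(uint64_t iqt_reg, uint64_t iqh_reg)
{
    uint64_t tail = iqt_reg >> QINVAL_INDEX_SHIFT_SKETCH;
    uint64_t head = iqh_reg >> QINVAL_INDEX_SHIFT_SKETCH;

    /* (tail + 1) == head modulo the ring size means the queue is full;
     * the real code spins until hardware consumes entries instead. */
    if ( ((tail + 1) % QINVAL_ENTRY_NR_SKETCH) == head )
        return ~0u;

    return tail;
}

/* Counterpart of qinval_update_qtail(): publish the new tail, shifted back up.
 * The wrap-around advance here is the sketch's assumption. */
static uint64_t update_qtail_sketch(unsigned int index)
{
    uint64_t val = (index + 1) % QINVAL_ENTRY_NR_SKETCH;

    return val << QINVAL_INDEX_SHIFT_SKETCH;    /* value written to IQT */
}

int main(void)
{
    printf("%u\n", next_index_sketch(3ull << QINVAL_INDEX_SHIFT_SKETCH,
                                     1ull << QINVAL_INDEX_SHIFT_SKETCH)); /* 3 */
    printf("%#llx\n", (unsigned long long)update_qtail_sketch(3));        /* 0x40 */
    return 0;
}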
75 static int __must_check queue_invalidate_context_sync(struct iommu *iommu, in queue_invalidate_context_sync() argument
85 spin_lock_irqsave(&iommu->register_lock, flags); in queue_invalidate_context_sync()
86 index = qinval_next_index(iommu); in queue_invalidate_context_sync()
87 entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + in queue_invalidate_context_sync()
101 qinval_update_qtail(iommu, index); in queue_invalidate_context_sync()
102 spin_unlock_irqrestore(&iommu->register_lock, flags); in queue_invalidate_context_sync()
106 return invalidate_sync(iommu); in queue_invalidate_context_sync()
109 static int __must_check queue_invalidate_iotlb_sync(struct iommu *iommu, in queue_invalidate_iotlb_sync() argument
119 spin_lock_irqsave(&iommu->register_lock, flags); in queue_invalidate_iotlb_sync()
120 index = qinval_next_index(iommu); in queue_invalidate_iotlb_sync()
121 entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + in queue_invalidate_iotlb_sync()
140 qinval_update_qtail(iommu, index); in queue_invalidate_iotlb_sync()
141 spin_unlock_irqrestore(&iommu->register_lock, flags); in queue_invalidate_iotlb_sync()
143 return invalidate_sync(iommu); in queue_invalidate_iotlb_sync()
146 static int __must_check queue_invalidate_wait(struct iommu *iommu, in queue_invalidate_wait() argument
156 spin_lock_irqsave(&iommu->register_lock, flags); in queue_invalidate_wait()
157 index = qinval_next_index(iommu); in queue_invalidate_wait()
158 entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + in queue_invalidate_wait()
173 qinval_update_qtail(iommu, index); in queue_invalidate_wait()
174 spin_unlock_irqrestore(&iommu->register_lock, flags); in queue_invalidate_wait()
189 print_qi_regs(iommu); in queue_invalidate_wait()
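queue_invalidate_wait() is the synchronisation point for the queue: it posts a wait descriptor and then polls a status word that the hardware rewrites once the earlier descriptors have completed; the print_qi_regs() reference above suggests the IQA/IQH/IQT registers are dumped when that wait does not complete. The following is a small self-contained sketch of the poll-with-deadline pattern only; the status values and timeout are illustrative assumptions rather than the driver's constants.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define STAT_INIT_SKETCH 1      /* assumed: value stored before posting the descriptor */
#define STAT_DONE_SKETCH 2      /* assumed: value hardware writes on completion */

/* Poll a DMA-visible status slot until it flips to "done" or the deadline passes. */
static int poll_wait_slot(volatile uint32_t *slot, double timeout_s)
{
    struct timespec start, now;

    clock_gettime(CLOCK_MONOTONIC, &start);
    while ( *slot != STAT_DONE_SKETCH )
    {
        clock_gettime(CLOCK_MONOTONIC, &now);
        if ( (now.tv_sec - start.tv_sec) +
             (now.tv_nsec - start.tv_nsec) / 1e9 > timeout_s )
            return -1;          /* the real code would report the QI registers here */
    }
    return 0;
}

int main(void)
{
    volatile uint32_t slot = STAT_INIT_SKETCH;

    /* Nothing completes the descriptor in this sketch, so the first call times out. */
    printf("%d\n", poll_wait_slot(&slot, 0.01));

    slot = STAT_DONE_SKETCH;
    printf("%d\n", poll_wait_slot(&slot, 0.01));
    return 0;
}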
202 static int __must_check invalidate_sync(struct iommu *iommu) in invalidate_sync() argument
204 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu); in invalidate_sync()
208 return queue_invalidate_wait(iommu, 0, 1, 1, 0); in invalidate_sync()
211 static int __must_check dev_invalidate_sync(struct iommu *iommu, in dev_invalidate_sync() argument
214 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu); in dev_invalidate_sync()
218 rc = queue_invalidate_wait(iommu, 0, 1, 1, 1); in dev_invalidate_sync()
223 if ( test_bit(did, iommu->domid_bitmap) ) in dev_invalidate_sync()
224 d = rcu_lock_domain_by_id(iommu->domid_map[did]); in dev_invalidate_sync()
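Beyond posting a wait descriptor, dev_invalidate_sync() consults iommu->domid_bitmap and iommu->domid_map to translate the IOMMU domain id (DID) back into the owning Xen domain (hence the rcu_lock_domain_by_id() reference above), presumably so a failed Device-TLB flush can be attributed to that domain. A minimal sketch of such a DID-to-domid lookup, with a plain bitmap and array standing in for the driver's structures:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for iommu->domid_bitmap / iommu->domid_map. */
#define NR_DID_SKETCH 64

static uint64_t  domid_bitmap_sketch;              /* bit set => DID assigned */
static uint16_t  domid_map_sketch[NR_DID_SKETCH];  /* DID -> Xen domain id */

/* Analogue of: if ( test_bit(did, bitmap) ) look up domid_map[did]. */
static int did_to_domid_sketch(unsigned int did, uint16_t *domid)
{
    if ( did >= NR_DID_SKETCH || !(domid_bitmap_sketch & (1ull << did)) )
        return -1;                                  /* DID not in use */

    *domid = domid_map_sketch[did];
    return 0;
}

int main(void)
{
    uint16_t domid;

    domid_bitmap_sketch |= 1ull << 5;               /* pretend DID 5 -> domain 7 */
    domid_map_sketch[5] = 7;

    printf("%d\n", did_to_domid_sketch(5, &domid) ? -1 : domid);  /* 7 */
    printf("%d\n", did_to_domid_sketch(6, &domid));               /* -1 */
    return 0;
}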
240 int qinval_device_iotlb_sync(struct iommu *iommu, struct pci_dev *pdev, in qinval_device_iotlb_sync() argument
249 spin_lock_irqsave(&iommu->register_lock, flags); in qinval_device_iotlb_sync()
250 index = qinval_next_index(iommu); in qinval_device_iotlb_sync()
251 entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + in qinval_device_iotlb_sync()
268 qinval_update_qtail(iommu, index); in qinval_device_iotlb_sync()
269 spin_unlock_irqrestore(&iommu->register_lock, flags); in qinval_device_iotlb_sync()
271 return dev_invalidate_sync(iommu, pdev, did); in qinval_device_iotlb_sync()
274 static int __must_check queue_invalidate_iec_sync(struct iommu *iommu, in queue_invalidate_iec_sync() argument
283 spin_lock_irqsave(&iommu->register_lock, flags); in queue_invalidate_iec_sync()
284 index = qinval_next_index(iommu); in queue_invalidate_iec_sync()
285 entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + in queue_invalidate_iec_sync()
299 qinval_update_qtail(iommu, index); in queue_invalidate_iec_sync()
300 spin_unlock_irqrestore(&iommu->register_lock, flags); in queue_invalidate_iec_sync()
302 ret = invalidate_sync(iommu); in queue_invalidate_iec_sync()
308 (void)dmar_readq(iommu->reg, DMAR_CAP_REG); in queue_invalidate_iec_sync()
313 int iommu_flush_iec_global(struct iommu *iommu) in iommu_flush_iec_global() argument
315 return queue_invalidate_iec_sync(iommu, IEC_GLOBAL_INVL, 0, 0); in iommu_flush_iec_global()
318 int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx) in iommu_flush_iec_index() argument
320 return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx); in iommu_flush_iec_index()
327 struct iommu *iommu = (struct iommu *)_iommu; in flush_context_qi() local
328 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu); in flush_context_qi()
340 if ( !cap_caching_mode(iommu->cap) ) in flush_context_qi()
346 return queue_invalidate_context_sync(iommu, did, sid, fm, in flush_context_qi()
357 struct iommu *iommu = (struct iommu *)_iommu; in flush_iotlb_qi() local
358 struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu); in flush_iotlb_qi()
370 if ( !cap_caching_mode(iommu->cap) ) in flush_iotlb_qi()
377 if (cap_write_drain(iommu->cap)) in flush_iotlb_qi()
379 if (cap_read_drain(iommu->cap)) in flush_iotlb_qi()
382 rc = queue_invalidate_iotlb_sync(iommu, in flush_iotlb_qi()
390 rc = dev_invalidate_iotlb(iommu, did, addr, size_order, type); in flush_iotlb_qi()
397 int enable_qinval(struct iommu *iommu) in enable_qinval() argument
405 if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval ) in enable_qinval()
408 qi_ctrl = iommu_qi_ctrl(iommu); in enable_qinval()
409 flush = iommu_get_flush(iommu); in enable_qinval()
412 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); in enable_qinval()
418 drhd = iommu_to_drhd(iommu); in enable_qinval()
440 spin_lock_irqsave(&iommu->register_lock, flags); in enable_qinval()
441 dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr); in enable_qinval()
443 dmar_writeq(iommu->reg, DMAR_IQT_REG, 0); in enable_qinval()
446 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); in enable_qinval()
447 dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_QIE); in enable_qinval()
450 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, in enable_qinval()
452 spin_unlock_irqrestore(&iommu->register_lock, flags); in enable_qinval()
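The enable_qinval() references above show the enable sequence: program the queue base into IQA, reset the tail (IQT) to zero, set the Queued Invalidation Enable bit in GCMD by read-modify-write of the current GSTS value, and wait on GSTS (IOMMU_WAIT_OP) for the change to take effect; disable_qinval() below clears the same bit. The sketch models that sequence against a fake register block: the struct, the immediate GSTS mirroring, and the bit position are illustrative assumptions only.

#include <stdint.h>
#include <stdio.h>

#define DMA_GCMD_QIE_SKETCH (1u << 26)  /* assumed bit position, for illustration */

struct fake_dmar {
    uint64_t iqa, iqt;
    uint32_t gsts;
};

/* Trivial "hardware" model: GSTS mirrors the command bits immediately. */
static void write_gcmd(struct fake_dmar *r, uint32_t val) { r->gsts = val; }

static void enable_qinval_sketch(struct fake_dmar *r, uint64_t qinval_maddr)
{
    uint32_t sts;

    r->iqa = qinval_maddr;              /* queue base address */
    r->iqt = 0;                         /* start with an empty queue */

    sts = r->gsts;                      /* read the current enable bits ...       */
    write_gcmd(r, sts | DMA_GCMD_QIE_SKETCH);   /* ... and set QI Enable on top   */

    while ( !(r->gsts & DMA_GCMD_QIE_SKETCH) )  /* stand-in for IOMMU_WAIT_OP     */
        ;                                       /* (the real code can time out)   */
}

int main(void)
{
    struct fake_dmar r = { 0 };

    enable_qinval_sketch(&r, 0x12340000ull);
    printf("IQA=%#llx IQT=%llu QIE=%u\n",
           (unsigned long long)r.iqa, (unsigned long long)r.iqt,
           !!(r.gsts & DMA_GCMD_QIE_SKETCH));
    return 0;
}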
457 void disable_qinval(struct iommu *iommu) in disable_qinval() argument
462 if ( !ecap_queued_inval(iommu->ecap) ) in disable_qinval()
465 spin_lock_irqsave(&iommu->register_lock, flags); in disable_qinval()
466 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); in disable_qinval()
470 dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE)); in disable_qinval()
473 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, in disable_qinval()
476 spin_unlock_irqrestore(&iommu->register_lock, flags); in disable_qinval()