Lines matching refs: iommu

Each entry below gives the source line number, the matching line, and the enclosing function; definition lines are additionally tagged "argument" or "local" according to how "iommu" is bound at that line.

25 static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])  in queue_iommu_command()  argument
30 tail = iommu->cmd_buffer.tail; in queue_iommu_command()
31 if ( ++tail == iommu->cmd_buffer.entries ) in queue_iommu_command()
34 head = iommu_get_rb_pointer(readl(iommu->mmio_base + in queue_iommu_command()
38 cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer + in queue_iommu_command()
39 (iommu->cmd_buffer.tail * in queue_iommu_command()
45 iommu->cmd_buffer.tail = tail; in queue_iommu_command()
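
The queue_iommu_command() fragments above are a single-producer ring buffer: compute the next tail with wrap-around, read the hardware head pointer back over MMIO to detect a full ring, and copy the command into the current tail slot only if there is room. A minimal reconstruction consistent with those lines follows; the offset and sizing constants (IOMMU_CMD_BUFFER_HEAD_OFFSET, IOMMU_CMD_BUFFER_ENTRY_SIZE, IOMMU_CMD_BUFFER_U32_PER_ENTRY) and the return-value convention are assumed, not taken from the listing:

static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
{
    u32 tail, head, *cmd_buffer;
    int i;

    /* Advance a local copy of the tail index, wrapping at the ring end. */
    tail = iommu->cmd_buffer.tail;
    if ( ++tail == iommu->cmd_buffer.entries )
        tail = 0;

    /* Read the hardware's head pointer; if the new tail would collide
     * with it the ring is full and the command cannot be queued. */
    head = iommu_get_rb_pointer(readl(iommu->mmio_base +
                                      IOMMU_CMD_BUFFER_HEAD_OFFSET));
    if ( head != tail )
    {
        /* Copy the command into the current tail slot ... */
        cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer +
                             (iommu->cmd_buffer.tail *
                              IOMMU_CMD_BUFFER_ENTRY_SIZE));
        for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ )
            cmd_buffer[i] = cmd[i];

        /* ... and only then record the new software tail. */
        iommu->cmd_buffer.tail = tail;
        return 1;
    }

    return 0;
}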
52 static void commit_iommu_command_buffer(struct amd_iommu *iommu) in commit_iommu_command_buffer() argument
56 iommu_set_rb_pointer(&tail, iommu->cmd_buffer.tail); in commit_iommu_command_buffer()
57 writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET); in commit_iommu_command_buffer()
60 int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]) in send_iommu_command() argument
62 if ( queue_iommu_command(iommu, cmd) ) in send_iommu_command()
64 commit_iommu_command_buffer(iommu); in send_iommu_command()
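
Committing is then just publishing the new tail to hardware: commit_iommu_command_buffer() packs the software tail index into the ring-buffer pointer format and writes it to the command-buffer tail register, and send_iommu_command() composes the two steps. A sketch consistent with the lines above; the int return convention is an assumption:

static void commit_iommu_command_buffer(struct amd_iommu *iommu)
{
    u32 tail = 0;

    /* Pack the software tail index into the MMIO pointer format and write
     * it out; this is what actually hands the command to the hardware. */
    iommu_set_rb_pointer(&tail, iommu->cmd_buffer.tail);
    writel(tail, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
}

int send_iommu_command(struct amd_iommu *iommu, u32 cmd[])
{
    /* Only bump the hardware tail if the command actually fit in the ring. */
    if ( queue_iommu_command(iommu, cmd) )
    {
        commit_iommu_command_buffer(iommu);
        return 1;
    }

    return 0;
}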
71 static void flush_command_buffer(struct amd_iommu *iommu) in flush_command_buffer() argument
78 iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in flush_command_buffer()
88 send_iommu_command(iommu, cmd); in flush_command_buffer()
93 status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in flush_command_buffer()
104 iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); in flush_command_buffer()
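
flush_command_buffer() is the synchronization point: clear the completion-wait interrupt bit in the status register, queue a COMPLETION_WAIT command with its interrupt flag set, then poll the status register until the bit asserts (or a retry budget expires) and acknowledge it. A simplified sketch of that pattern, assuming names for the status mask, opcode, flag and shift constants (IOMMU_STATUS_COMP_WAIT_INT_MASK and friends) as well as the loop bound; the real code goes through field-access helpers rather than raw shifts:

static void flush_command_buffer(struct amd_iommu *iommu)
{
    u32 cmd[4], status;
    int loop_count = 1000, comp_wait = 0;

    /* Clear any stale ComWaitInt indication (status bits are
     * write-1-to-clear). */
    writel(IOMMU_STATUS_COMP_WAIT_INT_MASK,
           iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);

    /* Queue an empty COMPLETION_WAIT with the 'i' (interrupt) flag so the
     * IOMMU sets ComWaitInt once every earlier command has completed. */
    cmd[0] = IOMMU_COMP_WAIT_I_FLAG_MASK;
    cmd[1] = IOMMU_CMD_COMPLETION_WAIT << IOMMU_CMD_OPCODE_SHIFT;
    cmd[2] = cmd[3] = 0;
    send_iommu_command(iommu, cmd);

    /* Busy-wait (bounded) for the completion indication. */
    do {
        status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
        comp_wait = status & IOMMU_STATUS_COMP_WAIT_INT_MASK;
    } while ( !comp_wait && --loop_count );

    if ( comp_wait )
        /* Acknowledge the bit so the next flush starts from a clean state. */
        writel(IOMMU_STATUS_COMP_WAIT_INT_MASK,
               iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    /* else: warn that ComWaitInt never asserted. */
}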
111 static void invalidate_iommu_pages(struct amd_iommu *iommu, in invalidate_iommu_pages() argument
164 send_iommu_command(iommu, cmd); in invalidate_iommu_pages()
167 static void invalidate_iotlb_pages(struct amd_iommu *iommu, in invalidate_iotlb_pages() argument
234 send_iommu_command(iommu, cmd); in invalidate_iotlb_pages()
237 static void invalidate_dev_table_entry(struct amd_iommu *iommu, in invalidate_dev_table_entry() argument
253 send_iommu_command(iommu, cmd); in invalidate_dev_table_entry()
256 static void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id) in invalidate_interrupt_table() argument
269 send_iommu_command(iommu, cmd); in invalidate_interrupt_table()
272 void invalidate_iommu_all(struct amd_iommu *iommu) in invalidate_iommu_all() argument
283 send_iommu_command(iommu, cmd); in invalidate_iommu_all()
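
Each invalidate_* helper above (pages, IOTLB pages, device-table entry, interrupt table, invalidate-all) follows the same recipe: assemble a four-dword command with the parameters in cmd[0]/cmd[2]/cmd[3] and the opcode in the upper bits of cmd[1], then push it with send_iommu_command(). A sketch of the simplest case, the device-table-entry invalidation; the opcode value and shift (IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, IOMMU_CMD_OPCODE_SHIFT) and the exact field layout are assumptions:

static void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 device_id)
{
    u32 cmd[4];

    /* cmd[0] carries the device (requestor) ID whose entry is invalidated. */
    cmd[0] = device_id;
    /* cmd[1] carries the command opcode in its upper bits. */
    cmd[1] = IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY << IOMMU_CMD_OPCODE_SHIFT;
    cmd[2] = 0;
    cmd[3] = 0;

    send_iommu_command(iommu, cmd);
}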
290 struct amd_iommu *iommu; in amd_iommu_flush_iotlb() local
299 iommu = find_iommu_for_device(pdev->seg, PCI_BDF2(pdev->bus, pdev->devfn)); in amd_iommu_flush_iotlb()
301 if ( !iommu ) in amd_iommu_flush_iotlb()
309 if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) ) in amd_iommu_flush_iotlb()
312 req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(pdev->bus, devfn)); in amd_iommu_flush_iotlb()
317 spin_lock_irqsave(&iommu->lock, flags); in amd_iommu_flush_iotlb()
318 invalidate_iotlb_pages(iommu, maxpend, 0, queueid, gaddr, req_id, order); in amd_iommu_flush_iotlb()
319 flush_command_buffer(iommu); in amd_iommu_flush_iotlb()
320 spin_unlock_irqrestore(&iommu->lock, flags); in amd_iommu_flush_iotlb()
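
amd_iommu_flush_iotlb() ties the pieces together for a single ATS-capable device: look up the IOMMU serving the device, bail out if there is none or if remote IOTLBs are unsupported, derive the DMA requestor ID, and issue the device IOTLB invalidation plus a flush under iommu->lock. A schematic reconstruction; the signature and the queueid/maxpend values are not shown in the listing and are filled in as labelled placeholders:

void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
                           uint64_t gaddr, unsigned int order)
{
    unsigned long flags;
    struct amd_iommu *iommu;
    unsigned int req_id, queueid, maxpend;

    /* Locate the IOMMU serving this device; no IOMMU means nothing to do. */
    iommu = find_iommu_for_device(pdev->seg, PCI_BDF2(pdev->bus, pdev->devfn));
    if ( !iommu )
        return;

    /* Remote (device-side) IOTLB invalidation requires IOTLB support. */
    if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
        return;

    req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(pdev->bus, devfn));

    /* Schematic placeholders: the listing does not show how these are
     * derived (the real code takes them from the device's ATS state). */
    queueid = req_id;
    maxpend = 0;

    /* Queue the device IOTLB invalidation and wait for completion. */
    spin_lock_irqsave(&iommu->lock, flags);
    invalidate_iotlb_pages(iommu, maxpend, 0, queueid, gaddr, req_id, order);
    flush_command_buffer(iommu);
    spin_unlock_irqrestore(&iommu->lock, flags);
}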
348 struct amd_iommu *iommu; in _amd_iommu_flush_pages() local
352 for_each_amd_iommu ( iommu ) in _amd_iommu_flush_pages()
354 spin_lock_irqsave(&iommu->lock, flags); in _amd_iommu_flush_pages()
355 invalidate_iommu_pages(iommu, gaddr, dom_id, order); in _amd_iommu_flush_pages()
356 flush_command_buffer(iommu); in _amd_iommu_flush_pages()
357 spin_unlock_irqrestore(&iommu->lock, flags); in _amd_iommu_flush_pages()
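
_amd_iommu_flush_pages(), by contrast, has no single target: a domain's translations may be cached by any IOMMU in the system, so it walks all of them and repeats the lock/invalidate/flush sequence on each. A sketch of that loop; the function's signature and the way dom_id is obtained are assumptions, and only the per-IOMMU loop visible in the listing is reproduced:

static void _amd_iommu_flush_pages(struct domain *d,
                                   uint64_t gaddr, unsigned int order)
{
    unsigned long flags;
    struct amd_iommu *iommu;
    /* Schematic: the real code reads the domain's IOMMU state for this id. */
    unsigned int dom_id = d->domain_id;

    /* Every IOMMU may hold cached translations for this domain, so each
     * one gets the invalidation followed by a completion wait. */
    for_each_amd_iommu ( iommu )
    {
        spin_lock_irqsave(&iommu->lock, flags);
        invalidate_iommu_pages(iommu, gaddr, dom_id, order);
        flush_command_buffer(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }
}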
375 void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf) in amd_iommu_flush_device() argument
377 ASSERT( spin_is_locked(&iommu->lock) ); in amd_iommu_flush_device()
379 invalidate_dev_table_entry(iommu, bdf); in amd_iommu_flush_device()
380 flush_command_buffer(iommu); in amd_iommu_flush_device()
383 void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf) in amd_iommu_flush_intremap() argument
385 ASSERT( spin_is_locked(&iommu->lock) ); in amd_iommu_flush_intremap()
387 invalidate_interrupt_table(iommu, bdf); in amd_iommu_flush_intremap()
388 flush_command_buffer(iommu); in amd_iommu_flush_intremap()
391 void amd_iommu_flush_all_caches(struct amd_iommu *iommu) in amd_iommu_flush_all_caches() argument
393 ASSERT( spin_is_locked(&iommu->lock) ); in amd_iommu_flush_all_caches()
395 invalidate_iommu_all(iommu); in amd_iommu_flush_all_caches()
396 flush_command_buffer(iommu); in amd_iommu_flush_all_caches()
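
amd_iommu_flush_device(), amd_iommu_flush_intremap() and amd_iommu_flush_all_caches() deliberately leave locking to the caller: each asserts that iommu->lock is already held, so the table update and the flush that publishes it cannot interleave with other command-buffer users. A hypothetical caller illustrating the expected pattern (example_reprogram_dte() and its body are placeholders, not code from the listing):

/* Hypothetical caller: update a device-table entry and flush it. */
static void example_reprogram_dte(struct amd_iommu *iommu, uint16_t bdf)
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    /* ... rewrite the device table entry for 'bdf' here ... */

    /* Safe: the ASSERT in amd_iommu_flush_device() is satisfied because
     * iommu->lock is still held around the flush. */
    amd_iommu_flush_device(iommu, bdf);

    spin_unlock_irqrestore(&iommu->lock, flags);
}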
399 void amd_iommu_send_guest_cmd(struct amd_iommu *iommu, u32 cmd[]) in amd_iommu_send_guest_cmd() argument
403 spin_lock_irqsave(&iommu->lock, flags); in amd_iommu_send_guest_cmd()
405 send_iommu_command(iommu, cmd); in amd_iommu_send_guest_cmd()
406 flush_command_buffer(iommu); in amd_iommu_send_guest_cmd()
408 spin_unlock_irqrestore(&iommu->lock, flags); in amd_iommu_send_guest_cmd()
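
Finally, amd_iommu_send_guest_cmd() relays a command on a guest's behalf and, unlike the helpers above, takes the lock itself, pairing the send with a flush so completion is observed before returning. The listing shows essentially the whole body; only the braces and the flags local are filled in here:

void amd_iommu_send_guest_cmd(struct amd_iommu *iommu, u32 cmd[])
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    /* Queue the command and wait for the hardware to drain the buffer. */
    send_iommu_command(iommu, cmd);
    flush_command_buffer(iommu);

    spin_unlock_irqrestore(&iommu->lock, flags);
}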