Lines Matching refs:tbl
45 struct iommu_table *tbl = data; in iommu_debugfs_weight_get() local
46 *val = bitmap_weight(tbl->it_map, tbl->it_size); in iommu_debugfs_weight_get()
51 static void iommu_debugfs_add(struct iommu_table *tbl) in iommu_debugfs_add() argument
56 sprintf(name, "%08lx", tbl->it_index); in iommu_debugfs_add()
59 debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight); in iommu_debugfs_add()
60 debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size); in iommu_debugfs_add()
61 debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift); in iommu_debugfs_add()
62 debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start); in iommu_debugfs_add()
63 debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end); in iommu_debugfs_add()
64 debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels); in iommu_debugfs_add()
65 debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size); in iommu_debugfs_add()
68 static void iommu_debugfs_del(struct iommu_table *tbl) in iommu_debugfs_del() argument
73 sprintf(name, "%08lx", tbl->it_index); in iommu_debugfs_del()
78 static void iommu_debugfs_add(struct iommu_table *tbl){} in iommu_debugfs_add() argument
79 static void iommu_debugfs_del(struct iommu_table *tbl){} in iommu_debugfs_del() argument
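The iommu_debugfs_add()/iommu_debugfs_del() references above create and remove one debugfs directory per table, named from it_index printed as "%08lx", holding read-only dumps of the geometry fields plus a computed "weight" attribute: the number of set bits in it_map, i.e. how many TCE slots are currently allocated (the empty bodies at lines 78-79 are the stubs used when debugfs support is compiled out). Below is a minimal user-space model of that weight computation; the bitmap layout is a plausible simplification, not the kernel's bitmap_weight() implementation.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Model of the "weight" attribute: count the set bits in an allocation
 * bitmap stored as an array of unsigned long, one bit per TCE slot. */
static unsigned long map_weight(const unsigned long *map, unsigned long nbits)
{
    unsigned long w = 0;

    for (unsigned long i = 0; i < nbits; i++)
        if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
            w++;
    return w;
}

int main(void)
{
    unsigned long it_map[2] = { 0xf0UL, 0x3UL };   /* toy allocation bitmap */

    printf("weight = %lu\n", map_weight(it_map, 2 * BITS_PER_LONG));  /* 6 */
    return 0;
}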
205 struct iommu_table *tbl, in iommu_range_alloc() argument
238 pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
241 pool = &(tbl->large_pool); in iommu_range_alloc()
243 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
263 if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
264 limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
271 pool = &(tbl->pools[0]); in iommu_range_alloc()
279 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
280 dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift), in iommu_range_alloc()
289 } else if (pass <= tbl->nr_pools) { in iommu_range_alloc()
292 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1); in iommu_range_alloc()
293 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
299 } else if (pass == tbl->nr_pools + 1) { in iommu_range_alloc()
302 pool = &tbl->large_pool; in iommu_range_alloc()
323 pool->hint = (end + tbl->it_blocksize - 1) & in iommu_range_alloc()
324 ~(tbl->it_blocksize - 1); in iommu_range_alloc()
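The iommu_range_alloc() references show how allocations are spread over sub-pools: a per-CPU hash masked with (nr_pools - 1) picks the starting pool, large requests go straight to large_pool, failed searches retry the other pools (pass <= nr_pools) before falling back to the large pool, and the per-pool search hint is rounded up to a multiple of it_blocksize. A small sketch of the two bit tricks involved, assuming, as the masking requires, that nr_pools and the blocksize are powers of two:

#include <stdio.h>

/* Pick a pool index the way the masking in iommu_range_alloc() does:
 * a per-CPU hash value ANDed with (nr_pools - 1). */
static unsigned int pick_pool(unsigned int cpu_hash, unsigned int nr_pools)
{
    return cpu_hash & (nr_pools - 1);
}

/* Round the next-search hint up to a multiple of blocksize, mirroring:
 * hint = (end + blocksize - 1) & ~(blocksize - 1). */
static unsigned long round_hint(unsigned long end, unsigned long blocksize)
{
    return (end + blocksize - 1) & ~(blocksize - 1);
}

int main(void)
{
    printf("pool = %u\n", pick_pool(0x2f, 4));    /* 0x2f & 3 = 3 */
    printf("hint = %lu\n", round_hint(37, 16));   /* rounds 37 up to 48 */
    return 0;
}

Hashing each CPU into its own pool keeps concurrent mappings from contending on a single lock and a single search hint; the extra passes exist so a full pool does not turn into a spurious allocation failure.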
336 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, in iommu_alloc() argument
346 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
351 entry += tbl->it_offset; /* Offset into real TCE table */ in iommu_alloc()
352 ret = entry << tbl->it_page_shift; /* Set the return dma address */ in iommu_alloc()
355 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
357 IOMMU_PAGE_MASK(tbl), direction, attrs); in iommu_alloc()
365 __iommu_free(tbl, ret, npages); in iommu_alloc()
370 if (tbl->it_ops->flush) in iommu_alloc()
371 tbl->it_ops->flush(tbl); in iommu_alloc()
379 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free_check() argument
384 entry = dma_addr >> tbl->it_page_shift; in iommu_free_check()
385 free_entry = entry - tbl->it_offset; in iommu_free_check()
387 if (((free_entry + npages) > tbl->it_size) || in iommu_free_check()
388 (entry < tbl->it_offset)) { in iommu_free_check()
393 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl); in iommu_free_check()
394 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno); in iommu_free_check()
395 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size); in iommu_free_check()
396 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset); in iommu_free_check()
397 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index); in iommu_free_check()
407 static struct iommu_pool *get_pool(struct iommu_table *tbl, in get_pool() argument
411 unsigned long largepool_start = tbl->large_pool.start; in get_pool()
415 p = &tbl->large_pool; in get_pool()
417 unsigned int pool_nr = entry / tbl->poolsize; in get_pool()
419 BUG_ON(pool_nr > tbl->nr_pools); in get_pool()
420 p = &tbl->pools[pool_nr]; in get_pool()
426 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in __iommu_free() argument
433 entry = dma_addr >> tbl->it_page_shift; in __iommu_free()
434 free_entry = entry - tbl->it_offset; in __iommu_free()
436 pool = get_pool(tbl, free_entry); in __iommu_free()
438 if (!iommu_free_check(tbl, dma_addr, npages)) in __iommu_free()
441 tbl->it_ops->clear(tbl, entry, npages); in __iommu_free()
444 bitmap_clear(tbl->it_map, free_entry, npages); in __iommu_free()
448 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free() argument
451 __iommu_free(tbl, dma_addr, npages); in iommu_free()
457 if (tbl->it_ops->flush) in iommu_free()
458 tbl->it_ops->flush(tbl); in iommu_free()
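iommu_alloc() and the free path are symmetric: an allocated bitmap slot becomes a DMA address by adding it_offset and shifting left by it_page_shift, while iommu_free_check() reverses the shift, subtracts it_offset and rejects anything outside [it_offset, it_offset + it_size); get_pool() then maps the freed entry back to its sub-pool by dividing by poolsize, with entries at or beyond large_pool.start belonging to the large pool. A compact model of the address arithmetic and bounds check; the struct below only borrows the field names for readability and is not the kernel's iommu_table:

#include <stdio.h>
#include <stdbool.h>

struct tbl_geom {
    unsigned long it_offset;      /* first TCE entry number backing this table */
    unsigned long it_size;        /* number of TCE entries */
    unsigned int  it_page_shift;  /* IOMMU page size as a shift */
};

/* entry -> DMA address, as in iommu_alloc():
 * entry += it_offset; dma = entry << it_page_shift */
static unsigned long entry_to_dma(const struct tbl_geom *g, unsigned long entry)
{
    return (entry + g->it_offset) << g->it_page_shift;
}

/* DMA address -> table-relative entry plus bounds check, as in iommu_free_check() */
static bool dma_in_range(const struct tbl_geom *g, unsigned long dma,
                         unsigned long npages)
{
    unsigned long entry = dma >> g->it_page_shift;
    unsigned long free_entry = entry - g->it_offset;

    return entry >= g->it_offset && free_entry + npages <= g->it_size;
}

int main(void)
{
    struct tbl_geom g = { .it_offset = 0x100, .it_size = 0x1000, .it_page_shift = 12 };
    unsigned long dma = entry_to_dma(&g, 5);

    printf("dma = 0x%lx, valid = %d\n", dma, dma_in_range(&g, dma, 1));
    return 0;
}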
461 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, in ppc_iommu_map_sg() argument
475 if ((nelems == 0) || !tbl) in ppc_iommu_map_sg()
500 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
502 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE && in ppc_iommu_map_sg()
504 align = PAGE_SHIFT - tbl->it_page_shift; in ppc_iommu_map_sg()
505 entry = iommu_range_alloc(dev, tbl, npages, &handle, in ppc_iommu_map_sg()
506 mask >> tbl->it_page_shift, align); in ppc_iommu_map_sg()
515 "vaddr %lx npages %lu\n", tbl, vaddr, in ppc_iommu_map_sg()
521 entry += tbl->it_offset; in ppc_iommu_map_sg()
522 dma_addr = entry << tbl->it_page_shift; in ppc_iommu_map_sg()
523 dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl)); in ppc_iommu_map_sg()
529 build_fail = tbl->it_ops->set(tbl, entry, npages, in ppc_iommu_map_sg()
530 vaddr & IOMMU_PAGE_MASK(tbl), in ppc_iommu_map_sg()
568 if (tbl->it_ops->flush) in ppc_iommu_map_sg()
569 tbl->it_ops->flush(tbl); in ppc_iommu_map_sg()
591 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl); in ppc_iommu_map_sg()
593 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
594 __iommu_free(tbl, vaddr, npages); in ppc_iommu_map_sg()
604 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, in ppc_iommu_unmap_sg() argument
612 if (!tbl) in ppc_iommu_unmap_sg()
623 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_unmap_sg()
624 __iommu_free(tbl, dma_handle, npages); in ppc_iommu_unmap_sg()
632 if (tbl->it_ops->flush) in ppc_iommu_unmap_sg()
633 tbl->it_ops->flush(tbl); in ppc_iommu_unmap_sg()
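In ppc_iommu_map_sg() the IOMMU page count is derived from the unaligned virtual address and segment length, and when the IOMMU page is smaller than the kernel page (it_page_shift < PAGE_SHIFT), page-aligned segments of at least PAGE_SIZE ask for an extra alignment of PAGE_SHIFT - it_page_shift; the resulting DMA address keeps the sub-page offset by OR-ing in the bits below the IOMMU page mask (iommu_map_page() at lines 838 onward applies the same rules to a single buffer). A sketch of the page count and address composition, equivalent in spirit to iommu_num_pages() but not the kernel helper itself:

#include <stdio.h>

/* How many IOMMU pages of size (1 << page_shift) does [vaddr, vaddr+len) span? */
static unsigned long num_io_pages(unsigned long vaddr, unsigned long len,
                                  unsigned int page_shift)
{
    unsigned long page_size = 1UL << page_shift;
    unsigned long first = vaddr & ~(page_size - 1);
    unsigned long last = (vaddr + len - 1) & ~(page_size - 1);

    return ((last - first) >> page_shift) + 1;
}

/* Compose the DMA address: allocated entry shifted up, low bits taken
 * from the original offset within the IOMMU page. */
static unsigned long compose_dma(unsigned long entry, unsigned long vaddr,
                                 unsigned int page_shift)
{
    unsigned long page_mask = ~((1UL << page_shift) - 1);

    return (entry << page_shift) | (vaddr & ~page_mask);
}

int main(void)
{
    /* 0x2345 bytes starting 0x800 into a 4K IOMMU page span 3 pages */
    printf("npages = %lu\n", num_io_pages(0x10000800, 0x2345, 12));
    printf("dma    = 0x%lx\n", compose_dma(0x42, 0x10000800, 12));
    return 0;
}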
636 static void iommu_table_clear(struct iommu_table *tbl) in iommu_table_clear() argument
645 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size); in iommu_table_clear()
650 if (tbl->it_ops->get) { in iommu_table_clear()
654 for (index = 0; index < tbl->it_size; index++) { in iommu_table_clear()
655 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset); in iommu_table_clear()
660 __set_bit(index, tbl->it_map); in iommu_table_clear()
665 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { in iommu_table_clear()
669 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; in iommu_table_clear()
670 index < tbl->it_size; index++) in iommu_table_clear()
671 __clear_bit(index, tbl->it_map); in iommu_table_clear()
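iommu_table_clear() special-cases kdump: instead of wiping the table it uses the optional get() callback to find TCEs still live from the crashed kernel and marks their slots used in it_map, and if that leaves fewer than KDUMP_MIN_TCE_ENTRIES free slots it forcibly clears the last KDUMP_MIN_TCE_ENTRIES entries so the capture kernel can still perform I/O. A toy rendering of that fallback policy; the slot count and threshold are illustrative stand-ins, not the kernel constants:

#include <stdio.h>
#include <stdbool.h>

#define TCE_SLOTS     16
#define MIN_FREE_TCES  4    /* stand-in for KDUMP_MIN_TCE_ENTRIES */

int main(void)
{
    bool used[TCE_SLOTS] = { false };
    unsigned long free_slots = 0;

    /* Pretend the crashed kernel left almost every TCE live. */
    for (int i = 0; i < TCE_SLOTS; i++)
        used[i] = (i != 3);

    for (int i = 0; i < TCE_SLOTS; i++)
        free_slots += !used[i];

    /* Fallback: too few slots remain, so forcibly free the last few. */
    if (free_slots < MIN_FREE_TCES)
        for (int i = TCE_SLOTS - MIN_FREE_TCES; i < TCE_SLOTS; i++)
            used[i] = false;

    for (int i = 0; i < TCE_SLOTS; i++)
        printf("%d", used[i]);
    printf("\n");
    return 0;
}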
677 static void iommu_table_reserve_pages(struct iommu_table *tbl, in iommu_table_reserve_pages() argument
688 if (tbl->it_offset == 0) in iommu_table_reserve_pages()
689 set_bit(0, tbl->it_map); in iommu_table_reserve_pages()
691 if (res_start < tbl->it_offset) in iommu_table_reserve_pages()
692 res_start = tbl->it_offset; in iommu_table_reserve_pages()
694 if (res_end > (tbl->it_offset + tbl->it_size)) in iommu_table_reserve_pages()
695 res_end = tbl->it_offset + tbl->it_size; in iommu_table_reserve_pages()
699 tbl->it_reserved_start = tbl->it_offset; in iommu_table_reserve_pages()
700 tbl->it_reserved_end = tbl->it_offset; in iommu_table_reserve_pages()
704 tbl->it_reserved_start = res_start; in iommu_table_reserve_pages()
705 tbl->it_reserved_end = res_end; in iommu_table_reserve_pages()
707 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) in iommu_table_reserve_pages()
708 set_bit(i - tbl->it_offset, tbl->it_map); in iommu_table_reserve_pages()
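iommu_table_reserve_pages() pins two things down before the allocator runs: entry 0 is reserved whenever it_offset == 0 so a zero DMA address is never handed out, and an optional [res_start, res_end) window is clamped to the table's own range and marked used bit by bit (with an empty window, it_reserved_start and it_reserved_end both collapse to it_offset). A sketch of the clamp-and-mark step, using a plain bool array in place of the kernel bitmap:

#include <stdio.h>
#include <stdbool.h>

#define TBL_SIZE 32

static void reserve_pages(bool *map, unsigned long it_offset,
                          unsigned long it_size,
                          unsigned long res_start, unsigned long res_end)
{
    /* Never give out the zero DMA address. */
    if (it_offset == 0)
        map[0] = true;

    /* Clamp the requested window to [it_offset, it_offset + it_size). */
    if (res_start < it_offset)
        res_start = it_offset;
    if (res_end > it_offset + it_size)
        res_end = it_offset + it_size;

    /* Mark each reserved entry as used, table-relative. */
    for (unsigned long i = res_start; i < res_end; i++)
        map[i - it_offset] = true;
}

int main(void)
{
    bool map[TBL_SIZE] = { false };

    reserve_pages(map, 0, TBL_SIZE, 4, 8);   /* reserves slot 0 plus slots 4-7 */
    for (int i = 0; i < TBL_SIZE; i++)
        printf("%d", map[i]);
    printf("\n");
    return 0;
}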
715 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, in iommu_init_table() argument
723 BUG_ON(!tbl->it_ops); in iommu_init_table()
726 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_init_table()
728 tbl->it_map = vzalloc_node(sz, nid); in iommu_init_table()
729 if (!tbl->it_map) { in iommu_init_table()
734 iommu_table_reserve_pages(tbl, res_start, res_end); in iommu_init_table()
737 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024)) in iommu_init_table()
738 tbl->nr_pools = IOMMU_NR_POOLS; in iommu_init_table()
740 tbl->nr_pools = 1; in iommu_init_table()
743 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools; in iommu_init_table()
745 for (i = 0; i < tbl->nr_pools; i++) { in iommu_init_table()
746 p = &tbl->pools[i]; in iommu_init_table()
748 p->start = tbl->poolsize * i; in iommu_init_table()
750 p->end = p->start + tbl->poolsize; in iommu_init_table()
753 p = &tbl->large_pool; in iommu_init_table()
755 p->start = tbl->poolsize * i; in iommu_init_table()
757 p->end = tbl->it_size; in iommu_init_table()
759 iommu_table_clear(tbl); in iommu_init_table()
767 iommu_debugfs_add(tbl); in iommu_init_table()
769 return tbl; in iommu_init_table()
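iommu_init_table() sizes the allocator: it_map gets one bit per TCE entry (BITS_TO_LONGS(it_size) longs from vzalloc_node), tables covering at least 1 GiB of DMA space are split into IOMMU_NR_POOLS sub-pools while smaller ones keep a single pool, and the small pools share three quarters of the entries while the remaining quarter up to it_size becomes the large pool. A model of that sizing, with 4 standing in for IOMMU_NR_POOLS purely for illustration:

#include <stdio.h>

int main(void)
{
    unsigned long it_size = 1UL << 18;      /* 256K entries */
    unsigned int it_page_shift = 12;        /* 4K IOMMU pages -> 1 GiB of space */
    unsigned int nr_pools;
    unsigned long poolsize;

    /* Tables covering >= 1 GiB get multiple pools; smaller tables get one. */
    if ((it_size << it_page_shift) >= (1UL * 1024 * 1024 * 1024))
        nr_pools = 4;                       /* stand-in for IOMMU_NR_POOLS */
    else
        nr_pools = 1;

    /* Three quarters of the entries shared by the small pools; the rest,
     * up to it_size, forms the large pool. */
    poolsize = (it_size * 3 / 4) / nr_pools;

    printf("nr_pools = %u, poolsize = %lu, large pool = [%lu, %lu)\n",
           nr_pools, poolsize, poolsize * nr_pools, it_size);
    return 0;
}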
772 bool iommu_table_in_use(struct iommu_table *tbl) in iommu_table_in_use() argument
777 if (tbl->it_offset == 0) in iommu_table_in_use()
779 end = tbl->it_reserved_start - tbl->it_offset; in iommu_table_in_use()
780 if (find_next_bit(tbl->it_map, end, start) != end) in iommu_table_in_use()
783 start = tbl->it_reserved_end - tbl->it_offset; in iommu_table_in_use()
784 end = tbl->it_size; in iommu_table_in_use()
785 return find_next_bit(tbl->it_map, end, start) != end; in iommu_table_in_use()
790 struct iommu_table *tbl; in iommu_table_free() local
792 tbl = container_of(kref, struct iommu_table, it_kref); in iommu_table_free()
794 if (tbl->it_ops->free) in iommu_table_free()
795 tbl->it_ops->free(tbl); in iommu_table_free()
797 if (!tbl->it_map) { in iommu_table_free()
798 kfree(tbl); in iommu_table_free()
802 iommu_debugfs_del(tbl); in iommu_table_free()
805 if (iommu_table_in_use(tbl)) in iommu_table_free()
809 vfree(tbl->it_map); in iommu_table_free()
812 kfree(tbl); in iommu_table_free()
815 struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl) in iommu_tce_table_get() argument
817 if (kref_get_unless_zero(&tbl->it_kref)) in iommu_tce_table_get()
818 return tbl; in iommu_tce_table_get()
824 int iommu_tce_table_put(struct iommu_table *tbl) in iommu_tce_table_put() argument
826 if (WARN_ON(!tbl)) in iommu_tce_table_put()
829 return kref_put(&tbl->it_kref, iommu_table_free); in iommu_tce_table_put()
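Table lifetime is reference counted through it_kref: iommu_tce_table_get() succeeds only while the count is non-zero (kref_get_unless_zero), and iommu_tce_table_put() drops a reference and runs iommu_table_free() on the last one, which removes the debugfs entry, warns if the table is still in use, and frees it_map and the table. A user-space model of the get-unless-zero / put-with-release pattern using C11 atomics; the kernel uses struct kref, so this only illustrates the semantics:

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

struct obj {
    atomic_int refs;
};

/* Like kref_get_unless_zero(): take a reference only if the object
 * has not already started dying (count == 0). */
static bool obj_get_unless_zero(struct obj *o)
{
    int c = atomic_load(&o->refs);

    while (c != 0)
        if (atomic_compare_exchange_weak(&o->refs, &c, c + 1))
            return true;
    return false;
}

/* Like kref_put(): drop a reference, run the release callback on the last one. */
static void obj_put(struct obj *o, void (*release)(struct obj *o))
{
    if (atomic_fetch_sub(&o->refs, 1) == 1)
        release(o);
}

static void obj_release(struct obj *o)
{
    (void)o;
    printf("releasing object\n");   /* iommu_table_free() would run here */
}

int main(void)
{
    struct obj o = { .refs = 1 };

    printf("got again: %d\n", obj_get_unless_zero(&o));          /* 1 */
    obj_put(&o, obj_release);
    obj_put(&o, obj_release);                                    /* releases */
    printf("got after release: %d\n", obj_get_unless_zero(&o));  /* 0 */
    return 0;
}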
838 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, in iommu_map_page() argument
853 if (tbl) { in iommu_map_page()
854 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); in iommu_map_page()
856 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && in iommu_map_page()
858 align = PAGE_SHIFT - tbl->it_page_shift; in iommu_map_page()
860 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, in iommu_map_page()
861 mask >> tbl->it_page_shift, align, in iommu_map_page()
867 "vaddr %p npages %d\n", tbl, vaddr, in iommu_map_page()
871 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl)); in iommu_map_page()
877 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, in iommu_unmap_page() argument
885 if (tbl) { in iommu_unmap_page()
887 IOMMU_PAGE_SIZE(tbl)); in iommu_unmap_page()
888 iommu_free(tbl, dma_handle, npages); in iommu_unmap_page()
896 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, in iommu_alloc_coherent() argument
920 if (!tbl) in iommu_alloc_coherent()
931 nio_pages = size >> tbl->it_page_shift; in iommu_alloc_coherent()
932 io_order = get_iommu_order(size, tbl); in iommu_alloc_coherent()
933 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, in iommu_alloc_coherent()
934 mask >> tbl->it_page_shift, io_order, 0); in iommu_alloc_coherent()
943 void iommu_free_coherent(struct iommu_table *tbl, size_t size, in iommu_free_coherent() argument
946 if (tbl) { in iommu_free_coherent()
950 nio_pages = size >> tbl->it_page_shift; in iommu_free_coherent()
951 iommu_free(tbl, dma_handle, nio_pages); in iommu_free_coherent()
1018 void iommu_flush_tce(struct iommu_table *tbl) in iommu_flush_tce() argument
1021 if (tbl->it_ops->flush) in iommu_flush_tce()
1022 tbl->it_ops->flush(tbl); in iommu_flush_tce()
1061 struct iommu_table *tbl, in iommu_tce_xchg_no_kill() argument
1068 ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false); in iommu_tce_xchg_no_kill()
1071 !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift, in iommu_tce_xchg_no_kill()
1079 void iommu_tce_kill(struct iommu_table *tbl, in iommu_tce_kill() argument
1082 if (tbl->it_ops->tce_kill) in iommu_tce_kill()
1083 tbl->it_ops->tce_kill(tbl, entry, pages, false); in iommu_tce_kill()
1087 int iommu_take_ownership(struct iommu_table *tbl) in iommu_take_ownership() argument
1089 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_take_ownership()
1099 if (!tbl->it_ops->xchg_no_kill) in iommu_take_ownership()
1102 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1103 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1104 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_take_ownership()
1106 if (iommu_table_in_use(tbl)) { in iommu_take_ownership()
1110 memset(tbl->it_map, 0xff, sz); in iommu_take_ownership()
1113 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1114 spin_unlock(&tbl->pools[i].lock); in iommu_take_ownership()
1115 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1121 void iommu_release_ownership(struct iommu_table *tbl) in iommu_release_ownership() argument
1123 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_release_ownership()
1125 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_release_ownership()
1126 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1127 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_release_ownership()
1129 memset(tbl->it_map, 0, sz); in iommu_release_ownership()
1131 iommu_table_reserve_pages(tbl, tbl->it_reserved_start, in iommu_release_ownership()
1132 tbl->it_reserved_end); in iommu_release_ownership()
1134 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1135 spin_unlock(&tbl->pools[i].lock); in iommu_release_ownership()
1136 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_release_ownership()
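iommu_take_ownership() and iommu_release_ownership() hand the table to and from an external user such as VFIO: both take the large pool lock first and then each small pool lock nested under it, taking ownership fails if the table is already in use and otherwise fills the whole bitmap with 0xff so the kernel allocator stays out, and releasing ownership zeroes the bitmap and re-applies the reserved window recorded at init time. A sketch of the bitmap half of that handshake, with locking omitted and the in-use test simplified (the kernel's check additionally ignores the reserved window); sz matches the original (it_size + 7) >> 3 byte count:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define IT_SIZE 64
static unsigned char it_map[(IT_SIZE + 7) >> 3];   /* one bit per TCE entry */

static bool map_is_empty(void)
{
    for (size_t i = 0; i < sizeof(it_map); i++)
        if (it_map[i])
            return false;
    return true;
}

/* Take ownership: refuse if anything is mapped, else claim every entry. */
static int take_ownership(void)
{
    if (!map_is_empty())
        return -1;                       /* the kernel returns -EBUSY here */
    memset(it_map, 0xff, sizeof(it_map));
    return 0;
}

/* Release ownership: clear everything, then re-reserve [res_start, res_end). */
static void release_ownership(unsigned long res_start, unsigned long res_end)
{
    memset(it_map, 0, sizeof(it_map));
    for (unsigned long i = res_start; i < res_end; i++)
        it_map[i >> 3] |= 1u << (i & 7);
}

int main(void)
{
    printf("take: %d\n", take_ownership());
    release_ownership(0, 4);             /* restore a 4-entry reserved window */
    printf("bit 2 reserved again: %d\n", !!(it_map[0] & (1u << 2)));
    return 0;
}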