Lines matching refs:mem — references to the identifier mem in kernel/dma/swiotlb.c. Each entry shows the source line number, the matching code, and the enclosing function; declaration sites are tagged "local" or "argument".
137 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_print_info() local
139 if (!mem->nslabs) { in swiotlb_print_info()
144 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, in swiotlb_print_info()
145 (mem->nslabs << IO_TLB_SHIFT) >> 20); in swiotlb_print_info()
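The megabyte figure printed here is pure shift arithmetic on the slot count. A minimal user-space sketch of that calculation, assuming the kernel's IO_TLB_SHIFT of 11 (2 KiB per slot) and the default 64 MiB pool:

    #include <stdio.h>

    #define IO_TLB_SHIFT 11    /* each swiotlb slot is 2 KiB */

    int main(void)
    {
        unsigned long nslabs = 32768;    /* default 64 MiB pool */

        /* same expression as line 145: bytes >> 20 gives MB */
        printf("%lu slabs -> %lu MB\n",
               nslabs, (nslabs << IO_TLB_SHIFT) >> 20);
        return 0;
    }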
166 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_update_mem_attributes() local
170 if (!mem->nslabs || mem->late_alloc) in swiotlb_update_mem_attributes()
172 vaddr = phys_to_virt(mem->start); in swiotlb_update_mem_attributes()
173 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); in swiotlb_update_mem_attributes()
178 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, in swiotlb_init_io_tlb_mem() argument
184 mem->nslabs = nslabs; in swiotlb_init_io_tlb_mem()
185 mem->start = start; in swiotlb_init_io_tlb_mem()
186 mem->end = mem->start + bytes; in swiotlb_init_io_tlb_mem()
187 mem->index = 0; in swiotlb_init_io_tlb_mem()
188 mem->late_alloc = late_alloc; in swiotlb_init_io_tlb_mem()
191 mem->force_bounce = true; in swiotlb_init_io_tlb_mem()
193 spin_lock_init(&mem->lock); in swiotlb_init_io_tlb_mem()
194 for (i = 0; i < mem->nslabs; i++) { in swiotlb_init_io_tlb_mem()
195 mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); in swiotlb_init_io_tlb_mem()
196 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_mem()
197 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_mem()
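The initializer at line 195 is what builds the per-segment free lists: within every 128-slot segment, slots[i].list holds the number of free slots from i to the end of the segment, so it starts at IO_TLB_SEGSIZE for the first slot of a segment and decays to 1 at the last. A self-contained sketch of the pattern, assuming the kernel's io_tlb_offset() of i & (IO_TLB_SEGSIZE - 1):

    #include <stdio.h>

    #define IO_TLB_SEGSIZE 128    /* slots per segment */

    /* offset of slot i within its segment, as in the kernel */
    static unsigned long io_tlb_offset(unsigned long i)
    {
        return i & (IO_TLB_SEGSIZE - 1);
    }

    int main(void)
    {
        unsigned long i;

        /* first segment: list runs 128, 127, ..., 2, 1 */
        for (i = 0; i < IO_TLB_SEGSIZE; i++)
            if (i < 3 || i > IO_TLB_SEGSIZE - 3)
                printf("slot %3lu: list = %lu\n", i,
                       IO_TLB_SEGSIZE - io_tlb_offset(i));
        return 0;
    }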
204 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_init_with_tbl() local
211 if (WARN_ON_ONCE(mem->nslabs)) in swiotlb_init_with_tbl()
214 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_with_tbl()
215 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_with_tbl()
216 if (!mem->slots) in swiotlb_init_with_tbl()
220 swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); in swiotlb_init_with_tbl()
224 swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); in swiotlb_init_with_tbl()
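array_size() at line 214 is the kernel's overflow-checked multiply from <linux/overflow.h>: it saturates to SIZE_MAX if sizeof(*mem->slots) * nslabs would wrap, so the allocation fails cleanly instead of returning a short buffer. A hedged user-space equivalent using the GCC/Clang overflow builtin:

    #include <stdint.h>
    #include <stdio.h>

    /* saturating multiply in the spirit of the kernel's array_size() */
    static size_t array_size(size_t a, size_t b)
    {
        size_t bytes;

        if (__builtin_mul_overflow(a, b, &bytes))
            return SIZE_MAX;    /* guarantees the allocation fails */
        return bytes;
    }

    int main(void)
    {
        printf("%zu\n", array_size(16, 4096));          /* 65536 */
        printf("%zu\n", array_size(SIZE_MAX / 2, 4));   /* SIZE_MAX */
        return 0;
    }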
306 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_late_init_with_tbl() local
313 if (WARN_ON_ONCE(mem->nslabs)) in swiotlb_late_init_with_tbl()
316 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_late_init_with_tbl()
317 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_late_init_with_tbl()
318 if (!mem->slots) in swiotlb_late_init_with_tbl()
322 swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true); in swiotlb_late_init_with_tbl()
325 swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); in swiotlb_late_init_with_tbl()
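The late path cannot use memblock, so the slots array comes from the buddy allocator, which wants a power-of-two page order; get_order() performs that conversion at line 317. A sketch of its semantics, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* smallest order such that (PAGE_SIZE << order) >= size */
    static int get_order(unsigned long size)
    {
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        printf("%d\n", get_order(4096));     /* 0: one page */
        printf("%d\n", get_order(4097));     /* 1: two pages */
        printf("%d\n", get_order(65536));    /* 4: sixteen pages */
        return 0;
    }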
331 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_exit() local
335 if (!mem->nslabs) in swiotlb_exit()
339 tbl_vaddr = (unsigned long)phys_to_virt(mem->start); in swiotlb_exit()
340 tbl_size = PAGE_ALIGN(mem->end - mem->start); in swiotlb_exit()
341 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
344 if (mem->late_alloc) { in swiotlb_exit()
346 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
348 memblock_free_late(mem->start, tbl_size); in swiotlb_exit()
349 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
352 memset(mem, 0, sizeof(*mem)); in swiotlb_exit()
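The teardown mirrors the allocation path recorded in mem->late_alloc: a pool created with __get_free_pages() is returned via free_pages(), while a boot-time memblock pool is handed to the page allocator with memblock_free_late(). The final memset() zeroes mem->nslabs, which is the condition every other entry point (and is_swiotlb_active()) uses to treat the pool as absent.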
369 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_bounce() local
370 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; in swiotlb_bounce()
371 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
372 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
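swiotlb_bounce() recovers the slot index from the bounce address with the inverse of slot_addr(); both directions are a single shift. A round-trip sketch, assuming the kernel's slot_addr() of start + (idx << IO_TLB_SHIFT):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IO_TLB_SHIFT 11

    typedef uint64_t phys_addr_t;

    static phys_addr_t slot_addr(phys_addr_t start, unsigned int idx)
    {
        return start + ((phys_addr_t)idx << IO_TLB_SHIFT);
    }

    int main(void)
    {
        phys_addr_t start = 0x80000000ULL;    /* illustrative pool base */
        unsigned int idx = 42;
        phys_addr_t tlb_addr = slot_addr(start, idx);

        /* the inverse used at line 370 */
        assert((tlb_addr - start) >> IO_TLB_SHIFT == idx);
        printf("slot %u lives at %#llx\n", idx,
               (unsigned long long)tlb_addr);
        return 0;
    }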
450 static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) in wrap_index() argument
452 if (index >= mem->nslabs) in wrap_index()
464 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots() local
467 phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; in swiotlb_find_slots()
488 spin_lock_irqsave(&mem->lock, flags); in swiotlb_find_slots()
489 if (unlikely(nslots > mem->nslabs - mem->used)) in swiotlb_find_slots()
492 index = wrap = wrap_index(mem, ALIGN(mem->index, stride)); in swiotlb_find_slots()
497 index = wrap_index(mem, index + 1); in swiotlb_find_slots()
509 if (mem->slots[index].list >= nslots) in swiotlb_find_slots()
512 index = wrap_index(mem, index + stride); in swiotlb_find_slots()
516 spin_unlock_irqrestore(&mem->lock, flags); in swiotlb_find_slots()
521 mem->slots[i].list = 0; in swiotlb_find_slots()
522 mem->slots[i].alloc_size = in swiotlb_find_slots()
527 mem->slots[i].list; i--) in swiotlb_find_slots()
528 mem->slots[i].list = ++count; in swiotlb_find_slots()
533 if (index + nslots < mem->nslabs) in swiotlb_find_slots()
534 mem->index = index + nslots; in swiotlb_find_slots()
536 mem->index = 0; in swiotlb_find_slots()
537 mem->used += nslots; in swiotlb_find_slots()
539 spin_unlock_irqrestore(&mem->lock, flags); in swiotlb_find_slots()
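The loop structure above is easiest to see end to end. Below is a condensed user-space model of swiotlb_find_slots(): scan forward from the saved index with wrap-around, skip any slot whose free run is too short, then claim the run by zeroing its counts and shrink the counts of the free slots immediately before it. Locking, the stride/alignment handling, and the address masks are omitted, and all names outside the kernel constants are illustrative:

    #include <stdio.h>

    #define IO_TLB_SEGSIZE  128            /* slots per segment */
    #define NSLABS          (4 * IO_TLB_SEGSIZE)

    static unsigned int slot_list[NSLABS]; /* free-run length per slot */
    static unsigned int next_index;        /* where the search resumes */

    static unsigned int wrap_index(unsigned int index)
    {
        return index >= NSLABS ? index - NSLABS : index;
    }

    static unsigned int io_tlb_offset(unsigned int i)
    {
        return i & (IO_TLB_SEGSIZE - 1);
    }

    /* return the first index of a run of nslots free slots, or -1 */
    static int find_slots(unsigned int nslots)
    {
        unsigned int index, wrap, i, count = 0;

        index = wrap = wrap_index(next_index);
        do {
            /* list is the free run from here to segment end */
            if (slot_list[index] >= nslots)
                goto found;
            index = wrap_index(index + 1);
        } while (index != wrap);
        return -1;                         /* pool exhausted */

    found:
        for (i = index; i < index + nslots; i++)
            slot_list[i] = 0;              /* claim the run */
        /* free slots before the run now reach only up to index */
        for (i = index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && slot_list[i]; i--)
            slot_list[i] = ++count;

        next_index = wrap_index(index + nslots);
        return index;
    }

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < NSLABS; i++)
            slot_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);

        printf("run at %d\n", find_slots(8));    /* 0 */
        printf("run at %d\n", find_slots(8));    /* 8 */
        return 0;
    }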
548 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single() local
554 if (!mem) in swiotlb_tbl_map_single()
572 alloc_size, mem->nslabs, mem->used); in swiotlb_tbl_map_single()
582 mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); in swiotlb_tbl_map_single()
583 tlb_addr = slot_addr(mem->start, index) + offset; in swiotlb_tbl_map_single()
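One subtlety at lines 582-583: the low-order offset of orig_addr is preserved inside the first slot, so it must be counted against the slot budget before rounding up, as in the nr_slots(alloc_size + offset) expression visible at line 596 in the release path. A sketch of that rounding, assuming the kernel's nr_slots() of DIV_ROUND_UP(val, IO_TLB_SIZE):

    #include <stdio.h>

    #define IO_TLB_SHIFT 11
    #define IO_TLB_SIZE  (1 << IO_TLB_SHIFT)

    /* round a byte count up to whole 2 KiB slots, as nr_slots() does */
    static unsigned int nr_slots(unsigned long val)
    {
        return (val + IO_TLB_SIZE - 1) >> IO_TLB_SHIFT;
    }

    int main(void)
    {
        unsigned long alloc_size = 5000;    /* bytes to bounce */
        unsigned long offset = 1792;        /* intra-slot offset kept */

        /* the offset consumes part of the first slot's 2 KiB */
        printf("nslots = %u\n", nr_slots(alloc_size + offset));  /* 4 */
        printf("without offset: %u\n", nr_slots(alloc_size));    /* 3 */
        return 0;
    }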
592 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_release_slots() local
595 int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; in swiotlb_release_slots()
596 int nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
605 spin_lock_irqsave(&mem->lock, flags); in swiotlb_release_slots()
607 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
616 mem->slots[i].list = ++count; in swiotlb_release_slots()
617 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
618 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
626 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
628 mem->slots[i].list = ++count; in swiotlb_release_slots()
629 mem->used -= nslots; in swiotlb_release_slots()
630 spin_unlock_irqrestore(&mem->lock, flags); in swiotlb_release_slots()
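swiotlb_release_slots() is the mirror image of the search: it seeds count from the free run that immediately follows the freed block (line 607), so the two runs merge, then walks backwards through the freed slots and any free slots before them, leaving every free slot again holding its distance to the next allocated slot or segment end. A user-space model sharing the conventions of the find-slots sketch above:

    #include <stdio.h>

    #define IO_TLB_SEGSIZE  128
    #define NSLABS          (2 * IO_TLB_SEGSIZE)
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

    static unsigned int slot_list[NSLABS];

    static unsigned int io_tlb_offset(unsigned int i)
    {
        return i & (IO_TLB_SEGSIZE - 1);
    }

    static void release_slots(int index, int nslots)
    {
        int count, i;

        /* merge with the following free run, unless at segment end */
        if (index + nslots < ALIGN_UP(index + 1, IO_TLB_SEGSIZE))
            count = slot_list[index + nslots];
        else
            count = 0;

        /* the freed slots become the front of the merged run */
        for (i = index + nslots - 1; i >= index; i--)
            slot_list[i] = ++count;

        /* extend the counts of free slots preceding the block */
        for (i = index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && slot_list[i]; i--)
            slot_list[i] = ++count;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NSLABS; i++)
            slot_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);

        /* pretend slots 8..15 were claimed, then released */
        for (i = 8; i < 16; i++)
            slot_list[i] = 0;
        for (i = 7; i >= 0; i--)
            slot_list[i] = 8 - i;    /* as find_slots leaves them */

        release_slots(8, 8);
        printf("slot 0 run: %u (expect 128)\n", slot_list[0]);
        return 0;
    }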
709 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active() local
711 return mem && mem->nslabs; in is_swiotlb_active()
718 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem) in swiotlb_create_debugfs_files() argument
720 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); in swiotlb_create_debugfs_files()
721 debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used); in swiotlb_create_debugfs_files()
726 struct io_tlb_mem *mem = &io_tlb_default_mem; in swiotlb_create_default_debugfs() local
729 if (mem->nslabs) { in swiotlb_create_default_debugfs()
730 mem->debugfs = debugfs_dir; in swiotlb_create_default_debugfs()
731 swiotlb_create_debugfs_files(mem); in swiotlb_create_default_debugfs()
745 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_debugfs_init() local
747 mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir); in rmem_swiotlb_debugfs_init()
748 swiotlb_create_debugfs_files(mem); in rmem_swiotlb_debugfs_init()
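The default pool and every restricted pool expose the same two read-only counters: with debugfs mounted in the usual place, the default pool's files appear as /sys/kernel/debug/swiotlb/io_tlb_nslabs and io_tlb_used, and each reserved-memory pool gets its own subdirectory named after the reserved-memory node (the rmem->name passed at line 747).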
758 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc() local
762 if (!mem) in swiotlb_alloc()
769 tlb_addr = slot_addr(mem->start, index); in swiotlb_alloc()
789 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_device_init() local
797 if (!mem) { in rmem_swiotlb_device_init()
798 mem = kzalloc(sizeof(*mem), GFP_KERNEL); in rmem_swiotlb_device_init()
799 if (!mem) in rmem_swiotlb_device_init()
802 mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs), in rmem_swiotlb_device_init()
804 if (!mem->slots) { in rmem_swiotlb_device_init()
805 kfree(mem); in rmem_swiotlb_device_init()
811 swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false); in rmem_swiotlb_device_init()
812 mem->force_bounce = true; in rmem_swiotlb_device_init()
813 mem->for_alloc = true; in rmem_swiotlb_device_init()
815 rmem->priv = mem; in rmem_swiotlb_device_init()
820 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()
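Caching the pool in rmem->priv means it is built once and then shared by every device attached to the same reserved-memory node, which is how the restricted-dma-pool binding behaves. Setting force_bounce makes all streaming DMA for those devices go through the pool, and for_alloc additionally allows swiotlb_alloc() (line 758 onwards) to carve coherent buffers out of it.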