Lines Matching refs:cb
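(Each entry below shows a source line number, the code on that line, and the enclosing function. The hl_ prefixes (hl_cb, hl_device, hl_ctx) mark this as the habanalabs driver's command-buffer code. The notes interleaved between reference groups are hedged sketches of the generic kernel patterns the listing exposes, with stand-in names, not the driver's actual implementation.)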
15 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb) in cb_map_mem() argument
38 INIT_LIST_HEAD(&cb->va_block_list); in cb_map_mem()
40 for (bus_addr = cb->bus_address; in cb_map_mem()
41 bus_addr < cb->bus_address + cb->size; in cb_map_mem()
62 list_add_tail(&va_block->node, &cb->va_block_list); in cb_map_mem()
67 bus_addr = cb->bus_address; in cb_map_mem()
69 list_for_each_entry(va_block, &cb->va_block_list, node) { in cb_map_mem()
72 &cb->va_block_list)); in cb_map_mem()
87 cb->is_mmu_mapped = true; in cb_map_mem()
92 list_for_each_entry(va_block, &cb->va_block_list, node) { in cb_map_mem()
105 list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) { in cb_map_mem()
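The cb_map_mem() references above show a per-page reservation loop: one va_block node per device page, linked tail-first onto cb->va_block_list, with a list_for_each_entry_safe() walk to unwind partial work on failure. A minimal sketch of that build-then-unwind pattern using only the standard <linux/list.h> helpers; struct va_block here is a stand-in, not the driver's definition:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct va_block {
            struct list_head node;
            u64 start;
            u64 size;
    };

    static int build_va_blocks(struct list_head *blocks, u64 base, u64 len,
                               u64 page_size)
    {
            struct va_block *b, *tmp;
            u64 addr;

            INIT_LIST_HEAD(blocks);

            for (addr = base; addr < base + len; addr += page_size) {
                    b = kzalloc(sizeof(*b), GFP_KERNEL);
                    if (!b)
                            goto unwind;
                    b->start = addr;
                    b->size = page_size;
                    list_add_tail(&b->node, blocks);
            }

            return 0;

    unwind:
            /* _safe iterator: each entry is unlinked and freed mid-walk */
            list_for_each_entry_safe(b, tmp, blocks, node) {
                    list_del(&b->node);
                    kfree(b);
            }

            return -ENOMEM;
    }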
114 static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb) in cb_unmap_mem() argument
121 list_for_each_entry(va_block, &cb->va_block_list, node) in cb_unmap_mem()
124 &cb->va_block_list))) in cb_unmap_mem()
133 list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) { in cb_unmap_mem()
140 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) in cb_fini() argument
142 if (cb->is_internal) in cb_fini()
144 (uintptr_t)cb->kernel_address, cb->size); in cb_fini()
146 hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size, in cb_fini()
147 cb->kernel_address, cb->bus_address); in cb_fini()
149 kfree(cb); in cb_fini()
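cb_fini() frees the backing storage down one of two paths (an internal allocator for on-device buffers, coherent DMA memory otherwise) and then frees the descriptor itself. A sketch of the coherent-DMA pairing the asic_dma_alloc/free_coherent hooks presumably wrap; the key invariant is that the same size, CPU address, and bus address handed out at allocation come back at free:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    static void *cb_backing_alloc(struct device *dev, size_t size,
                                  dma_addr_t *bus_address)
    {
            /* returns a kernel virtual address; *bus_address is the device's view */
            return dma_alloc_coherent(dev, size, bus_address, GFP_KERNEL);
    }

    static void cb_backing_free(struct device *dev, size_t size,
                                void *kernel_address, dma_addr_t bus_address)
    {
            dma_free_coherent(dev, size, kernel_address, bus_address);
    }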
152 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) in cb_do_release() argument
154 if (cb->is_pool) { in cb_do_release()
156 list_add(&cb->pool_list, &hdev->cb_pool); in cb_do_release()
159 cb_fini(hdev, cb); in cb_do_release()
166 struct hl_cb *cb; in cb_release() local
168 cb = container_of(ref, struct hl_cb, refcount); in cb_release()
169 hdev = cb->hdev; in cb_release()
171 hl_debugfs_remove_cb(cb); in cb_release()
173 if (cb->is_mmu_mapped) in cb_release()
174 cb_unmap_mem(cb->ctx, cb); in cb_release()
176 hl_ctx_put(cb->ctx); in cb_release()
178 cb_do_release(hdev, cb); in cb_release()
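cb_release() is the kref release callback: it runs exactly once, when the last reference is dropped, and recovers the enclosing hl_cb from the embedded kref with container_of() before tearing everything down. The skeleton of that pattern, with struct obj standing in for struct hl_cb:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
            struct kref refcount;
            /* ... payload ... */
    };

    static void obj_release(struct kref *ref)
    {
            struct obj *o = container_of(ref, struct obj, refcount);

            /* per-object teardown goes here, then: */
            kfree(o);
    }

    /*
     * Every kref_get(&o->refcount) must be balanced by a
     * kref_put(&o->refcount, obj_release); the final put frees the object.
     */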
184 struct hl_cb *cb = NULL; in hl_cb_alloc() local
197 cb = kzalloc(sizeof(*cb), GFP_ATOMIC); in hl_cb_alloc()
199 if (!cb) in hl_cb_alloc()
200 cb = kzalloc(sizeof(*cb), GFP_KERNEL); in hl_cb_alloc()
202 if (!cb) in hl_cb_alloc()
208 kfree(cb); in hl_cb_alloc()
213 cb->is_internal = true; in hl_cb_alloc()
214 cb->bus_address = hdev->internal_cb_va_base + cb_offset; in hl_cb_alloc()
217 &cb->bus_address, GFP_ATOMIC); in hl_cb_alloc()
220 cb_size, &cb->bus_address, GFP_KERNEL); in hl_cb_alloc()
223 &cb->bus_address, in hl_cb_alloc()
231 kfree(cb); in hl_cb_alloc()
235 cb->kernel_address = p; in hl_cb_alloc()
236 cb->size = cb_size; in hl_cb_alloc()
238 return cb; in hl_cb_alloc()
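hl_cb_alloc() attempts each allocation twice: first with GFP_ATOMIC, then, only if that fails, with GFP_KERNEL. GFP_ATOMIC neither sleeps nor enters direct reclaim, so it is a low-latency first try; the GFP_KERNEL retry may sleep and reclaim, which is only valid because this path runs in process context. The shape of that idiom:

    #include <linux/slab.h>

    struct desc { int payload; };   /* hypothetical stand-in for struct hl_cb */

    static struct desc *desc_alloc(void)
    {
            struct desc *d;

            /* low-latency first try: no sleeping, no direct reclaim */
            d = kzalloc(sizeof(*d), GFP_ATOMIC);
            if (!d)
                    /* may sleep and reclaim; requires process context */
                    d = kzalloc(sizeof(*d), GFP_KERNEL);

            return d;       /* NULL only if both attempts failed */
    }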
245 struct hl_cb *cb; in hl_cb_create() local
278 cb = list_first_entry(&hdev->cb_pool, in hl_cb_create()
279 typeof(*cb), pool_list); in hl_cb_create()
280 list_del(&cb->pool_list); in hl_cb_create()
291 cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb); in hl_cb_create()
292 if (!cb) { in hl_cb_create()
298 cb->hdev = hdev; in hl_cb_create()
299 cb->ctx = ctx; in hl_cb_create()
300 hl_ctx_get(hdev, cb->ctx); in hl_cb_create()
310 rc = cb_map_mem(ctx, cb); in hl_cb_create()
316 rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC); in hl_cb_create()
324 cb->id = (u64) rc; in hl_cb_create()
326 kref_init(&cb->refcount); in hl_cb_create()
327 spin_lock_init(&cb->lock); in hl_cb_create()
333 *handle = cb->id | HL_MMAP_TYPE_CB; in hl_cb_create()
336 hl_debugfs_add_cb(cb); in hl_cb_create()
341 if (cb->is_mmu_mapped) in hl_cb_create()
342 cb_unmap_mem(cb->ctx, cb); in hl_cb_create()
344 hl_ctx_put(cb->ctx); in hl_cb_create()
345 cb_do_release(hdev, cb); in hl_cb_create()
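hl_cb_create() publishes the buffer to userspace through an idr: idr_alloc() hands back a small integer id (starting at 1, so zero is never a valid handle), and the value returned to the caller is that id tagged with HL_MMAP_TYPE_CB so a later mmap offset can be routed to the right object type. A sketch of that publication step; HANDLE_TYPE_CB and its bit position are hypothetical stand-ins:

    #include <linux/bits.h>
    #include <linux/idr.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define HANDLE_TYPE_CB  BIT_ULL(32)     /* hypothetical tag bit */

    static DEFINE_IDR(cb_handles);
    static DEFINE_SPINLOCK(cb_handles_lock);

    static int publish_handle(void *obj, u64 *handle)
    {
            int id;

            spin_lock(&cb_handles_lock);
            /* start at 1 so a zero handle is never valid; end 0 = no upper bound */
            id = idr_alloc(&cb_handles, obj, 1, 0, GFP_ATOMIC);
            spin_unlock(&cb_handles_lock);
            if (id < 0)
                    return id;

            *handle = (u64)id | HANDLE_TYPE_CB;
            return 0;
    }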
354 struct hl_cb *cb; in hl_cb_destroy() local
367 cb = idr_find(&mgr->cb_handles, handle); in hl_cb_destroy()
368 if (cb) { in hl_cb_destroy()
371 kref_put(&cb->refcount, cb_release); in hl_cb_destroy()
385 struct hl_cb *cb; in hl_cb_info() local
397 cb = idr_find(&mgr->cb_handles, handle); in hl_cb_info()
398 if (!cb) { in hl_cb_info()
405 *usage_cnt = atomic_read(&cb->cs_cnt); in hl_cb_info()
468 struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data; in cb_vm_close() local
471 new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start); in cb_vm_close()
474 cb->mmap_size = new_mmap_size; in cb_vm_close()
478 spin_lock(&cb->lock); in cb_vm_close()
479 cb->mmap = false; in cb_vm_close()
480 spin_unlock(&cb->lock); in cb_vm_close()
482 hl_cb_put(cb); in cb_vm_close()
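cb_vm_close() handles partial unmaps: each VM close subtracts the closed range from cb->mmap_size, and only once the whole mapping is gone does it clear cb->mmap under the lock and drop the reference taken at mmap time. A sketch of that vm_operations_struct close hook, with a minimal stand-in object:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct mapped_obj {
            spinlock_t lock;
            bool mmap;              /* a mapping currently exists */
            u32 mmap_size;          /* bytes still mapped */
            struct kref refcount;
    };

    static void mapped_obj_release(struct kref *ref)
    {
            kfree(container_of(ref, struct mapped_obj, refcount));
    }

    static void sketch_vm_close(struct vm_area_struct *vma)
    {
            struct mapped_obj *o = vma->vm_private_data;
            u32 remaining = o->mmap_size - (vma->vm_end - vma->vm_start);

            if (remaining) {        /* partial unmap: just account for it */
                    o->mmap_size = remaining;
                    return;
            }

            spin_lock(&o->lock);
            o->mmap = false;        /* mapping fully gone */
            spin_unlock(&o->lock);

            kref_put(&o->refcount, mapped_obj_release);
    }

    static const struct vm_operations_struct sketch_vm_ops = {
            .close = sketch_vm_close,
    };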
493 struct hl_cb *cb; in hl_cb_mmap() local
504 cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle); in hl_cb_mmap()
505 if (!cb) { in hl_cb_mmap()
513 if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) { in hl_cb_mmap()
516 vma->vm_end - vma->vm_start, cb->size); in hl_cb_mmap()
531 spin_lock(&cb->lock); in hl_cb_mmap()
533 if (cb->mmap) { in hl_cb_mmap()
540 cb->mmap = true; in hl_cb_mmap()
542 spin_unlock(&cb->lock); in hl_cb_mmap()
551 vma->vm_private_data = cb; in hl_cb_mmap()
553 rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address, in hl_cb_mmap()
554 cb->bus_address, cb->size); in hl_cb_mmap()
556 spin_lock(&cb->lock); in hl_cb_mmap()
557 cb->mmap = false; in hl_cb_mmap()
561 cb->mmap_size = cb->size; in hl_cb_mmap()
567 spin_unlock(&cb->lock); in hl_cb_mmap()
569 hl_cb_put(cb); in hl_cb_mmap()
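hl_cb_mmap() enforces single-mapping semantics: the user-supplied size must equal the buffer size rounded up to a page, and cb->mmap is tested and set under cb->lock so two racing mmap() calls cannot both succeed; if the ASIC mmap hook then fails, the flag is rolled back under the same lock. The test-and-set guard in isolation, reusing struct mapped_obj from the sketch above:

    #include <linux/errno.h>
    #include <linux/spinlock.h>

    static int claim_mapping(struct mapped_obj *o)
    {
            int rc = 0;

            spin_lock(&o->lock);
            if (o->mmap)
                    rc = -EINVAL;   /* already mapped once */
            else
                    o->mmap = true;
            spin_unlock(&o->lock);

            return rc;
    }

    /* on a later mmap failure, the owner clears o->mmap again under o->lock */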
576 struct hl_cb *cb; in hl_cb_get() local
579 cb = idr_find(&mgr->cb_handles, handle); in hl_cb_get()
581 if (!cb) { in hl_cb_get()
588 kref_get(&cb->refcount); in hl_cb_get()
592 return cb; in hl_cb_get()
596 void hl_cb_put(struct hl_cb *cb) in hl_cb_put() argument
598 kref_put(&cb->refcount, cb_release); in hl_cb_put()
609 struct hl_cb *cb; in hl_cb_mgr_fini() local
615 idr_for_each_entry(idp, cb, id) { in hl_cb_mgr_fini()
616 if (kref_put(&cb->refcount, cb_release) != 1) in hl_cb_mgr_fini()
619 id, cb->ctx->asid); in hl_cb_mgr_fini()
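hl_cb_mgr_fini() is the forced teardown path: every object still registered in the idr has its reference dropped, and a kref_put() that does not return 1 means something else still holds the object, which the driver logs as a leak. A sketch reusing struct obj and obj_release() from the kref sketch above:

    #include <linux/idr.h>
    #include <linux/kref.h>
    #include <linux/printk.h>

    static void mgr_fini(struct idr *idp)
    {
            struct obj *o;
            int id;

            idr_for_each_entry(idp, o, id) {
                    /* kref_put() returns 1 only if it ran the release callback */
                    if (kref_put(&o->refcount, obj_release) != 1)
                            pr_err("object %d still in use at teardown\n", id);
            }

            idr_destroy(idp);
    }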
629 struct hl_cb *cb; in hl_cb_kernel_create() local
641 cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle); in hl_cb_kernel_create()
643 if (!cb) { in hl_cb_kernel_create()
649 return cb; in hl_cb_kernel_create()
659 struct hl_cb *cb; in hl_cb_pool_init() local
666 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, in hl_cb_pool_init()
668 if (cb) { in hl_cb_pool_init()
669 cb->is_pool = true; in hl_cb_pool_init()
670 list_add(&cb->pool_list, &hdev->cb_pool); in hl_cb_pool_init()
682 struct hl_cb *cb, *tmp; in hl_cb_pool_fini() local
684 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { in hl_cb_pool_fini()
685 list_del(&cb->pool_list); in hl_cb_pool_fini()
686 cb_fini(hdev, cb); in hl_cb_pool_fini()
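The pool paths close the loop on cb_do_release() above: hl_cb_pool_init() pre-allocates fixed-size buffers and marks them is_pool, so release pushes them back onto hdev->cb_pool instead of freeing them, and hl_cb_pool_fini() destroys the list with the _safe iterator because each entry is unlinked while walking. The recycle-or-destroy decision in sketch form; the pool lock is assumed, since the listing does not show it:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct pooled_obj {
            struct list_head pool_list;
            bool is_pool;
    };

    static LIST_HEAD(pool);                 /* stand-in for hdev->cb_pool */
    static DEFINE_SPINLOCK(pool_lock);      /* assumed, not shown above */

    static void sketch_release(struct pooled_obj *p)
    {
            if (p->is_pool) {
                    /* recycle for the next allocation instead of freeing */
                    spin_lock(&pool_lock);
                    list_add(&p->pool_list, &pool);
                    spin_unlock(&pool_lock);
            } else {
                    /* non-pool object: free backing memory and descriptor */
                    kfree(p);
            }
    }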