Lines matching references to "cb" in the habanalabs command-buffer code. Each entry shows the source line number, the matching line, and the containing function:

17 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)  in cb_map_mem()  argument
36 if (cb->is_mmu_mapped) in cb_map_mem()
39 cb->roundup_size = roundup(cb->size, page_size); in cb_map_mem()
41 cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size); in cb_map_mem()
42 if (!cb->virtual_addr) { in cb_map_mem()
48 rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size); in cb_map_mem()
50 dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr); in cb_map_mem()
56 cb->is_mmu_mapped = true; in cb_map_mem()
61 gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size); in cb_map_mem()
65 static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb) in cb_unmap_mem() argument
70 hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size); in cb_unmap_mem()
74 gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size); in cb_unmap_mem()
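
The two functions above form a map/unmap pair: cb_map_mem() rounds the CB size up to the MMU page size, reserves a device-virtual range from the per-context genalloc pool, and maps it to the CB's bus address, rolling the reservation back if the mapping fails; cb_unmap_mem() reverses both steps. Below is a minimal sketch of that pattern, assuming the driver's own headers and types; the _sketch names, the page_size parameter (its source is not visible in the listing), and the -ENOMEM code are illustrative, and the error logging and any MMU locking or cache invalidation done by the real driver are omitted.

static int cb_map_mem_sketch(struct hl_ctx *ctx, struct hl_cb *cb,
			     u32 page_size)
{
	int rc;

	if (cb->is_mmu_mapped)
		return 0;

	/* The MMU maps whole pages, so round the CB size up first. */
	cb->roundup_size = roundup(cb->size, page_size);

	/* Reserve a device-virtual range from the per-context VA pool. */
	cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool,
						cb->roundup_size);
	if (!cb->virtual_addr)
		return -ENOMEM;

	/* Point the reserved range at the CB's DMA (bus) address. */
	rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address,
				   cb->roundup_size);
	if (rc) {
		/* Roll back the VA reservation if the mapping failed. */
		gen_pool_free(ctx->cb_va_pool, cb->virtual_addr,
			      cb->roundup_size);
		return rc;
	}

	cb->is_mmu_mapped = true;
	return 0;
}

static void cb_unmap_mem_sketch(struct hl_ctx *ctx, struct hl_cb *cb)
{
	hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
	gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
}
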
77 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) in cb_fini() argument
79 if (cb->is_internal) in cb_fini()
81 (uintptr_t)cb->kernel_address, cb->size); in cb_fini()
83 hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address); in cb_fini()
85 kfree(cb); in cb_fini()
88 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) in cb_do_release() argument
90 if (cb->is_pool) { in cb_do_release()
91 atomic_set(&cb->is_handle_destroyed, 0); in cb_do_release()
93 list_add(&cb->pool_list, &hdev->cb_pool); in cb_do_release()
96 cb_fini(hdev, cb); in cb_do_release()
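
cb_do_release() decides between recycling and freeing: a pool-backed CB has its destroy flag cleared and goes back onto hdev->cb_pool, while anything else goes through cb_fini(), which frees the DMA-coherent buffer (or, for internal CBs, returns the space to an internal pool whose callee is not shown here) and kfree()s the descriptor. A sketch follows; the cb_pool_lock spinlock name is an assumption, since the lock protecting the list is not visible in the listing.

static void cb_do_release_sketch(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		/* Recycle: clear the flag so the next owner can destroy it again. */
		atomic_set(&cb->is_handle_destroyed, 0);

		spin_lock(&hdev->cb_pool_lock);		/* lock name assumed */
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		/* Not pooled: release the DMA memory and the descriptor itself. */
		cb_fini(hdev, cb);
	}
}
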
103 struct hl_cb *cb = NULL; in hl_cb_alloc() local
116 cb = kzalloc(sizeof(*cb), GFP_ATOMIC); in hl_cb_alloc()
118 if (!cb) in hl_cb_alloc()
119 cb = kzalloc(sizeof(*cb), GFP_KERNEL); in hl_cb_alloc()
121 if (!cb) in hl_cb_alloc()
127 kfree(cb); in hl_cb_alloc()
132 cb->is_internal = true; in hl_cb_alloc()
133 cb->bus_address = hdev->internal_cb_va_base + cb_offset; in hl_cb_alloc()
135 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC); in hl_cb_alloc()
137 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL); in hl_cb_alloc()
139 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, in hl_cb_alloc()
147 kfree(cb); in hl_cb_alloc()
151 cb->kernel_address = p; in hl_cb_alloc()
152 cb->size = cb_size; in hl_cb_alloc()
154 return cb; in hl_cb_alloc()
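
hl_cb_alloc() uses a two-step allocation: try GFP_ATOMIC first, which is cheap when memory is readily available and safe for callers that cannot sleep, and only fall back to GFP_KERNEL, which may sleep and reclaim, if that fails; the same fallback is applied to the DMA-coherent buffer. The sketch below collapses the third allocation call visible at line 139 (its flags are cut off in the listing) and omits the internal-CB branch from lines 132-133 as well as the error logging; the _sketch name is illustrative.

static struct hl_cb *hl_cb_alloc_sketch(struct hl_device *hdev, u32 cb_size)
{
	struct hl_cb *cb;
	void *p;

	/* Opportunistic GFP_ATOMIC first ... */
	cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	if (!cb)
		/* ... then retry with GFP_KERNEL, which may sleep and reclaim. */
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	/* Same fallback for the DMA-coherent buffer behind the CB. */
	p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
				       GFP_ATOMIC);
	if (!p)
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
					       GFP_KERNEL);
	if (!p) {
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;
	return cb;
}
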
167 struct hl_cb *cb = buf->private; in hl_cb_mmap_mem_release() local
169 hl_debugfs_remove_cb(cb); in hl_cb_mmap_mem_release()
171 if (cb->is_mmu_mapped) in hl_cb_mmap_mem_release()
172 cb_unmap_mem(cb->ctx, cb); in hl_cb_mmap_mem_release()
174 hl_ctx_put(cb->ctx); in hl_cb_mmap_mem_release()
176 cb_do_release(cb->hdev, cb); in hl_cb_mmap_mem_release()
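
hl_cb_mmap_mem_release() is the buffer destructor: it removes the CB from debugfs, undoes cb_map_mem() if the CB was MMU-mapped, drops the context reference taken at allocation time, and finally hands the CB to cb_do_release(). The sketch below mirrors the listed lines; the struct hl_mmap_mem_buf parameter type is an assumption, since the listing only shows buf->private.

static void hl_cb_mmap_mem_release_sketch(struct hl_mmap_mem_buf *buf)
{
	struct hl_cb *cb = buf->private;

	hl_debugfs_remove_cb(cb);

	/* Undo cb_map_mem() before the context reference goes away. */
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(cb->hdev, cb);
}
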
182 struct hl_cb *cb; in hl_cb_mmap_mem_alloc() local
196 cb = list_first_entry(&cb_args->hdev->cb_pool, in hl_cb_mmap_mem_alloc()
197 typeof(*cb), pool_list); in hl_cb_mmap_mem_alloc()
198 list_del(&cb->pool_list); in hl_cb_mmap_mem_alloc()
209 cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb); in hl_cb_mmap_mem_alloc()
210 if (!cb) in hl_cb_mmap_mem_alloc()
214 cb->hdev = cb_args->hdev; in hl_cb_mmap_mem_alloc()
215 cb->ctx = cb_args->ctx; in hl_cb_mmap_mem_alloc()
216 cb->buf = buf; in hl_cb_mmap_mem_alloc()
217 cb->buf->mappable_size = cb->size; in hl_cb_mmap_mem_alloc()
218 cb->buf->private = cb; in hl_cb_mmap_mem_alloc()
220 hl_ctx_get(cb->ctx); in hl_cb_mmap_mem_alloc()
230 rc = cb_map_mem(cb_args->ctx, cb); in hl_cb_mmap_mem_alloc()
235 hl_debugfs_add_cb(cb); in hl_cb_mmap_mem_alloc()
240 hl_ctx_put(cb->ctx); in hl_cb_mmap_mem_alloc()
241 cb_do_release(cb_args->hdev, cb); in hl_cb_mmap_mem_alloc()
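
hl_cb_mmap_mem_alloc() prefers recycling: if a suitable CB is sitting on hdev->cb_pool it is unlinked and reused (lines 196-198), and only otherwise is a fresh one allocated via hl_cb_alloc() (line 209); the new CB is then wired to its device, context and buffer, a context reference is taken, and the CB is optionally MMU-mapped and registered with debugfs. A sketch of just the pool-first grab, assuming a cb_pool_lock spinlock and an empty-list check that are not visible in the listing:

static struct hl_cb *cb_pool_grab_sketch(struct hl_device *hdev)
{
	struct hl_cb *cb = NULL;

	spin_lock(&hdev->cb_pool_lock);
	if (!list_empty(&hdev->cb_pool)) {
		/* Reuse a preallocated CB instead of hitting the DMA allocator. */
		cb = list_first_entry(&hdev->cb_pool, typeof(*cb), pool_list);
		list_del(&cb->pool_list);
	}
	spin_unlock(&hdev->cb_pool_lock);

	return cb;	/* NULL means the caller must allocate a fresh CB */
}
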
249 struct hl_cb *cb = buf->private; in hl_cb_mmap() local
251 return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address, in hl_cb_mmap()
252 cb->bus_address, cb->size); in hl_cb_mmap()
302 struct hl_cb *cb; in hl_cb_destroy() local
305 cb = hl_cb_get(mmg, cb_handle); in hl_cb_destroy()
306 if (!cb) { in hl_cb_destroy()
313 rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1); in hl_cb_destroy()
314 hl_cb_put(cb); in hl_cb_destroy()
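
hl_cb_destroy() guards against a handle being destroyed twice: atomic_cmpxchg() flips is_handle_destroyed from 0 to 1 exactly once, and a non-zero return value means another caller already destroyed this handle. A sketch under the assumption that a repeated destroy is rejected with -EINVAL; the struct hl_mem_mgr type of the mmg argument, the error codes, and the final teardown call are not shown in the listing.

static int hl_cb_destroy_sketch(struct hl_mem_mgr *mmg, u64 cb_handle)
{
	struct hl_cb *cb;
	int rc;

	cb = hl_cb_get(mmg, cb_handle);
	if (!cb)
		return -EINVAL;

	/* cmpxchg returns the old value: non-zero means already destroyed. */
	rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
	hl_cb_put(cb);		/* drop the reference taken by hl_cb_get() */
	if (rc)
		return -EINVAL;

	/* The final reference drop (not shown in the listing) frees the CB. */
	return 0;
}
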
334 struct hl_cb *cb; in hl_cb_info() local
337 cb = hl_cb_get(mmg, handle); in hl_cb_info()
338 if (!cb) { in hl_cb_info()
345 if (cb->is_mmu_mapped) { in hl_cb_info()
346 *device_va = cb->virtual_addr; in hl_cb_info()
353 *usage_cnt = atomic_read(&cb->cs_cnt); in hl_cb_info()
357 hl_cb_put(cb); in hl_cb_info()
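
hl_cb_info() reports one of two things about a looked-up CB: its device virtual address, which only exists once the CB is MMU-mapped, or its usage count (cs_cnt, the number of command submissions currently referencing it). The listing does not show how the caller selects between the two, so the sketch below uses an illustrative want_va flag; the struct hl_mem_mgr type and the -EINVAL codes are likewise assumptions.

static int hl_cb_info_sketch(struct hl_mem_mgr *mmg, u64 handle, bool want_va,
			     u64 *device_va, u32 *usage_cnt)
{
	struct hl_cb *cb;
	int rc = 0;

	cb = hl_cb_get(mmg, handle);
	if (!cb)
		return -EINVAL;

	if (want_va) {
		/* The device VA is only valid once the CB is MMU-mapped. */
		if (cb->is_mmu_mapped)
			*device_va = cb->virtual_addr;
		else
			rc = -EINVAL;
	} else {
		/* Otherwise report how many command submissions use this CB. */
		*usage_cnt = atomic_read(&cb->cs_cnt);
	}

	hl_cb_put(cb);
	return rc;
}
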
435 void hl_cb_put(struct hl_cb *cb) in hl_cb_put() argument
437 hl_mmap_mem_buf_put(cb->buf); in hl_cb_put()
444 struct hl_cb *cb; in hl_cb_kernel_create() local
455 cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle); in hl_cb_kernel_create()
457 if (!cb) { in hl_cb_kernel_create()
463 return cb; in hl_cb_kernel_create()
473 struct hl_cb *cb; in hl_cb_pool_init() local
480 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, in hl_cb_pool_init()
482 if (cb) { in hl_cb_pool_init()
483 cb->is_pool = true; in hl_cb_pool_init()
484 list_add(&cb->pool_list, &hdev->cb_pool); in hl_cb_pool_init()
496 struct hl_cb *cb, *tmp; in hl_cb_pool_fini() local
498 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { in hl_cb_pool_fini()
499 list_del(&cb->pool_list); in hl_cb_pool_fini()
500 cb_fini(hdev, cb); in hl_cb_pool_fini()
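
hl_cb_pool_init() preallocates CBs of asic_prop.cb_pool_cb_size bytes, marks each as is_pool, and queues them on hdev->cb_pool; hl_cb_pool_fini() drains that list. The teardown needs list_for_each_entry_safe() because cb_fini() frees each entry while the walk is still in progress. A sketch mirroring the listed lines (only the _sketch name is added):

static void hl_cb_pool_fini_sketch(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	/* _safe variant: cb is freed by cb_fini(), so tmp keeps the cursor valid. */
	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}
}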