Lines matching refs: binding
58 struct net_devmem_dmabuf_binding *binding = container_of(wq, typeof(*binding), unbind_w); in __net_devmem_dmabuf_binding_free() local
62 gen_pool_for_each_chunk(binding->chunk_pool, in __net_devmem_dmabuf_binding_free()
65 size = gen_pool_size(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
66 avail = gen_pool_avail(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
70 gen_pool_destroy(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
72 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in __net_devmem_dmabuf_binding_free()
73 binding->direction); in __net_devmem_dmabuf_binding_free()
74 dma_buf_detach(binding->dmabuf, binding->attachment); in __net_devmem_dmabuf_binding_free()
75 dma_buf_put(binding->dmabuf); in __net_devmem_dmabuf_binding_free()
76 xa_destroy(&binding->bound_rxqs); in __net_devmem_dmabuf_binding_free()
77 kvfree(binding->tx_vec); in __net_devmem_dmabuf_binding_free()
78 kfree(binding); in __net_devmem_dmabuf_binding_free()
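
Taken together, the references in __net_devmem_dmabuf_binding_free() describe the teardown order. A hedged sketch reconstructed from those fragments (the sketch_* names, the WARN condition, and the chunk-callback body are my assumptions, not the verbatim kernel code):

static void sketch_release_chunk_owner(struct gen_pool *pool,
				       struct gen_pool_chunk *chunk, void *arg)
{
	/* Hypothetical: release the per-chunk owner state before the pool
	 * itself is destroyed.
	 */
}

static void sketch_binding_free(struct work_struct *wq)
{
	struct net_devmem_dmabuf_binding *binding =
		container_of(wq, typeof(*binding), unbind_w);
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				sketch_release_chunk_owner, NULL);

	/* Assumed: every PAGE_SIZE slice must be back in the pool by now. */
	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);
	if (!WARN(size != avail, "freeing binding with outstanding allocations"))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  binding->direction);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kvfree(binding->tx_vec);
	kfree(binding);
}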
82 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_alloc_dmabuf() argument
90 dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE, in net_devmem_alloc_dmabuf()
108 struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov); in net_devmem_free_dmabuf() local
111 if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr, in net_devmem_free_dmabuf()
115 gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE); in net_devmem_free_dmabuf()
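
The two functions above are the per-net_iov allocator: each net_iov stands for one PAGE_SIZE slice of the dma-buf, handed out of and returned to the binding's genpool. A hedged sketch (the owner layout beyond owner->binding/owner->area and the net_devmem_get_dma_addr() helper are assumptions):

struct net_iov *sketch_alloc_niov(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	size_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	/* Assumed owner layout: the chunk owner records its base DMA address
	 * and carries the net_iov array covering its segment of the dma-buf.
	 */
	index = (dma_addr - owner->base_dma_addr) / PAGE_SIZE;
	return &owner->area.niovs[index];
}

void sketch_free_niov(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	dma_addr_t dma_addr = net_devmem_get_dma_addr(niov); /* assumed helper */

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}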
118 void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_unbind_dmabuf() argument
124 xa_erase(&net_devmem_dmabuf_bindings, binding->id); in net_devmem_unbind_dmabuf()
131 if (binding->list.next) in net_devmem_unbind_dmabuf()
132 list_del(&binding->list); in net_devmem_unbind_dmabuf()
134 xa_for_each(&binding->bound_rxqs, xa_idx, rxq) { in net_devmem_unbind_dmabuf()
136 .mp_priv = binding, in net_devmem_unbind_dmabuf()
142 __net_mp_close_rxq(binding->dev, rxq_idx, &mp_params); in net_devmem_unbind_dmabuf()
145 net_devmem_dmabuf_binding_put(binding); in net_devmem_unbind_dmabuf()
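
The unbind references above read as: drop the global id, unlink the binding from its owner's list, close every rx queue still using it as a memory provider, then drop the initial reference so the free work above can run. A hedged sketch (locking and error handling elided; only __net_mp_close_rxq() and the fields shown above are taken from the source):

void sketch_unbind(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	/* Only unlink if the binding ever made it onto a bindings list. */
	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		struct pp_memory_provider_params mp_params = {
			.mp_priv = binding,
		};
		unsigned int rxq_idx = get_netdev_rx_queue_index(rxq);

		__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
	}

	net_devmem_dmabuf_binding_put(binding);
}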
149 struct net_devmem_dmabuf_binding *binding, in net_devmem_bind_dmabuf_to_queue() argument
153 .mp_priv = binding, in net_devmem_bind_dmabuf_to_queue()
165 err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b, in net_devmem_bind_dmabuf_to_queue()
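
net_devmem_bind_dmabuf_to_queue() is the inverse of the close call above: remember the queue in bound_rxqs, then restart it with the binding installed as its page_pool memory provider. A hedged sketch; the __net_mp_open_rxq() name and signature are assumed as the counterpart of __net_mp_close_rxq(), and the real params presumably also name the provider ops:

int sketch_bind_to_queue(struct net_device *dev, u32 rxq_idx,
			 struct net_devmem_dmabuf_binding *binding,
			 struct netlink_ext_ack *extack)
{
	struct pp_memory_provider_params mp_params = {
		.mp_priv = binding,	/* .mp_ops elided/assumed */
	};
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	u32 xa_idx;
	int err;

	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		return err;

	/* Assumed counterpart of __net_mp_close_rxq(): restarts the queue
	 * with the dmabuf memory provider installed.
	 */
	err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
	if (err)
		xa_erase(&binding->bound_rxqs, xa_idx);

	return err;
}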
183 struct net_devmem_dmabuf_binding *binding; in net_devmem_bind_dmabuf() local
195 binding = kzalloc_node(sizeof(*binding), GFP_KERNEL, in net_devmem_bind_dmabuf()
197 if (!binding) { in net_devmem_bind_dmabuf()
202 binding->dev = dev; in net_devmem_bind_dmabuf()
203 xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC); in net_devmem_bind_dmabuf()
205 refcount_set(&binding->ref, 1); in net_devmem_bind_dmabuf()
207 mutex_init(&binding->lock); in net_devmem_bind_dmabuf()
209 binding->dmabuf = dmabuf; in net_devmem_bind_dmabuf()
210 binding->direction = direction; in net_devmem_bind_dmabuf()
212 binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent); in net_devmem_bind_dmabuf()
213 if (IS_ERR(binding->attachment)) { in net_devmem_bind_dmabuf()
214 err = PTR_ERR(binding->attachment); in net_devmem_bind_dmabuf()
219 binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment, in net_devmem_bind_dmabuf()
221 if (IS_ERR(binding->sgt)) { in net_devmem_bind_dmabuf()
222 err = PTR_ERR(binding->sgt); in net_devmem_bind_dmabuf()
228 binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE, in net_devmem_bind_dmabuf()
231 if (!binding->tx_vec) { in net_devmem_bind_dmabuf()
241 binding->chunk_pool = gen_pool_create(PAGE_SHIFT, in net_devmem_bind_dmabuf()
243 if (!binding->chunk_pool) { in net_devmem_bind_dmabuf()
249 for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) { in net_devmem_bind_dmabuf()
265 owner->binding = binding; in net_devmem_bind_dmabuf()
267 err = gen_pool_add_owner(binding->chunk_pool, dma_addr, in net_devmem_bind_dmabuf()
291 binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov; in net_devmem_bind_dmabuf()
297 err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id, in net_devmem_bind_dmabuf()
298 binding, xa_limit_32b, &id_alloc_next, in net_devmem_bind_dmabuf()
303 list_add(&binding->list, &priv->bindings); in net_devmem_bind_dmabuf()
305 return binding; in net_devmem_bind_dmabuf()
308 gen_pool_for_each_chunk(binding->chunk_pool, in net_devmem_bind_dmabuf()
310 gen_pool_destroy(binding->chunk_pool); in net_devmem_bind_dmabuf()
312 kvfree(binding->tx_vec); in net_devmem_bind_dmabuf()
314 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in net_devmem_bind_dmabuf()
317 dma_buf_detach(dmabuf, binding->attachment); in net_devmem_bind_dmabuf()
319 kfree(binding); in net_devmem_bind_dmabuf()
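
net_devmem_bind_dmabuf() builds everything the free path tears down: allocate and init the binding (bound_rxqs xarray, refcount, lock), dma_buf_attach() and map the attachment to get an sg-table, allocate tx_vec for TX bindings, create a PAGE_SHIFT-order genpool, seed it from the sg-table, then publish the id in the global xarray and link the binding on the caller's bindings list; the error tail above unwinds the same steps in reverse. A condensed, hedged sketch of the sg-table walk (owner fields beyond binding and area.base_virtual are assumptions, and unwinding of earlier chunks is elided):

static int sketch_seed_chunk_pool(struct net_devmem_dmabuf_binding *binding,
				  struct net_device *dev)
{
	unsigned long virtual = 0;
	struct scatterlist *sg;
	unsigned int sg_idx;
	int err;

	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);
		struct dmabuf_genpool_chunk_owner *owner;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner)
			return -ENOMEM;

		owner->area.base_virtual = virtual;	/* used by the tx_vec indexing above */
		owner->binding = binding;

		/* One genpool chunk per DMA segment; the owner rides along so
		 * gen_pool_alloc_owner() can map an address back to its niovs.
		 */
		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len,
					 dev_to_node(&dev->dev), owner);
		if (err) {
			kfree(owner);
			return err;
		}

		/* For TX bindings, each PAGE_SIZE slot of this segment also
		 * lands in binding->tx_vec[area.base_virtual / PAGE_SIZE + i],
		 * as in the fragment above, so byte offsets resolve to niovs.
		 */
		virtual += len;
	}

	return 0;
}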
327 struct net_devmem_dmabuf_binding *binding; in net_devmem_lookup_dmabuf() local
330 binding = xa_load(&net_devmem_dmabuf_bindings, id); in net_devmem_lookup_dmabuf()
331 if (binding) { in net_devmem_lookup_dmabuf()
332 if (!net_devmem_dmabuf_binding_get(binding)) in net_devmem_lookup_dmabuf()
333 binding = NULL; in net_devmem_lookup_dmabuf()
337 return binding; in net_devmem_lookup_dmabuf()
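
Lookup by id is a plain xa_load() on the global bindings xarray, with the twist that the reference is only taken if the binding is still live; net_devmem_dmabuf_binding_get() can evidently fail, so it is presumably refcount_inc_not_zero() based. A hedged sketch (the RCU protection is an assumption):

struct net_devmem_dmabuf_binding *sketch_lookup(u32 id)
{
	struct net_devmem_dmabuf_binding *binding;

	rcu_read_lock();	/* assumed; the real locking may differ */
	binding = xa_load(&net_devmem_dmabuf_bindings, id);
	if (binding && !net_devmem_dmabuf_binding_get(binding))
		binding = NULL;
	rcu_read_unlock();

	return binding;
}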
353 struct net_devmem_dmabuf_binding *binding; in net_devmem_get_binding() local
357 binding = net_devmem_lookup_dmabuf(dmabuf_id); in net_devmem_get_binding()
358 if (!binding || !binding->tx_vec) { in net_devmem_get_binding()
366 if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) { in net_devmem_get_binding()
371 return binding; in net_devmem_get_binding()
374 if (binding) in net_devmem_get_binding()
375 net_devmem_dmabuf_binding_put(binding); in net_devmem_get_binding()
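
On the TX side, a dmabuf id supplied by userspace is only usable if the binding exists, was bound for TX (tx_vec allocated), and belongs to the device the socket actually routes over. A hedged sketch; the dst lookup and the exact error codes are assumptions:

struct net_devmem_dmabuf_binding *
sketch_get_tx_binding(struct sock *sk, unsigned int dmabuf_id)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct net_devmem_dmabuf_binding *binding;
	int err;

	binding = net_devmem_lookup_dmabuf(dmabuf_id);
	if (!binding || !binding->tx_vec) {
		err = -EINVAL;
		goto err_put;
	}

	/* The binding must belong to the device this flow egresses on. */
	if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
		err = -ENODEV;
		goto err_put;
	}

	return binding;

err_put:
	if (binding)
		net_devmem_dmabuf_binding_put(binding);
	return ERR_PTR(err);
}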
381 net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, in net_devmem_get_niov_at() argument
384 if (virt_addr >= binding->dmabuf->size) in net_devmem_get_niov_at()
390 return binding->tx_vec[virt_addr / PAGE_SIZE]; in net_devmem_get_niov_at()
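
Resolving a byte offset into the dma-buf is then a bounds check plus an index into tx_vec, one slot per PAGE_SIZE; a minimal sketch (any off/size out-parameters of the real signature are omitted):

struct net_iov *
sketch_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t virt_addr)
{
	if (virt_addr >= binding->dmabuf->size)
		return NULL;

	return binding->tx_vec[virt_addr / PAGE_SIZE];
}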
397 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_init() local
399 if (!binding) in mp_dmabuf_devmem_init()
411 net_devmem_dmabuf_binding_get(binding); in mp_dmabuf_devmem_init()
417 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_alloc_netmems() local
421 niov = net_devmem_alloc_dmabuf(binding); in mp_dmabuf_devmem_alloc_netmems()
436 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_destroy() local
438 net_devmem_dmabuf_binding_put(binding); in mp_dmabuf_devmem_destroy()
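
The three mp_dmabuf_devmem_* hooks above are the page_pool memory-provider glue: init pins the binding for the pool's lifetime, alloc_netmems hands out net_iovs from the genpool, destroy drops the pool's reference. A hedged sketch (hook signatures follow the fragments; per-netmem bookkeeping is elided):

static int sketch_mp_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

static netmem_ref sketch_mp_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov = net_devmem_alloc_dmabuf(binding);

	if (!niov)
		return 0;	/* no netmem available */

	return net_iov_to_netmem(niov);
}

static void sketch_mp_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}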
462 const struct net_devmem_dmabuf_binding *binding = mp_priv; in mp_dmabuf_devmem_nl_fill() local
465 return nla_put_u32(rsp, type, binding->id); in mp_dmabuf_devmem_nl_fill()
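
The netlink fill hook simply reports which binding backs the queue or page_pool being dumped; the attribute constants below are my assumption, only the nla_put_u32() of binding->id is taken from the fragment:

static int sketch_mp_nl_fill(void *mp_priv, struct sk_buff *rsp,
			     struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}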
471 struct net_devmem_dmabuf_binding *binding = mp_priv; in mp_dmabuf_devmem_uninstall() local
475 xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) { in mp_dmabuf_devmem_uninstall()
477 xa_erase(&binding->bound_rxqs, xa_idx); in mp_dmabuf_devmem_uninstall()
478 if (xa_empty(&binding->bound_rxqs)) { in mp_dmabuf_devmem_uninstall()
479 mutex_lock(&binding->lock); in mp_dmabuf_devmem_uninstall()
480 binding->dev = NULL; in mp_dmabuf_devmem_uninstall()
481 mutex_unlock(&binding->lock); in mp_dmabuf_devmem_uninstall()
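
Finally, uninstall runs when a queue stops using the provider underneath the binding: the queue is forgotten, and once no bound queues remain, binding->dev is cleared under binding->lock so a later unbind does not touch a stale netdev. A hedged sketch (the comparison and early break are assumptions):

static void sketch_mp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq != rxq)
			continue;

		xa_erase(&binding->bound_rxqs, xa_idx);
		if (xa_empty(&binding->bound_rxqs)) {
			mutex_lock(&binding->lock);
			binding->dev = NULL;
			mutex_unlock(&binding->lock);
		}
		break;
	}
}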