// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Devmem TCP
 *
 *	Authors:	Mina Almasry <almasrymina@google.com>
 *			Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *			Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/sock.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

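/* True if @niov is backed by a dma-buf binding. */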
bool net_is_devmem_iov(struct net_iov *niov)
{
	return niov->type == NET_IOV_DMABUF;
}

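/* gen_pool_for_each_chunk() callback: free a chunk's net_iov array and the
 * chunk owner itself.
 */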
static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

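/* Compute the DMA address backing @niov from the owning chunk's base DMA
 * address and the niov's index within that chunk.
 */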
static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}

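/* Work item that tears down a binding once the last reference is gone: free
 * the chunk owners, destroy the genpool (warning if allocations are still
 * outstanding), unmap and detach the dma-buf, and free the binding itself.
 */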
void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
{
	struct net_devmem_dmabuf_binding *binding = container_of(wq, typeof(*binding), unbind_w);
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  binding->direction);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kvfree(binding->tx_vec);
	kfree(binding);
}

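/* Carve one PAGE_SIZE chunk out of the binding's genpool and return the
 * matching net_iov with its page_pool state reset, or NULL if the pool is
 * exhausted.
 */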
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->pp_magic = 0;
	niov->pp = NULL;
	atomic_long_set(&niov->pp_ref_count, 0);

	return niov;
}

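/* Return the PAGE_SIZE chunk backing @niov to the binding's genpool. */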
void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

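/* Undo net_devmem_bind_dmabuf(): remove the binding from the global xarray,
 * wait out concurrent tx lookups, close any rx queues still bound to it and
 * drop the initial reference.
 */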
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	/* Ensure no tx-side net_devmem_lookup_dmabuf() calls are in flight
	 * after the erase.
	 */
	synchronize_net();

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		const struct pp_memory_provider_params mp_params = {
			.mp_priv	= binding,
			.mp_ops		= &dmabuf_devmem_ops,
		};

		rxq_idx = get_netdev_rx_queue_index(rxq);

		__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
	}

	net_devmem_dmabuf_binding_put(binding);
}

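/* Install @binding as the memory provider for rx queue @rxq_idx and record
 * the queue in binding->bound_rxqs; the queue is closed again if tracking it
 * fails.
 */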
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct pp_memory_provider_params mp_params = {
		.mp_priv	= binding,
		.mp_ops		= &dmabuf_devmem_ops,
	};
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
	if (err)
		return err;

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		goto err_close_rxq;

	return 0;

err_close_rxq:
	__net_mp_close_rxq(dev, rxq_idx, &mp_params);
	return err;
}

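/* Attach and DMA-map a dma-buf fd for @dev, carve the resulting scatterlist
 * into PAGE_SIZE net_iovs tracked by a genpool, and register the binding in
 * the global xarray. DMA_TO_DEVICE bindings also get a tx_vec that maps
 * dmabuf offsets to net_iovs.
 *
 * Sketch of a typical caller (illustrative only, error handling trimmed):
 *
 *	binding = net_devmem_bind_dmabuf(dev, DMA_FROM_DEVICE, fd, priv,
 *					 extack);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 */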
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;
	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	mutex_init(&binding->lock);

	binding->dmabuf = dmabuf;
	binding->direction = direction;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_binding;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       direction);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	if (direction == DMA_TO_DEVICE) {
		binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE,
						 sizeof(struct net_iov *),
						 GFP_KERNEL);
		if (!binding->tx_vec) {
			err = -ENOMEM;
			goto err_unmap;
		}
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool = gen_pool_create(PAGE_SHIFT,
					      dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_tx_vec;
	}

	/* Walk the mapped scatterlist: each DMA-contiguous entry becomes one
	 * genpool chunk with its own owner and net_iov array.
	 */
	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
						   sizeof(*owner->area.niovs),
						   GFP_KERNEL);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->type = NET_IOV_DMABUF;
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
			if (direction == DMA_TO_DEVICE)
				binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
		}

		virtual += len;
	}

	/* Publish the binding so it can be looked up by id. */
	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_chunks;

	list_add(&binding->list, &priv->bindings);

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_tx_vec:
	kvfree(binding->tx_vec);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  direction);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

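/* Look up a binding by id under RCU and take a reference; returns NULL if the
 * id is unknown or the binding is already being torn down.
 */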
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	struct net_devmem_dmabuf_binding *binding;

	rcu_read_lock();
	binding = xa_load(&net_devmem_dmabuf_bindings, id);
	if (binding) {
		if (!net_devmem_dmabuf_binding_get(binding))
			binding = NULL;
	}
	rcu_read_unlock();

	return binding;
}

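/* Pin (get) and unpin (put) the dma-buf binding that backs @niov. */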
void net_devmem_get_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
}

void net_devmem_put_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
}

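/* Resolve @dmabuf_id to a tx-capable binding and verify that the socket's
 * cached route egresses via the device the dma-buf is bound to. Returns the
 * binding with a reference held, or an ERR_PTR.
 */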
struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
							 unsigned int dmabuf_id)
{
	struct net_devmem_dmabuf_binding *binding;
	struct dst_entry *dst = __sk_dst_get(sk);
	int err = 0;

	binding = net_devmem_lookup_dmabuf(dmabuf_id);
	if (!binding || !binding->tx_vec) {
		err = -EINVAL;
		goto out_err;
	}

	/* The dma addrs in this binding are only usable by the corresponding
	 * net_device.
	 */
	if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
		err = -ENODEV;
		goto out_err;
	}

	return binding;

out_err:
	if (binding)
		net_devmem_dmabuf_binding_put(binding);

	return ERR_PTR(err);
}

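/* Translate a dmabuf offset into the net_iov covering it. On success, @off is
 * the offset within that niov's page and @size the bytes left in it.
 *
 * A tx path can walk a buffer roughly like this (sketch only):
 *
 *	while (len) {
 *		niov = net_devmem_get_niov_at(binding, virt, &off, &avail);
 *		if (!niov)
 *			break;
 *		chunk = min(avail, len);
 *		... hand off chunk bytes of niov starting at off ...
 *		virt += chunk;
 *		len -= chunk;
 *	}
 */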
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding,
		       size_t virt_addr, size_t *off, size_t *size)
{
	if (virt_addr >= binding->dmabuf->size)
		return NULL;

	*off = virt_addr % PAGE_SIZE;
	*size = PAGE_SIZE - *off;

	return binding->tx_vec[virt_addr / PAGE_SIZE];
}

/*** "Dmabuf devmem memory provider" ***/

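/* Page pool init hook: only order-0 pools are supported, dma_sync is forced
 * off, and a reference is taken on the binding for the lifetime of the pool.
 */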
int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf dma addresses need not and must not be used with
	 * dma_sync_for_cpu/device. Force-disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

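/* Allocate a net_iov from the binding and hand it to the page pool as a
 * netmem, bumping the pool's hold count.
 */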
netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

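/* Drop the binding reference taken in mp_dmabuf_devmem_init(). */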
void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

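/* Return a net_iov to the binding's genpool when the page pool releases it.
 * Always returns false so the page pool never put_page()s a net_iov.
 */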
bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

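/* Report the dma-buf binding id over netlink, as a queue attribute when @rxq
 * is given and as a page-pool attribute otherwise.
 */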
static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

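/* Called when @rxq drops its memory provider: unlink the queue from the
 * binding and, once no queues remain bound, clear binding->dev under the
 * binding lock.
 */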
static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			if (xa_empty(&binding->bound_rxqs)) {
				mutex_lock(&binding->lock);
				binding->dev = NULL;
				mutex_unlock(&binding->lock);
			}
			break;
		}
	}
}

static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init			= mp_dmabuf_devmem_init,
	.destroy		= mp_dmabuf_devmem_destroy,
	.alloc_netmems		= mp_dmabuf_devmem_alloc_netmems,
	.release_netmem		= mp_dmabuf_devmem_release_page,
	.nl_fill		= mp_dmabuf_devmem_nl_fill,
	.uninstall		= mp_dmabuf_devmem_uninstall,
};