// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>

struct gro_cell {
	struct sk_buff_head napi_skbs;
	struct napi_struct napi;
};
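
/* Queue @skb on the current CPU's gro_cell, so it gets a chance to be
 * merged by GRO from NAPI context instead of going one at a time
 * through netif_rx(). Callers run in BH context, which is what makes
 * the lockless per-CPU queue below safe.
 */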
int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct gro_cell *cell;
	int res;

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

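	/* Fall back to the normal backlog if this device has no cells,
	 * if GRO cannot be applied to this skb (it is cloned), or if the
	 * device elides GRO (feature disabled or an XDP program attached).
	 */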
	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
		res = netif_rx(skb);
		goto unlock;
	}

	cell = this_cpu_ptr(gcells->cells);

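	/* Bound this CPU's queue with the same limit as the regular backlog. */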
	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
drop:
		dev_core_stats_rx_dropped_inc(dev);
		kfree_skb(skb);
		res = NET_RX_DROP;
		goto unlock;
	}

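	/* The cell is per-CPU and we run with BH disabled, so the lockless
	 * __skb_queue_tail() is safe. Schedule the NAPI only when the queue
	 * transitions from empty to non-empty; otherwise a poll is already
	 * pending.
	 */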
	__skb_queue_tail(&cell->napi_skbs, skb);
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	res = NET_RX_SUCCESS;

unlock:
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(gro_cells_receive);

/* called under BH context */
static int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

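	/* Budget not exhausted means the queue is drained: complete the
	 * NAPI so that the next gro_cells_receive() can re-schedule it.
	 */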
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

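/* Allocate one gro_cell per possible CPU and register its NAPI on @dev.
 * Returns 0, or -ENOMEM if the per-CPU allocation fails.
 */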
int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);

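		/* The lockless napi_skbs queue assumes the poller runs in
		 * BH on the owning CPU; busy polling would run it from
		 * process context, so opt these NAPIs out of busy polling.
		 */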
		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);

		netif_napi_add(dev, &cell->napi, gro_cell_poll);
		napi_enable(&cell->napi);
	}
	return 0;
}
EXPORT_SYMBOL(gro_cells_init);

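/* Small holder so that free_percpu() can be deferred through call_rcu(),
 * see gro_cells_destroy() below.
 */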
struct percpu_free_defer {
	struct rcu_head rcu;
	void __percpu *ptr;
};

static void percpu_free_defer_callback(struct rcu_head *head)
{
	struct percpu_free_defer *defer;

	defer = container_of(head, struct percpu_free_defer, rcu);
	free_percpu(defer->ptr);
	kfree(defer);
}

void gro_cells_destroy(struct gro_cells *gcells)
{
	struct percpu_free_defer *defer;
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

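		/* Use __netif_napi_del(): the netif_napi_del() variant would
		 * wait one RCU grace period per cell, while the single grace
		 * period observed below covers all of them.
		 */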
		napi_disable(&cell->napi);
		__netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	/* We need to observe an rcu grace period before freeing ->cells,
	 * because netpoll could access dev->napi_list under rcu protection.
	 * Try hard using call_rcu() instead of synchronize_rcu(),
	 * because we might be called from cleanup_net(), and we
	 * definitely do not want to block this critical task.
	 */
	defer = kmalloc(sizeof(*defer), GFP_KERNEL | __GFP_NOWARN);
	if (likely(defer)) {
		defer->ptr = gcells->cells;
		call_rcu(&defer->rcu, percpu_free_defer_callback);
	} else {
		/* We do not hold RTNL at this point, synchronize_net()
		 * would not be able to expedite this sync.
		 */
		synchronize_rcu_expedited();
		free_percpu(gcells->cells);
	}
	gcells->cells = NULL;
}
EXPORT_SYMBOL(gro_cells_destroy);