// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

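/* Global registry of active flowtables, protected by flowtable_lock.
 * nf_flow_table_cleanup() walks this list when a net device goes down.
 */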
static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

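/* Copy one direction of the conntrack tuple into the flow tuple used
 * for flowtable lookups: addresses, L3/L4 protocol numbers and, for TCP
 * and UDP, the port pair.
 */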
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;

	switch (ctt->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ft->src_port = ctt->src.u.tcp.port;
		ft->dst_port = ctt->dst.u.tcp.port;
		break;
	}
}

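/* Allocate a flow entry backed by the given conntrack entry and take a
 * reference on it. Both tuple directions are filled in and the
 * conntrack NAT bits are mirrored into the flow flags. Returns NULL if
 * the conntrack entry is already dying or the allocation fails.
 */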
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		return NULL;

	refcount_inc(&ct->ct_general.use);
	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	const struct rt6_info *rt;

	if (flow_tuple->l3proto == NFPROTO_IPV6) {
		rt = (const struct rt6_info *)flow_tuple->dst_cache;
		return rt6_get_cookie(rt);
	}

	return 0;
}

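/* Populate the routing state for one direction of the flow: path MTU,
 * ingress interface and encapsulation stack (VLAN tags and similar),
 * plus either a precomputed ethernet header for the direct xmit path or
 * a reference-counted dst_entry for the neigh/xfrm xmit paths.
 */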
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

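/* Fill in the route information for both directions of a flow. If the
 * reply direction cannot be set up, release the dst reference that the
 * original direction may already hold.
 */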
int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

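/* Hand the connection back to regular conntrack management once it
 * leaves the flowtable: reset TCP window tracking and rearm ct->timeout
 * with the normal per-state timeout minus the extra time the offload
 * path already granted, never shortening an already closer deadline.
 */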
static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	s32 timeout;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		flow_offload_fixup_tcp(&ct->proto.tcp);

		timeout = tn->timeouts[ct->proto.tcp.state];
		timeout -= tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);
		enum udp_conntrack state =
			test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			UDP_CT_REPLIED : UDP_CT_UNREPLIED;

		timeout = tn->timeouts[state];
		timeout -= tn->offload_timeout;
	} else {
		return;
	}

	if (timeout < 0)
		timeout = 0;

	if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

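/* struct flow_offload_tuple lays out all lookup key fields before its
 * __hash marker, so the hashing and comparison below cover exactly the
 * key: fields after __hash (direction, xmit type, cached route, ...)
 * do not participate in lookups.
 */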
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

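/* Timeout granted to an offloaded flow: NF_FLOW_TIMEOUT by default, or
 * the per-netns TCP/UDP offload timeout for those two protocols.
 */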
unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
	unsigned long timeout = NF_FLOW_TIMEOUT;
	struct net *net = nf_ct_net(flow->ct);
	int l4num = nf_ct_protonum(flow->ct);

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_timeout;
	}

	return timeout;
}

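/* Insert both directions of a flow into the flowtable. If the reply
 * direction cannot be inserted, back out the original direction so the
 * table never contains half a flow. On success, extend the conntrack
 * timeout and, if the flowtable supports it, push the flow to hardware.
 */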
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

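/* Extend the timeout of a flow that keeps seeing packets. flow->timeout
 * is only rewritten when it would advance by more than one second (HZ),
 * so the hot path does not store to the flow entry on every packet; the
 * hardware entry is re-added on the same cadence.
 */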
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	u32 timeout;

	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
	if (timeout - READ_ONCE(flow->timeout) > HZ)
		WRITE_ONCE(flow->timeout, timeout);
	else
		return;

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);
	flow_offload_free(flow);
}

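/* Mark a flow for removal: clear IPS_OFFLOAD so conntrack takes over
 * the connection again, flag the entry for the garbage collector and
 * restore a sensible conntrack timeout. The actual removal from the
 * table happens asynchronously in the GC step.
 */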
void flow_offload_teardown(struct flow_offload *flow)
{
	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);
	flow_offload_fixup_ct(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct nf_flowtable *flowtable,
				   struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		/* Both tuple directions point at the same flow; skip the
		 * reply direction so each flow is visited exactly once.
		 */
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow_table, flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

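/* One garbage-collection pass over a flow: tear down entries that have
 * expired or whose conntrack entry is dying. Torn-down hardware entries
 * are first scheduled for deletion (NF_FLOW_HW_DYING) and only unlinked
 * once the driver confirms removal (NF_FLOW_HW_DEAD); live hardware
 * entries get their stats refreshed instead.
 */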
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
				    struct flow_offload *flow, void *data)
{
	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct))
		flow_offload_teardown(flow);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
{
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_gc_run(flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}

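/* Source NAT the L4 ports of a packet in flowtable context. In the
 * original direction the source port becomes the reply tuple's
 * destination port; in the reply direction the destination port is
 * restored from the original tuple's source port. The checksum is
 * fixed up incrementally.
 */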
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

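/* Initialise a flowtable: set up the rhashtable, the flow block used
 * for hardware offload and the periodic (one second) garbage collection
 * work, then register the table on the global flowtables list.
 */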
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
				     struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

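/* Tear down a flowtable: unlink it from the global list, stop the GC
 * work, flush pending hardware offload work, tear down and reap every
 * remaining flow, then destroy the rhashtable.
 */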
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_offload_flush(flow_table);
	/* ... no more pending work after this stage ... */
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_gc_run(flow_table);
	nf_flow_table_offload_flush_cleanup(flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int nf_flow_table_init_net(struct net *net)
{
	net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
	return net->ft.stat ? 0 : -ENOMEM;
}

static void nf_flow_table_fini_net(struct net *net)
{
	free_percpu(net->ft.stat);
}

static int nf_flow_table_pernet_init(struct net *net)
{
	int ret;

	ret = nf_flow_table_init_net(net);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_init_proc(net);
	if (ret < 0)
		goto out_proc;

	return 0;

out_proc:
	nf_flow_table_fini_net(net);
	return ret;
}

static void nf_flow_table_pernet_exit(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		nf_flow_table_fini_proc(net);
		nf_flow_table_fini_net(net);
	}
}

static struct pernet_operations nf_flow_table_net_ops = {
	.init = nf_flow_table_pernet_init,
	.exit_batch = nf_flow_table_pernet_exit,
};

static int __init nf_flow_table_module_init(void)
{
	int ret;

	ret = register_pernet_subsys(&nf_flow_table_net_ops);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_offload_init();
	if (ret)
		goto out_offload;

	return 0;

out_offload:
	unregister_pernet_subsys(&nf_flow_table_net_ops);
	return ret;
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
	unregister_pernet_subsys(&nf_flow_table_net_ops);
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");