/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Connection state tracking for netfilter. This is separated from,
 * but required by, the NAT layer; it can also be used by an iptables
 * extension.
 *
 * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- generalize L3 protocol dependent part.
 *
 * Derived from include/linux/netfilter_ipv4/ip_conntrack.h
 */

#ifndef _NF_CONNTRACK_H
#define _NF_CONNTRACK_H

#include <linux/bitops.h>
#include <linux/compiler.h>

#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/nf_conntrack_dccp.h>
#include <linux/netfilter/nf_conntrack_sctp.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>

#include <net/netfilter/nf_conntrack_tuple.h>

struct nf_ct_udp {
	unsigned long	stream_ts;
};

/* per conntrack: protocol private data */
union nf_conntrack_proto {
	/* insert conntrack proto private data here */
	struct nf_ct_dccp dccp;
	struct ip_ct_sctp sctp;
	struct ip_ct_tcp tcp;
	struct nf_ct_udp udp;
	struct nf_ct_gre gre;
	unsigned int tmpl_padto;
};
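
/*
 * Illustrative sketch (not part of this API): the active member of the
 * union above is determined by the connection's L4 protocol, so callers
 * must check nf_ct_protonum() before touching a member, e.g.:
 *
 *	if (nf_ct_protonum(ct) == IPPROTO_TCP)
 *		state = ct->proto.tcp.state;
 */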

union nf_conntrack_expect_proto {
	/* insert expect proto private data here */
};

struct nf_conntrack_net_ecache {
	struct delayed_work dwork;
	spinlock_t dying_lock;
	struct hlist_nulls_head dying_list;
};

struct nf_conntrack_net {
	/* only used when a new connection is allocated: */
	atomic_t count;
	unsigned int expect_count;

	/* only used from work queues, configuration plane, and so on: */
	unsigned int users4;
	unsigned int users6;
	unsigned int users_bridge;
#ifdef CONFIG_SYSCTL
	struct ctl_table_header	*sysctl_header;
#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	struct nf_conntrack_net_ecache ecache;
#endif
};

#include <linux/types.h>
#include <linux/skbuff.h>

#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>

struct nf_conn {
	/* Usage count in here is 1 for hash table, 1 per skb,
	 * plus 1 for any connection(s) we are `master' for.
	 *
	 * Hint: SKBs address this struct and its refcnt via skb->_nfct
	 * and the helpers nf_conntrack_get() and nf_conntrack_put().
	 * nf_ct_put() equals nf_conntrack_put() in that both decrement
	 * the refcnt, except that the latter uses internal indirection
	 * and does not result in a conntrack module dependency.
	 * Beware: nf_ct_get() is different and does not increment the
	 * refcnt.
	 */
	struct nf_conntrack ct_general;

	spinlock_t	lock;
	/* jiffies32 when this ct is considered dead */
	u32 timeout;

#ifdef CONFIG_NF_CONNTRACK_ZONES
	struct nf_conntrack_zone zone;
#endif
	/* XXX should I move this to the tail? - Y.K */
	/* These are my tuples; original and reply */
	struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];

	/* Have we seen traffic both ways yet? (bitset) */
	unsigned long status;

	possible_net_t ct_net;

#if IS_ENABLED(CONFIG_NF_NAT)
	struct hlist_node	nat_bysource;
#endif
	/* all members below initialized via memset */
	struct { } __nfct_init_offset;

	/* If we were expected by an expectation, this will be it */
	struct nf_conn *master;

#if defined(CONFIG_NF_CONNTRACK_MARK)
	u_int32_t mark;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
	u_int32_t secmark;
#endif

	/* Extensions */
	struct nf_ct_ext *ext;

	/* Storage reserved for other modules, must be the last member */
	union nf_conntrack_proto proto;
};

static inline struct nf_conn *
nf_ct_to_nf_conn(const struct nf_conntrack *nfct)
{
	return container_of(nfct, struct nf_conn, ct_general);
}

static inline struct nf_conn *
nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
{
	return container_of(hash, struct nf_conn,
			    tuplehash[hash->tuple.dst.dir]);
}

static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct)
{
	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
}

static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct)
{
	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
}

#define nf_ct_tuple(ct, dir) (&(ct)->tuplehash[dir].tuple)
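
/*
 * Illustrative sketch: reading the original-direction tuple of an IPv4
 * connection via the accessors above.  The field names follow struct
 * nf_conntrack_tuple as pulled in from nf_conntrack_tuple.h:
 *
 *	const struct nf_conntrack_tuple *t;
 *
 *	t = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
 *	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
 *		saddr = t->src.u3.ip;
 */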

/* get master conntrack via master expectation */
#define master_ct(conntr) (conntr->master)

extern struct net init_net;

static inline struct net *nf_ct_net(const struct nf_conn *ct)
{
	return read_pnet(&ct->ct_net);
}

/* Alter reply tuple (maybe alter helper). */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply);

/* Is this tuple taken? (ignoring any belonging to the given
   conntrack). */
int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			     const struct nf_conn *ignored_conntrack);

/* Return conntrack_info and tuple hash for given skb. */
static inline struct nf_conn *
nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
{
	unsigned long nfct = skb_get_nfct(skb);

	*ctinfo = nfct & NFCT_INFOMASK;
	return (struct nf_conn *)(nfct & NFCT_PTRMASK);
}
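
/*
 * Typical usage from a netfilter hook (sketch): look up the conntrack
 * attached to an skb and bail out for untracked packets:
 *
 *	enum ip_conntrack_info ctinfo;
 *	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 *
 *	if (!ct)
 *		return NF_ACCEPT;
 *
 * Per the comment in struct nf_conn, nf_ct_get() does not take a
 * reference; the skb already holds one via skb->_nfct.
 */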

void nf_ct_destroy(struct nf_conntrack *nfct);

void nf_conntrack_tcp_set_closing(struct nf_conn *ct);

/* decrement reference count on a conntrack */
static inline void nf_ct_put(struct nf_conn *ct)
{
	if (ct && refcount_dec_and_test(&ct->ct_general.use))
		nf_ct_destroy(&ct->ct_general);
}
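
/*
 * Reference counting sketch: a caller that takes its own reference,
 * e.g. to keep the conntrack alive outside the packet path, must
 * balance it with nf_ct_put() when done:
 *
 *	nf_conntrack_get(&ct->ct_general);
 *	... use ct ...
 *	nf_ct_put(ct);
 */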

/* Protocol module loading */
int nf_ct_l3proto_try_module_get(unsigned short l3proto);
void nf_ct_l3proto_module_put(unsigned short l3proto);

/* load module; enable/disable conntrack in this namespace */
int nf_ct_netns_get(struct net *net, u8 nfproto);
void nf_ct_netns_put(struct net *net, u8 nfproto);

/*
 * Allocate a hashtable of hlist_head (if nulls == 0),
 * or hlist_nulls_head (if nulls == 1)
 */
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);

int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct net *net,
		       struct nf_conntrack_tuple *tuple);

void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  u32 extra_jiffies, bool do_acct);

/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
				      enum ip_conntrack_info ctinfo,
				      const struct sk_buff *skb,
				      u32 extra_jiffies)
{
	__nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
}

/* Refresh conntrack for this many jiffies */
static inline void nf_ct_refresh(struct nf_conn *ct,
				 const struct sk_buff *skb,
				 u32 extra_jiffies)
{
	__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
}
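
/*
 * Sketch: an L4 tracker that accepted a packet would typically extend
 * the timeout by its per-state value, with accounting:
 *
 *	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 *
 * where timeouts[] stands in for a hypothetical per-protocol table of
 * jiffies values.
 */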

/* kill conntrack and do accounting */
bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
		     const struct sk_buff *skb);

/* kill conntrack without accounting */
static inline bool nf_ct_kill(struct nf_conn *ct)
{
	return nf_ct_delete(ct, 0, 0);
}

struct nf_ct_iter_data {
	struct net *net;
	void *data;
	u32 portid;
	int report;
};

/* Iterate over all conntracks: if iter returns true, it's deleted. */
void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
			       const struct nf_ct_iter_data *iter_data);

/* also set unconfirmed conntracks as dying. Only use in module exit path. */
void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
			   void *data);

struct nf_conntrack_zone;

void nf_conntrack_free(struct nf_conn *ct);
struct nf_conn *nf_conntrack_alloc(struct net *net,
				   const struct nf_conntrack_zone *zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp);

static inline int nf_ct_is_template(const struct nf_conn *ct)
{
	return test_bit(IPS_TEMPLATE_BIT, &ct->status);
}

/* It's confirmed if it is, or has been, in the hash table. */
static inline int nf_ct_is_confirmed(const struct nf_conn *ct)
{
	return test_bit(IPS_CONFIRMED_BIT, &ct->status);
}

static inline int nf_ct_is_dying(const struct nf_conn *ct)
{
	return test_bit(IPS_DYING_BIT, &ct->status);
}

/* Packet is received from loopback */
static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
{
	return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}

#define nfct_time_stamp ((u32)(jiffies))

/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
	s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;

	return max(timeout, 0);
}

static inline bool nf_ct_is_expired(const struct nf_conn *ct)
{
	return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
}
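
/*
 * The signed 32-bit subtraction above is deliberate: jiffies wrap, so
 * "timeout - now" must be compared as s32.  Worked example: with
 * timeout == 0x00000005 and nfct_time_stamp == 0xfffffffb (just before
 * the wrap), the u32 difference is 0x0000000a, i.e. 10 jiffies in the
 * future, which the s32 cast correctly treats as "not yet expired".
 */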

/* use after obtaining a reference count */
static inline bool nf_ct_should_gc(const struct nf_conn *ct)
{
	return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
	       !nf_ct_is_dying(ct);
}

#define	NF_CT_DAY	(86400 * HZ)

/* Set an arbitrary timeout large enough not to ever expire, this saves
 * us a check for the IPS_OFFLOAD_BIT from the packet path via
 * nf_ct_is_expired().
 */
static inline void nf_ct_offload_timeout(struct nf_conn *ct)
{
	if (nf_ct_expires(ct) < NF_CT_DAY / 2)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
}

struct kernel_param;

int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
int nf_conntrack_hash_resize(unsigned int hashsize);

extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
extern seqcount_spinlock_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;

/* must be called with rcu read lock held */
static inline void
nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
{
	struct hlist_nulls_head *hptr;
	unsigned int sequence, hsz;

	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hsz = nf_conntrack_htable_size;
		hptr = nf_conntrack_hash;
	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));

	*hash = hptr;
	*hsize = hsz;
}
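
/*
 * Usage sketch: a reader walking the hash table first snapshots a
 * consistent (pointer, size) pair under rcu_read_lock():
 *
 *	struct hlist_nulls_head *hash;
 *	unsigned int hsize;
 *
 *	rcu_read_lock();
 *	nf_conntrack_get_ht(&hash, &hsize);
 *	... walk hash[0..hsize-1] with hlist_nulls primitives ...
 *	rcu_read_unlock();
 *
 * The seqcount retry loop guards against a concurrent resize swapping
 * the table between the two loads.
 */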

struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
				 const struct nf_conntrack_zone *zone,
				 gfp_t flags);
void nf_ct_tmpl_free(struct nf_conn *tmpl);

u32 nf_ct_get_id(const struct nf_conn *ct);
u32 nf_conntrack_count(const struct net *net);

static inline void
nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
{
	skb_set_nfct(skb, (unsigned long)ct | info);
}
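
/*
 * Sketch: attaching a conntrack (e.g. a template, as the CT target
 * does) to an skb.  The info value is packed into the low bits of
 * skb->_nfct alongside the pointer, and the skb takes a reference:
 *
 *	nf_conntrack_get(&ct->ct_general);
 *	nf_ct_set(skb, ct, IP_CT_NEW);
 */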

extern unsigned int nf_conntrack_net_id;

static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net)
{
	return net_generic(net, nf_conntrack_net_id);
}

int nf_ct_skb_network_trim(struct sk_buff *skb, int family);
int nf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
			   u16 zone, u8 family, u8 *proto, u16 *mru);

#define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
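
/*
 * Sketch: these macros bump per-cpu counters in struct
 * ip_conntrack_stat, e.g. on a successful table insertion:
 *
 *	NF_CT_STAT_INC(net, insert);
 *
 * The plain variant uses __this_cpu_inc() and so requires preemption
 * to be disabled (e.g. softirq context); the _ATOMIC variants do not.
 */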

#define MODULE_ALIAS_NFCT_HELPER(helper) \
	MODULE_ALIAS("nfct-helper-" helper)

#endif	/* _NF_CONNTRACK_H */