// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

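/* Rebuild the VLAN ethernet header for a tag that was stripped on receive:
 * copy the MAC addresses from the packet, then restore the tag from the
 * skb metadata (skb->vlan_proto, skb->vlan_tci) where hardware offload
 * stashed it.
 */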
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* Add the VLAN header into the user buffer if the tag was removed by
 * hardware offload (i.e. it lives in the skb metadata, not the payload).
 */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

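/* Compute the inner header offset: just past the UDP header for UDP
 * encapsulations, or past the TCP header (including options) for TCP.
 * The result is cached in pkt->inneroff, marked by NFT_PKTINFO_INNER.
 */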
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
	unsigned int thoff = nft_thoff(pkt);

	if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_UDP:
		pkt->inneroff = thoff + sizeof(struct udphdr);
		break;
	case IPPROTO_TCP: {
		struct tcphdr *th, _tcph;

		th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
		if (!th)
			return -1;

		pkt->inneroff = thoff + __tcp_hdrlen(th);
		}
		break;
	default:
		return -1;
	}

	pkt->flags |= NFT_PKTINFO_INNER;

	return 0;
}

static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
	if (!(pkt->flags & NFT_PKTINFO_INNER) &&
	    __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
		return -1;

	return pkt->inneroff;
}

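/* Load priv->len bytes at priv->offset from the selected header base into
 * the destination register, zero-padding the last register word. Failure
 * to locate the header or to copy ends the rule with NFT_BREAK.
 */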
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

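/* Build the match mask for an offloaded payload load: all-ones when the
 * expression covers the whole flow dissector field, otherwise mask off
 * the trailing bytes it does not cover. Loads wider than the field are
 * rejected.
 */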
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}

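/* Map a link-layer payload match onto flow dissector keys. Only a few
 * well-known fields (ethernet addresses, ethertype, VLAN/CVLAN TCI and
 * encapsulated protocol) can be expressed; anything else is not
 * offloadable.
 */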
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
							sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

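/* Network-header matches dispatch on the layer 3 protocol that an earlier
 * expression in the rule established (ctx->dep.l3num); only IPv4 and IPv6
 * are supported.
 */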
static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

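/* Transport-header matches likewise dispatch on the layer 4 protocol
 * recorded in ctx->dep.protonum; only TCP and UDP ports are supported.
 */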
static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

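/* Non-static on purpose: the evaluation loop in nf_tables_core.c compares
 * expression ops against nft_payload_fast_ops to run an inlined fast path
 * (nft_payload_fast_eval()), falling back to the .eval callback below when
 * linear packet access is not possible.
 */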
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

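/* Incrementally replace an inet checksum: remove the old data's sum (fsum),
 * add the new data's sum (tsum), and map a zero result to CSUM_MANGLED_0
 * so it cannot be mistaken for "no checksum".
 */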
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

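/* A UDP checksum of zero means no checksum was computed (UDP over IPv4),
 * in which case there is nothing to update.
 */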
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

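/* Locate the layer 4 checksum field for the current transport protocol.
 * Returns -1 for protocols without a checksum we can mangle, or for UDP
 * packets that carry no checksum.
 */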
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += nft_thoff(pkt);
	return 0;
}

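/* SCTP uses a CRC32c over the whole packet rather than an inet checksum,
 * so it cannot be fixed up incrementally; recompute it from scratch.
 */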
static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine the layer 4 checksum offset or this packet
	 * doesn't require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

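/* Store priv->len bytes from the source register into the packet, fixing
 * up the configured checksum first. For transport/inner header rewrites on
 * CHECKSUM_PARTIAL skbs the inet fixup is skipped, as the checksum will be
 * computed later anyway.
 */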
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
	      priv->base != NFT_PAYLOAD_INNER_HEADER) ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (nft_payload_csum_sctp(skb, nft_thoff(pkt)))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

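/* Parse the netlink attributes of a payload store expression, validating
 * the checksum setup: SCTP checksumming is only accepted on the transport
 * header and at the SCTP checksum field offset.
 */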
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

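/* Choose between store, load and fast-path load ops. Small, naturally
 * aligned loads from the network or transport header can use
 * nft_payload_fast_ops; e.g. a rule like "tcp dport 22" compiles to a
 * 2-byte load at transport header offset 2, which qualifies.
 */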
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
	case NFT_PAYLOAD_INNER_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};