1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * The IP to API glue.
8 *
9 * Authors: see ip.c
10 *
11 * Fixes:
12 * Many : Split from ip.c , see ip.c for history.
13 * Martin Mares : TOS setting fixed.
14 * Alan Cox : Fixed a couple of oopses in Martin's
15 * TOS tweaks.
16 * Mike McLagan : Routing by source
17 */
18
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/mm.h>
22 #include <linux/skbuff.h>
23 #include <linux/ip.h>
24 #include <linux/icmp.h>
25 #include <linux/inetdevice.h>
26 #include <linux/netdevice.h>
27 #include <linux/slab.h>
28 #include <net/sock.h>
29 #include <net/ip.h>
30 #include <net/icmp.h>
31 #include <net/tcp_states.h>
32 #include <linux/udp.h>
33 #include <linux/igmp.h>
34 #include <linux/netfilter.h>
35 #include <linux/route.h>
36 #include <linux/mroute.h>
37 #include <net/inet_ecn.h>
38 #include <net/route.h>
39 #include <net/xfrm.h>
40 #include <net/compat.h>
41 #include <net/checksum.h>
42 #if IS_ENABLED(CONFIG_IPV6)
43 #include <net/transp_v6.h>
44 #endif
45 #include <net/ip_fib.h>
46
47 #include <linux/errqueue.h>
48 #include <linux/uaccess.h>
49
50 #include <linux/bpfilter.h>
51
52 /*
53 * SOL_IP control messages.
54 */
55
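/*
 * The ip_cmsg_recv_*() helpers below each append one SOL_IP ancillary
 * message to the msghdr handed back by recvmsg().  Purely illustrative
 * userspace sketch, not kernel code (assumes a UDP socket with IP_PKTINFO
 * already enabled via setsockopt(); data iovec omitted for brevity):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *	struct in_pktinfo info;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_PKTINFO)
 *			memcpy(&info, CMSG_DATA(cm), sizeof(info));
 *
 * after which info.ipi_ifindex and info.ipi_addr describe the receiving
 * interface and the destination address of the datagram.
 */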
56 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
57 {
58 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
59
60 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
61
62 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
63 }
64
65 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
66 {
67 int ttl = ip_hdr(skb)->ttl;
68 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
69 }
70
71 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
72 {
73 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
74 }
75
76 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
77 {
78 if (IPCB(skb)->opt.optlen == 0)
79 return;
80
81 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
82 ip_hdr(skb) + 1);
83 }
84
85
86 static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg,
87 struct sk_buff *skb)
88 {
89 unsigned char optbuf[sizeof(struct ip_options) + 40];
90 struct ip_options *opt = (struct ip_options *)optbuf;
91
92 if (IPCB(skb)->opt.optlen == 0)
93 return;
94
95 if (ip_options_echo(net, opt, skb)) {
96 msg->msg_flags |= MSG_CTRUNC;
97 return;
98 }
99 ip_options_undo(opt);
100
101 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
102 }
103
104 static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
105 {
106 int val;
107
108 if (IPCB(skb)->frag_max_size == 0)
109 return;
110
111 val = IPCB(skb)->frag_max_size;
112 put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
113 }
114
115 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
116 int tlen, int offset)
117 {
118 __wsum csum = skb->csum;
119
120 if (skb->ip_summed != CHECKSUM_COMPLETE)
121 return;
122
123 if (offset != 0) {
124 int tend_off = skb_transport_offset(skb) + tlen;
125 csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
126 }
127
128 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
129 }
130
131 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
132 {
133 char *secdata;
134 u32 seclen, secid;
135 int err;
136
137 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
138 if (err)
139 return;
140
141 err = security_secid_to_secctx(secid, &secdata, &seclen);
142 if (err)
143 return;
144
145 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
146 security_release_secctx(secdata, seclen);
147 }
148
149 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
150 {
151 __be16 _ports[2], *ports;
152 struct sockaddr_in sin;
153
154 /* All current transport protocols have the port numbers in the
155 * first four bytes of the transport header and this function is
156 * written with this assumption in mind.
157 */
158 ports = skb_header_pointer(skb, skb_transport_offset(skb),
159 sizeof(_ports), &_ports);
160 if (!ports)
161 return;
162
163 sin.sin_family = AF_INET;
164 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
165 sin.sin_port = ports[1];
166 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
167
168 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
169 }
170
171 void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
172 struct sk_buff *skb, int tlen, int offset)
173 {
174 struct inet_sock *inet = inet_sk(sk);
175 unsigned int flags = inet->cmsg_flags;
176
177 /* Ordered by supposed usage frequency */
178 if (flags & IP_CMSG_PKTINFO) {
179 ip_cmsg_recv_pktinfo(msg, skb);
180
181 flags &= ~IP_CMSG_PKTINFO;
182 if (!flags)
183 return;
184 }
185
186 if (flags & IP_CMSG_TTL) {
187 ip_cmsg_recv_ttl(msg, skb);
188
189 flags &= ~IP_CMSG_TTL;
190 if (!flags)
191 return;
192 }
193
194 if (flags & IP_CMSG_TOS) {
195 ip_cmsg_recv_tos(msg, skb);
196
197 flags &= ~IP_CMSG_TOS;
198 if (!flags)
199 return;
200 }
201
202 if (flags & IP_CMSG_RECVOPTS) {
203 ip_cmsg_recv_opts(msg, skb);
204
205 flags &= ~IP_CMSG_RECVOPTS;
206 if (!flags)
207 return;
208 }
209
210 if (flags & IP_CMSG_RETOPTS) {
211 ip_cmsg_recv_retopts(sock_net(sk), msg, skb);
212
213 flags &= ~IP_CMSG_RETOPTS;
214 if (!flags)
215 return;
216 }
217
218 if (flags & IP_CMSG_PASSSEC) {
219 ip_cmsg_recv_security(msg, skb);
220
221 flags &= ~IP_CMSG_PASSSEC;
222 if (!flags)
223 return;
224 }
225
226 if (flags & IP_CMSG_ORIGDSTADDR) {
227 ip_cmsg_recv_dstaddr(msg, skb);
228
229 flags &= ~IP_CMSG_ORIGDSTADDR;
230 if (!flags)
231 return;
232 }
233
234 if (flags & IP_CMSG_CHECKSUM)
235 ip_cmsg_recv_checksum(msg, skb, tlen, offset);
236
237 if (flags & IP_CMSG_RECVFRAGSIZE)
238 ip_cmsg_recv_fragsize(msg, skb);
239 }
240 EXPORT_SYMBOL(ip_cmsg_recv_offset);
241
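/*
 * ip_cmsg_send() is the sending-side counterpart of the helpers above: it
 * walks the control messages supplied with sendmsg() and fills in the
 * ipcm_cookie (source address/ifindex from IP_PKTINFO, per-call TTL, TOS
 * and IP options).  Hedged userspace sketch of a per-packet TOS, with the
 * destination address and data iovec omitted for brevity (not kernel code):
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *	int tos = 0x10;
 *
 *	cm->cmsg_level = SOL_IP;
 *	cm->cmsg_type = IP_TOS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(tos));
 *	memcpy(CMSG_DATA(cm), &tos, sizeof(tos));
 *	sendmsg(fd, &msg, 0);
 */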
242 int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
243 bool allow_ipv6)
244 {
245 int err, val;
246 struct cmsghdr *cmsg;
247 struct net *net = sock_net(sk);
248
249 for_each_cmsghdr(cmsg, msg) {
250 if (!CMSG_OK(msg, cmsg))
251 return -EINVAL;
252 #if IS_ENABLED(CONFIG_IPV6)
253 if (allow_ipv6 &&
254 cmsg->cmsg_level == SOL_IPV6 &&
255 cmsg->cmsg_type == IPV6_PKTINFO) {
256 struct in6_pktinfo *src_info;
257
258 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
259 return -EINVAL;
260 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
261 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
262 return -EINVAL;
263 if (src_info->ipi6_ifindex)
264 ipc->oif = src_info->ipi6_ifindex;
265 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
266 continue;
267 }
268 #endif
269 if (cmsg->cmsg_level == SOL_SOCKET) {
270 err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
271 if (err)
272 return err;
273 continue;
274 }
275
276 if (cmsg->cmsg_level != SOL_IP)
277 continue;
278 switch (cmsg->cmsg_type) {
279 case IP_RETOPTS:
280 err = cmsg->cmsg_len - sizeof(struct cmsghdr);
281
282 /* Our caller is responsible for freeing ipc->opt */
283 err = ip_options_get(net, &ipc->opt,
284 KERNEL_SOCKPTR(CMSG_DATA(cmsg)),
285 err < 40 ? err : 40);
286 if (err)
287 return err;
288 break;
289 case IP_PKTINFO:
290 {
291 struct in_pktinfo *info;
292 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
293 return -EINVAL;
294 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
295 if (info->ipi_ifindex)
296 ipc->oif = info->ipi_ifindex;
297 ipc->addr = info->ipi_spec_dst.s_addr;
298 break;
299 }
300 case IP_TTL:
301 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
302 return -EINVAL;
303 val = *(int *)CMSG_DATA(cmsg);
304 if (val < 1 || val > 255)
305 return -EINVAL;
306 ipc->ttl = val;
307 break;
308 case IP_TOS:
309 if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
310 val = *(int *)CMSG_DATA(cmsg);
311 else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
312 val = *(u8 *)CMSG_DATA(cmsg);
313 else
314 return -EINVAL;
315 if (val < 0 || val > 255)
316 return -EINVAL;
317 ipc->tos = val;
318 ipc->priority = rt_tos2priority(ipc->tos);
319 break;
320
321 default:
322 return -EINVAL;
323 }
324 }
325 return 0;
326 }
327
328 static void ip_ra_destroy_rcu(struct rcu_head *head)
329 {
330 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
331
332 sock_put(ra->saved_sk);
333 kfree(ra);
334 }
335
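/* ip_ra_control() adds the socket to (on != 0) or removes it from the
 * per-netns router-alert chain that ip_call_ra_chain() walks.  Removal is
 * deferred through RCU (ip_ra_destroy_rcu above) so readers never see a
 * half-torn-down entry.
 */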
336 int ip_ra_control(struct sock *sk, unsigned char on,
337 void (*destructor)(struct sock *))
338 {
339 struct ip_ra_chain *ra, *new_ra;
340 struct ip_ra_chain __rcu **rap;
341 struct net *net = sock_net(sk);
342
343 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
344 return -EINVAL;
345
346 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
347 if (on && !new_ra)
348 return -ENOMEM;
349
350 mutex_lock(&net->ipv4.ra_mutex);
351 for (rap = &net->ipv4.ra_chain;
352 (ra = rcu_dereference_protected(*rap,
353 lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
354 rap = &ra->next) {
355 if (ra->sk == sk) {
356 if (on) {
357 mutex_unlock(&net->ipv4.ra_mutex);
358 kfree(new_ra);
359 return -EADDRINUSE;
360 }
361 /* don't let ip_call_ra_chain() use sk again */
362 ra->sk = NULL;
363 RCU_INIT_POINTER(*rap, ra->next);
364 mutex_unlock(&net->ipv4.ra_mutex);
365
366 if (ra->destructor)
367 ra->destructor(sk);
368 /*
369 * Delay sock_put(sk) and kfree(ra) after one rcu grace
370 * period. This guarantees ip_call_ra_chain() doesn't need
371 * to mess with socket refcounts.
372 */
373 ra->saved_sk = sk;
374 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
375 return 0;
376 }
377 }
378 if (!new_ra) {
379 mutex_unlock(&net->ipv4.ra_mutex);
380 return -ENOBUFS;
381 }
382 new_ra->sk = sk;
383 new_ra->destructor = destructor;
384
385 RCU_INIT_POINTER(new_ra->next, ra);
386 rcu_assign_pointer(*rap, new_ra);
387 sock_hold(sk);
388 mutex_unlock(&net->ipv4.ra_mutex);
389
390 return 0;
391 }
392
393 static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb,
394 struct sock_ee_data_rfc4884 *out)
395 {
396 switch (icmp_hdr(skb)->type) {
397 case ICMP_DEST_UNREACH:
398 case ICMP_TIME_EXCEEDED:
399 case ICMP_PARAMETERPROB:
400 ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr),
401 icmp_hdr(skb)->un.reserved[1] * 4);
402 }
403 }
404
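/* Queue a clone of the offending ICMP packet on the socket error queue so
 * that IP_RECVERR users can read it back via recvmsg(MSG_ERRQUEUE) together
 * with a sock_extended_err describing the error.
 */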
405 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
406 __be16 port, u32 info, u8 *payload)
407 {
408 struct sock_exterr_skb *serr;
409
410 skb = skb_clone(skb, GFP_ATOMIC);
411 if (!skb)
412 return;
413
414 serr = SKB_EXT_ERR(skb);
415 serr->ee.ee_errno = err;
416 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
417 serr->ee.ee_type = icmp_hdr(skb)->type;
418 serr->ee.ee_code = icmp_hdr(skb)->code;
419 serr->ee.ee_pad = 0;
420 serr->ee.ee_info = info;
421 serr->ee.ee_data = 0;
422 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
423 skb_network_header(skb);
424 serr->port = port;
425
426 if (skb_pull(skb, payload - skb->data)) {
427 if (inet_sk(sk)->recverr_rfc4884)
428 ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
429
430 skb_reset_transport_header(skb);
431 if (sock_queue_err_skb(sk, skb) == 0)
432 return;
433 }
434 kfree_skb(skb);
435 }
436 EXPORT_SYMBOL_GPL(ip_icmp_error);
437
438 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
439 {
440 struct inet_sock *inet = inet_sk(sk);
441 struct sock_exterr_skb *serr;
442 struct iphdr *iph;
443 struct sk_buff *skb;
444
445 if (!inet->recverr)
446 return;
447
448 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
449 if (!skb)
450 return;
451
452 skb_put(skb, sizeof(struct iphdr));
453 skb_reset_network_header(skb);
454 iph = ip_hdr(skb);
455 iph->daddr = daddr;
456
457 serr = SKB_EXT_ERR(skb);
458 serr->ee.ee_errno = err;
459 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
460 serr->ee.ee_type = 0;
461 serr->ee.ee_code = 0;
462 serr->ee.ee_pad = 0;
463 serr->ee.ee_info = info;
464 serr->ee.ee_data = 0;
465 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
466 serr->port = port;
467
468 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
469 skb_reset_transport_header(skb);
470
471 if (sock_queue_err_skb(sk, skb))
472 kfree_skb(skb);
473 }
474
475 /* For some errors we have valid addr_offset even with zero payload and
476 * zero port. Also, addr_offset should be supported if port is set.
477 */
478 static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
479 {
480 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
481 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
482 }
483
484 /* IPv4 supports cmsg on all ICMP errors and some timestamps
485 *
486 * Timestamp code paths do not initialize the fields expected by cmsg:
487 * the PKTINFO fields in skb->cb[]. Fill those in here.
488 */
489 static bool ipv4_datagram_support_cmsg(const struct sock *sk,
490 struct sk_buff *skb,
491 int ee_origin)
492 {
493 struct in_pktinfo *info;
494
495 if (ee_origin == SO_EE_ORIGIN_ICMP)
496 return true;
497
498 if (ee_origin == SO_EE_ORIGIN_LOCAL)
499 return false;
500
501 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
502 * timestamp with egress dev. Not possible for packets without iif
503 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
504 */
505 info = PKTINFO_SKB_CB(skb);
506 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
507 !info->ipi_ifindex)
508 return false;
509
510 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
511 return true;
512 }
513
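/*
 * Illustrative userspace sketch of draining the error queue that
 * ip_icmp_error()/ip_local_error() fill (assumes IP_RECVERR was enabled on
 * the socket; not kernel code):
 *
 *	char data[1500], cbuf[512];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *	struct sock_extended_err ee;
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *		return;
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR)
 *			memcpy(&ee, CMSG_DATA(cm), sizeof(ee));
 */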
514 /*
515 * Handle MSG_ERRQUEUE
516 */
517 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
518 {
519 struct sock_exterr_skb *serr;
520 struct sk_buff *skb;
521 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
522 struct {
523 struct sock_extended_err ee;
524 struct sockaddr_in offender;
525 } errhdr;
526 int err;
527 int copied;
528
529 err = -EAGAIN;
530 skb = sock_dequeue_err_skb(sk);
531 if (!skb)
532 goto out;
533
534 copied = skb->len;
535 if (copied > len) {
536 msg->msg_flags |= MSG_TRUNC;
537 copied = len;
538 }
539 err = skb_copy_datagram_msg(skb, 0, msg, copied);
540 if (unlikely(err)) {
541 kfree_skb(skb);
542 return err;
543 }
544 sock_recv_timestamp(msg, sk, skb);
545
546 serr = SKB_EXT_ERR(skb);
547
548 if (sin && ipv4_datagram_support_addr(serr)) {
549 sin->sin_family = AF_INET;
550 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
551 serr->addr_offset);
552 sin->sin_port = serr->port;
553 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
554 *addr_len = sizeof(*sin);
555 }
556
557 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
558 sin = &errhdr.offender;
559 memset(sin, 0, sizeof(*sin));
560
561 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
562 sin->sin_family = AF_INET;
563 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
564 if (inet_sk(sk)->cmsg_flags)
565 ip_cmsg_recv(msg, skb);
566 }
567
568 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
569
570 /* Now we could try to dump offended packet options */
571
572 msg->msg_flags |= MSG_ERRQUEUE;
573 err = copied;
574
575 consume_skb(skb);
576 out:
577 return err;
578 }
579
580 void __ip_sock_set_tos(struct sock *sk, int val)
581 {
582 if (sk->sk_type == SOCK_STREAM) {
583 val &= ~INET_ECN_MASK;
584 val |= inet_sk(sk)->tos & INET_ECN_MASK;
585 }
586 if (inet_sk(sk)->tos != val) {
587 inet_sk(sk)->tos = val;
588 sk->sk_priority = rt_tos2priority(val);
589 sk_dst_reset(sk);
590 }
591 }
592
593 void ip_sock_set_tos(struct sock *sk, int val)
594 {
595 lock_sock(sk);
596 __ip_sock_set_tos(sk, val);
597 release_sock(sk);
598 }
599 EXPORT_SYMBOL(ip_sock_set_tos);
600
601 void ip_sock_set_freebind(struct sock *sk)
602 {
603 lock_sock(sk);
604 inet_sk(sk)->freebind = true;
605 release_sock(sk);
606 }
607 EXPORT_SYMBOL(ip_sock_set_freebind);
608
609 void ip_sock_set_recverr(struct sock *sk)
610 {
611 lock_sock(sk);
612 inet_sk(sk)->recverr = true;
613 release_sock(sk);
614 }
615 EXPORT_SYMBOL(ip_sock_set_recverr);
616
617 int ip_sock_set_mtu_discover(struct sock *sk, int val)
618 {
619 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
620 return -EINVAL;
621 lock_sock(sk);
622 inet_sk(sk)->pmtudisc = val;
623 release_sock(sk);
624 return 0;
625 }
626 EXPORT_SYMBOL(ip_sock_set_mtu_discover);
627
628 void ip_sock_set_pktinfo(struct sock *sk)
629 {
630 lock_sock(sk);
631 inet_sk(sk)->cmsg_flags |= IP_CMSG_PKTINFO;
632 release_sock(sk);
633 }
634 EXPORT_SYMBOL(ip_sock_set_pktinfo);
635
636 /*
637 * Socket option code for IP. This is the end of the line after any
638 * TCP, UDP, etc. options on an IP socket.
639 */
640 static bool setsockopt_needs_rtnl(int optname)
641 {
642 switch (optname) {
643 case IP_ADD_MEMBERSHIP:
644 case IP_ADD_SOURCE_MEMBERSHIP:
645 case IP_BLOCK_SOURCE:
646 case IP_DROP_MEMBERSHIP:
647 case IP_DROP_SOURCE_MEMBERSHIP:
648 case IP_MSFILTER:
649 case IP_UNBLOCK_SOURCE:
650 case MCAST_BLOCK_SOURCE:
651 case MCAST_MSFILTER:
652 case MCAST_JOIN_GROUP:
653 case MCAST_JOIN_SOURCE_GROUP:
654 case MCAST_LEAVE_GROUP:
655 case MCAST_LEAVE_SOURCE_GROUP:
656 case MCAST_UNBLOCK_SOURCE:
657 return true;
658 }
659 return false;
660 }
661
662 static int set_mcast_msfilter(struct sock *sk, int ifindex,
663 int numsrc, int fmode,
664 struct sockaddr_storage *group,
665 struct sockaddr_storage *list)
666 {
667 struct ip_msfilter *msf;
668 struct sockaddr_in *psin;
669 int err, i;
670
671 msf = kmalloc(IP_MSFILTER_SIZE(numsrc), GFP_KERNEL);
672 if (!msf)
673 return -ENOBUFS;
674
675 psin = (struct sockaddr_in *)group;
676 if (psin->sin_family != AF_INET)
677 goto Eaddrnotavail;
678 msf->imsf_multiaddr = psin->sin_addr.s_addr;
679 msf->imsf_interface = 0;
680 msf->imsf_fmode = fmode;
681 msf->imsf_numsrc = numsrc;
682 for (i = 0; i < numsrc; ++i) {
683 psin = (struct sockaddr_in *)&list[i];
684
685 if (psin->sin_family != AF_INET)
686 goto Eaddrnotavail;
687 msf->imsf_slist_flex[i] = psin->sin_addr.s_addr;
688 }
689 err = ip_mc_msfilter(sk, msf, ifindex);
690 kfree(msf);
691 return err;
692
693 Eaddrnotavail:
694 kfree(msf);
695 return -EADDRNOTAVAIL;
696 }
697
698 static int copy_group_source_from_sockptr(struct group_source_req *greqs,
699 sockptr_t optval, int optlen)
700 {
701 if (in_compat_syscall()) {
702 struct compat_group_source_req gr32;
703
704 if (optlen != sizeof(gr32))
705 return -EINVAL;
706 if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
707 return -EFAULT;
708 greqs->gsr_interface = gr32.gsr_interface;
709 greqs->gsr_group = gr32.gsr_group;
710 greqs->gsr_source = gr32.gsr_source;
711 } else {
712 if (optlen != sizeof(*greqs))
713 return -EINVAL;
714 if (copy_from_sockptr(greqs, optval, sizeof(*greqs)))
715 return -EFAULT;
716 }
717
718 return 0;
719 }
720
721 static int do_mcast_group_source(struct sock *sk, int optname,
722 sockptr_t optval, int optlen)
723 {
724 struct group_source_req greqs;
725 struct ip_mreq_source mreqs;
726 struct sockaddr_in *psin;
727 int omode, add, err;
728
729 err = copy_group_source_from_sockptr(&greqs, optval, optlen);
730 if (err)
731 return err;
732
733 if (greqs.gsr_group.ss_family != AF_INET ||
734 greqs.gsr_source.ss_family != AF_INET)
735 return -EADDRNOTAVAIL;
736
737 psin = (struct sockaddr_in *)&greqs.gsr_group;
738 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
739 psin = (struct sockaddr_in *)&greqs.gsr_source;
740 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
741 mreqs.imr_interface = 0; /* use index for mc_source */
742
743 if (optname == MCAST_BLOCK_SOURCE) {
744 omode = MCAST_EXCLUDE;
745 add = 1;
746 } else if (optname == MCAST_UNBLOCK_SOURCE) {
747 omode = MCAST_EXCLUDE;
748 add = 0;
749 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
750 struct ip_mreqn mreq;
751
752 psin = (struct sockaddr_in *)&greqs.gsr_group;
753 mreq.imr_multiaddr = psin->sin_addr;
754 mreq.imr_address.s_addr = 0;
755 mreq.imr_ifindex = greqs.gsr_interface;
756 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
757 if (err && err != -EADDRINUSE)
758 return err;
759 greqs.gsr_interface = mreq.imr_ifindex;
760 omode = MCAST_INCLUDE;
761 add = 1;
762 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
763 omode = MCAST_INCLUDE;
764 add = 0;
765 }
766 return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface);
767 }
768
769 static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
770 {
771 struct group_filter *gsf = NULL;
772 int err;
773
774 if (optlen < GROUP_FILTER_SIZE(0))
775 return -EINVAL;
776 if (optlen > READ_ONCE(sysctl_optmem_max))
777 return -ENOBUFS;
778
779 gsf = memdup_sockptr(optval, optlen);
780 if (IS_ERR(gsf))
781 return PTR_ERR(gsf);
782
783 /* numsrc >= (4G-140)/128 overflow in 32 bits */
784 err = -ENOBUFS;
785 if (gsf->gf_numsrc >= 0x1ffffff ||
786 gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
787 goto out_free_gsf;
788
789 err = -EINVAL;
790 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen)
791 goto out_free_gsf;
792
793 err = set_mcast_msfilter(sk, gsf->gf_interface, gsf->gf_numsrc,
794 gsf->gf_fmode, &gsf->gf_group,
795 gsf->gf_slist_flex);
796 out_free_gsf:
797 kfree(gsf);
798 return err;
799 }
800
801 static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
802 int optlen)
803 {
804 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
805 struct compat_group_filter *gf32;
806 unsigned int n;
807 void *p;
808 int err;
809
810 if (optlen < size0)
811 return -EINVAL;
812 if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
813 return -ENOBUFS;
814
815 p = kmalloc(optlen + 4, GFP_KERNEL);
816 if (!p)
817 return -ENOMEM;
818 gf32 = p + 4; /* we want ->gf_group and ->gf_slist_flex aligned */
819
820 err = -EFAULT;
821 if (copy_from_sockptr(gf32, optval, optlen))
822 goto out_free_gsf;
823
824 /* numsrc >= (4G-140)/128 overflow in 32 bits */
825 n = gf32->gf_numsrc;
826 err = -ENOBUFS;
827 if (n >= 0x1ffffff)
828 goto out_free_gsf;
829
830 err = -EINVAL;
831 if (offsetof(struct compat_group_filter, gf_slist_flex[n]) > optlen)
832 goto out_free_gsf;
833
834 /* numsrc >= (4G-140)/128 overflow in 32 bits */
835 err = -ENOBUFS;
836 if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
837 goto out_free_gsf;
838 err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
839 &gf32->gf_group, gf32->gf_slist_flex);
840 out_free_gsf:
841 kfree(p);
842 return err;
843 }
844
845 static int ip_mcast_join_leave(struct sock *sk, int optname,
846 sockptr_t optval, int optlen)
847 {
848 struct ip_mreqn mreq = { };
849 struct sockaddr_in *psin;
850 struct group_req greq;
851
852 if (optlen < sizeof(struct group_req))
853 return -EINVAL;
854 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
855 return -EFAULT;
856
857 psin = (struct sockaddr_in *)&greq.gr_group;
858 if (psin->sin_family != AF_INET)
859 return -EINVAL;
860 mreq.imr_multiaddr = psin->sin_addr;
861 mreq.imr_ifindex = greq.gr_interface;
862 if (optname == MCAST_JOIN_GROUP)
863 return ip_mc_join_group(sk, &mreq);
864 return ip_mc_leave_group(sk, &mreq);
865 }
866
867 static int compat_ip_mcast_join_leave(struct sock *sk, int optname,
868 sockptr_t optval, int optlen)
869 {
870 struct compat_group_req greq;
871 struct ip_mreqn mreq = { };
872 struct sockaddr_in *psin;
873
874 if (optlen < sizeof(struct compat_group_req))
875 return -EINVAL;
876 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
877 return -EFAULT;
878
879 psin = (struct sockaddr_in *)&greq.gr_group;
880 if (psin->sin_family != AF_INET)
881 return -EINVAL;
882 mreq.imr_multiaddr = psin->sin_addr;
883 mreq.imr_ifindex = greq.gr_interface;
884
885 if (optname == MCAST_JOIN_GROUP)
886 return ip_mc_join_group(sk, &mreq);
887 return ip_mc_leave_group(sk, &mreq);
888 }
889
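/* Flipped on the first time any socket sets a non-zero IP_MINTTL, so the
 * receive paths only pay for the minimum-TTL check once someone actually
 * uses the option.
 */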
890 DEFINE_STATIC_KEY_FALSE(ip4_min_ttl);
891
892 int do_ip_setsockopt(struct sock *sk, int level, int optname,
893 sockptr_t optval, unsigned int optlen)
894 {
895 struct inet_sock *inet = inet_sk(sk);
896 struct net *net = sock_net(sk);
897 int val = 0, err;
898 bool needs_rtnl = setsockopt_needs_rtnl(optname);
899
900 switch (optname) {
901 case IP_PKTINFO:
902 case IP_RECVTTL:
903 case IP_RECVOPTS:
904 case IP_RECVTOS:
905 case IP_RETOPTS:
906 case IP_TOS:
907 case IP_TTL:
908 case IP_HDRINCL:
909 case IP_MTU_DISCOVER:
910 case IP_RECVERR:
911 case IP_ROUTER_ALERT:
912 case IP_FREEBIND:
913 case IP_PASSSEC:
914 case IP_TRANSPARENT:
915 case IP_MINTTL:
916 case IP_NODEFRAG:
917 case IP_BIND_ADDRESS_NO_PORT:
918 case IP_UNICAST_IF:
919 case IP_MULTICAST_TTL:
920 case IP_MULTICAST_ALL:
921 case IP_MULTICAST_LOOP:
922 case IP_RECVORIGDSTADDR:
923 case IP_CHECKSUM:
924 case IP_RECVFRAGSIZE:
925 case IP_RECVERR_RFC4884:
926 case IP_LOCAL_PORT_RANGE:
927 if (optlen >= sizeof(int)) {
928 if (copy_from_sockptr(&val, optval, sizeof(val)))
929 return -EFAULT;
930 } else if (optlen >= sizeof(char)) {
931 unsigned char ucval;
932
933 if (copy_from_sockptr(&ucval, optval, sizeof(ucval)))
934 return -EFAULT;
935 val = (int) ucval;
936 }
937 }
938
939 /* If optlen==0, it is equivalent to val == 0 */
940
941 if (optname == IP_ROUTER_ALERT)
942 return ip_ra_control(sk, val ? 1 : 0, NULL);
943 if (ip_mroute_opt(optname))
944 return ip_mroute_setsockopt(sk, optname, optval, optlen);
945
946 err = 0;
947 if (needs_rtnl)
948 rtnl_lock();
949 sockopt_lock_sock(sk);
950
951 switch (optname) {
952 case IP_OPTIONS:
953 {
954 struct ip_options_rcu *old, *opt = NULL;
955
956 if (optlen > 40)
957 goto e_inval;
958 err = ip_options_get(sock_net(sk), &opt, optval, optlen);
959 if (err)
960 break;
961 old = rcu_dereference_protected(inet->inet_opt,
962 lockdep_sock_is_held(sk));
963 if (inet->is_icsk) {
964 struct inet_connection_sock *icsk = inet_csk(sk);
965 #if IS_ENABLED(CONFIG_IPV6)
966 if (sk->sk_family == PF_INET ||
967 (!((1 << sk->sk_state) &
968 (TCPF_LISTEN | TCPF_CLOSE)) &&
969 inet->inet_daddr != LOOPBACK4_IPV6)) {
970 #endif
971 if (old)
972 icsk->icsk_ext_hdr_len -= old->opt.optlen;
973 if (opt)
974 icsk->icsk_ext_hdr_len += opt->opt.optlen;
975 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
976 #if IS_ENABLED(CONFIG_IPV6)
977 }
978 #endif
979 }
980 rcu_assign_pointer(inet->inet_opt, opt);
981 if (old)
982 kfree_rcu(old, rcu);
983 break;
984 }
985 case IP_PKTINFO:
986 if (val)
987 inet->cmsg_flags |= IP_CMSG_PKTINFO;
988 else
989 inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
990 break;
991 case IP_RECVTTL:
992 if (val)
993 inet->cmsg_flags |= IP_CMSG_TTL;
994 else
995 inet->cmsg_flags &= ~IP_CMSG_TTL;
996 break;
997 case IP_RECVTOS:
998 if (val)
999 inet->cmsg_flags |= IP_CMSG_TOS;
1000 else
1001 inet->cmsg_flags &= ~IP_CMSG_TOS;
1002 break;
1003 case IP_RECVOPTS:
1004 if (val)
1005 inet->cmsg_flags |= IP_CMSG_RECVOPTS;
1006 else
1007 inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
1008 break;
1009 case IP_RETOPTS:
1010 if (val)
1011 inet->cmsg_flags |= IP_CMSG_RETOPTS;
1012 else
1013 inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
1014 break;
1015 case IP_PASSSEC:
1016 if (val)
1017 inet->cmsg_flags |= IP_CMSG_PASSSEC;
1018 else
1019 inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
1020 break;
1021 case IP_RECVORIGDSTADDR:
1022 if (val)
1023 inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
1024 else
1025 inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
1026 break;
1027 case IP_CHECKSUM:
1028 if (val) {
1029 if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
1030 inet_inc_convert_csum(sk);
1031 inet->cmsg_flags |= IP_CMSG_CHECKSUM;
1032 }
1033 } else {
1034 if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
1035 inet_dec_convert_csum(sk);
1036 inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
1037 }
1038 }
1039 break;
1040 case IP_RECVFRAGSIZE:
1041 if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
1042 goto e_inval;
1043 if (val)
1044 inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE;
1045 else
1046 inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE;
1047 break;
1048 case IP_TOS: /* This sets both TOS and Precedence */
1049 __ip_sock_set_tos(sk, val);
1050 break;
1051 case IP_TTL:
1052 if (optlen < 1)
1053 goto e_inval;
1054 if (val != -1 && (val < 1 || val > 255))
1055 goto e_inval;
1056 inet->uc_ttl = val;
1057 break;
1058 case IP_HDRINCL:
1059 if (sk->sk_type != SOCK_RAW) {
1060 err = -ENOPROTOOPT;
1061 break;
1062 }
1063 inet->hdrincl = val ? 1 : 0;
1064 break;
1065 case IP_NODEFRAG:
1066 if (sk->sk_type != SOCK_RAW) {
1067 err = -ENOPROTOOPT;
1068 break;
1069 }
1070 inet->nodefrag = val ? 1 : 0;
1071 break;
1072 case IP_BIND_ADDRESS_NO_PORT:
1073 inet->bind_address_no_port = val ? 1 : 0;
1074 break;
1075 case IP_MTU_DISCOVER:
1076 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
1077 goto e_inval;
1078 inet->pmtudisc = val;
1079 break;
1080 case IP_RECVERR:
1081 inet->recverr = !!val;
1082 if (!val)
1083 skb_queue_purge(&sk->sk_error_queue);
1084 break;
1085 case IP_RECVERR_RFC4884:
1086 if (val < 0 || val > 1)
1087 goto e_inval;
1088 inet->recverr_rfc4884 = !!val;
1089 break;
1090 case IP_MULTICAST_TTL:
1091 if (sk->sk_type == SOCK_STREAM)
1092 goto e_inval;
1093 if (optlen < 1)
1094 goto e_inval;
1095 if (val == -1)
1096 val = 1;
1097 if (val < 0 || val > 255)
1098 goto e_inval;
1099 inet->mc_ttl = val;
1100 break;
1101 case IP_MULTICAST_LOOP:
1102 if (optlen < 1)
1103 goto e_inval;
1104 inet->mc_loop = !!val;
1105 break;
1106 case IP_UNICAST_IF:
1107 {
1108 struct net_device *dev = NULL;
1109 int ifindex;
1110 int midx;
1111
1112 if (optlen != sizeof(int))
1113 goto e_inval;
1114
1115 ifindex = (__force int)ntohl((__force __be32)val);
1116 if (ifindex == 0) {
1117 inet->uc_index = 0;
1118 err = 0;
1119 break;
1120 }
1121
1122 dev = dev_get_by_index(sock_net(sk), ifindex);
1123 err = -EADDRNOTAVAIL;
1124 if (!dev)
1125 break;
1126
1127 midx = l3mdev_master_ifindex(dev);
1128 dev_put(dev);
1129
1130 err = -EINVAL;
1131 if (sk->sk_bound_dev_if && midx != sk->sk_bound_dev_if)
1132 break;
1133
1134 inet->uc_index = ifindex;
1135 err = 0;
1136 break;
1137 }
1138 case IP_MULTICAST_IF:
1139 {
1140 struct ip_mreqn mreq;
1141 struct net_device *dev = NULL;
1142 int midx;
1143
1144 if (sk->sk_type == SOCK_STREAM)
1145 goto e_inval;
1146 /*
1147 * Check the arguments are allowable
1148 */
1149
1150 if (optlen < sizeof(struct in_addr))
1151 goto e_inval;
1152
1153 err = -EFAULT;
1154 if (optlen >= sizeof(struct ip_mreqn)) {
1155 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1156 break;
1157 } else {
1158 memset(&mreq, 0, sizeof(mreq));
1159 if (optlen >= sizeof(struct ip_mreq)) {
1160 if (copy_from_sockptr(&mreq, optval,
1161 sizeof(struct ip_mreq)))
1162 break;
1163 } else if (optlen >= sizeof(struct in_addr)) {
1164 if (copy_from_sockptr(&mreq.imr_address, optval,
1165 sizeof(struct in_addr)))
1166 break;
1167 }
1168 }
1169
1170 if (!mreq.imr_ifindex) {
1171 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
1172 inet->mc_index = 0;
1173 inet->mc_addr = 0;
1174 err = 0;
1175 break;
1176 }
1177 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
1178 if (dev)
1179 mreq.imr_ifindex = dev->ifindex;
1180 } else
1181 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
1182
1183
1184 err = -EADDRNOTAVAIL;
1185 if (!dev)
1186 break;
1187
1188 midx = l3mdev_master_ifindex(dev);
1189
1190 dev_put(dev);
1191
1192 err = -EINVAL;
1193 if (sk->sk_bound_dev_if &&
1194 mreq.imr_ifindex != sk->sk_bound_dev_if &&
1195 midx != sk->sk_bound_dev_if)
1196 break;
1197
1198 inet->mc_index = mreq.imr_ifindex;
1199 inet->mc_addr = mreq.imr_address.s_addr;
1200 err = 0;
1201 break;
1202 }
1203
1204 case IP_ADD_MEMBERSHIP:
1205 case IP_DROP_MEMBERSHIP:
1206 {
1207 struct ip_mreqn mreq;
1208
1209 err = -EPROTO;
1210 if (inet_sk(sk)->is_icsk)
1211 break;
1212
1213 if (optlen < sizeof(struct ip_mreq))
1214 goto e_inval;
1215 err = -EFAULT;
1216 if (optlen >= sizeof(struct ip_mreqn)) {
1217 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1218 break;
1219 } else {
1220 memset(&mreq, 0, sizeof(mreq));
1221 if (copy_from_sockptr(&mreq, optval,
1222 sizeof(struct ip_mreq)))
1223 break;
1224 }
1225
1226 if (optname == IP_ADD_MEMBERSHIP)
1227 err = ip_mc_join_group(sk, &mreq);
1228 else
1229 err = ip_mc_leave_group(sk, &mreq);
1230 break;
1231 }
1232 case IP_MSFILTER:
1233 {
1234 struct ip_msfilter *msf;
1235
1236 if (optlen < IP_MSFILTER_SIZE(0))
1237 goto e_inval;
1238 if (optlen > READ_ONCE(sysctl_optmem_max)) {
1239 err = -ENOBUFS;
1240 break;
1241 }
1242 msf = memdup_sockptr(optval, optlen);
1243 if (IS_ERR(msf)) {
1244 err = PTR_ERR(msf);
1245 break;
1246 }
1247 /* numsrc >= (1G-4) overflow in 32 bits */
1248 if (msf->imsf_numsrc >= 0x3ffffffcU ||
1249 msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
1250 kfree(msf);
1251 err = -ENOBUFS;
1252 break;
1253 }
1254 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
1255 kfree(msf);
1256 err = -EINVAL;
1257 break;
1258 }
1259 err = ip_mc_msfilter(sk, msf, 0);
1260 kfree(msf);
1261 break;
1262 }
1263 case IP_BLOCK_SOURCE:
1264 case IP_UNBLOCK_SOURCE:
1265 case IP_ADD_SOURCE_MEMBERSHIP:
1266 case IP_DROP_SOURCE_MEMBERSHIP:
1267 {
1268 struct ip_mreq_source mreqs;
1269 int omode, add;
1270
1271 if (optlen != sizeof(struct ip_mreq_source))
1272 goto e_inval;
1273 if (copy_from_sockptr(&mreqs, optval, sizeof(mreqs))) {
1274 err = -EFAULT;
1275 break;
1276 }
1277 if (optname == IP_BLOCK_SOURCE) {
1278 omode = MCAST_EXCLUDE;
1279 add = 1;
1280 } else if (optname == IP_UNBLOCK_SOURCE) {
1281 omode = MCAST_EXCLUDE;
1282 add = 0;
1283 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
1284 struct ip_mreqn mreq;
1285
1286 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
1287 mreq.imr_address.s_addr = mreqs.imr_interface;
1288 mreq.imr_ifindex = 0;
1289 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
1290 if (err && err != -EADDRINUSE)
1291 break;
1292 omode = MCAST_INCLUDE;
1293 add = 1;
1294 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
1295 omode = MCAST_INCLUDE;
1296 add = 0;
1297 }
1298 err = ip_mc_source(add, omode, sk, &mreqs, 0);
1299 break;
1300 }
1301 case MCAST_JOIN_GROUP:
1302 case MCAST_LEAVE_GROUP:
1303 if (in_compat_syscall())
1304 err = compat_ip_mcast_join_leave(sk, optname, optval,
1305 optlen);
1306 else
1307 err = ip_mcast_join_leave(sk, optname, optval, optlen);
1308 break;
1309 case MCAST_JOIN_SOURCE_GROUP:
1310 case MCAST_LEAVE_SOURCE_GROUP:
1311 case MCAST_BLOCK_SOURCE:
1312 case MCAST_UNBLOCK_SOURCE:
1313 err = do_mcast_group_source(sk, optname, optval, optlen);
1314 break;
1315 case MCAST_MSFILTER:
1316 if (in_compat_syscall())
1317 err = compat_ip_set_mcast_msfilter(sk, optval, optlen);
1318 else
1319 err = ip_set_mcast_msfilter(sk, optval, optlen);
1320 break;
1321 case IP_MULTICAST_ALL:
1322 if (optlen < 1)
1323 goto e_inval;
1324 if (val != 0 && val != 1)
1325 goto e_inval;
1326 inet->mc_all = val;
1327 break;
1328
1329 case IP_FREEBIND:
1330 if (optlen < 1)
1331 goto e_inval;
1332 inet->freebind = !!val;
1333 break;
1334
1335 case IP_IPSEC_POLICY:
1336 case IP_XFRM_POLICY:
1337 err = -EPERM;
1338 if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1339 break;
1340 err = xfrm_user_policy(sk, optname, optval, optlen);
1341 break;
1342
1343 case IP_TRANSPARENT:
1344 if (!!val && !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1345 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1346 err = -EPERM;
1347 break;
1348 }
1349 if (optlen < 1)
1350 goto e_inval;
1351 inet->transparent = !!val;
1352 break;
1353
1354 case IP_MINTTL:
1355 if (optlen < 1)
1356 goto e_inval;
1357 if (val < 0 || val > 255)
1358 goto e_inval;
1359
1360 if (val)
1361 static_branch_enable(&ip4_min_ttl);
1362
1363 /* tcp_v4_err() and tcp_v4_rcv() might read min_ttl
1364 * while we are changing it.
1365 */
1366 WRITE_ONCE(inet->min_ttl, val);
1367 break;
1368
1369 case IP_LOCAL_PORT_RANGE:
1370 {
1371 const __u16 lo = val;
1372 const __u16 hi = val >> 16;
1373
1374 if (optlen != sizeof(__u32))
1375 goto e_inval;
1376 if (lo != 0 && hi != 0 && lo > hi)
1377 goto e_inval;
1378
1379 inet->local_port_range.lo = lo;
1380 inet->local_port_range.hi = hi;
1381 break;
1382 }
1383 default:
1384 err = -ENOPROTOOPT;
1385 break;
1386 }
1387 sockopt_release_sock(sk);
1388 if (needs_rtnl)
1389 rtnl_unlock();
1390 return err;
1391
1392 e_inval:
1393 sockopt_release_sock(sk);
1394 if (needs_rtnl)
1395 rtnl_unlock();
1396 return -EINVAL;
1397 }
1398
1399 /**
1400 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1401 * @sk: socket
1402 * @skb: buffer
1403 *
1404 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
1405 * destination in skb->cb[] before the dst is dropped.
1406 * This way, the receiver doesn't take cache line misses reading the rtable.
1407 */
1408 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1409 {
1410 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1411 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
1412 ipv6_sk_rxinfo(sk);
1413
1414 if (prepare && skb_rtable(skb)) {
1415 /* skb->cb is overloaded: prior to this point it is IP{6}CB
1416 * which has interface index (iif) as the first member of the
1417 * underlying inet{6}_skb_parm struct. This code then overlays
1418 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
1419 * element so the iif is picked up from the prior IPCB. If iif
1420 * is the loopback interface, then return the sending interface
1421 * (e.g., process binds socket to eth0 for Tx which is
1422 * redirected to loopback in the rtable/dst).
1423 */
1424 struct rtable *rt = skb_rtable(skb);
1425 bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
1426
1427 if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
1428 pktinfo->ipi_ifindex = inet_iif(skb);
1429 else if (l3slave && rt && rt->rt_iif)
1430 pktinfo->ipi_ifindex = rt->rt_iif;
1431
1432 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1433 } else {
1434 pktinfo->ipi_ifindex = 0;
1435 pktinfo->ipi_spec_dst.s_addr = 0;
1436 }
1437 skb_dst_drop(skb);
1438 }
1439
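/* Entry point for setsockopt(SOL_IP).  Options that do_ip_setsockopt() does
 * not recognise (-ENOPROTOOPT) are offered to netfilter below, and the
 * BPFILTER option range is forwarded to the bpfilter user-mode helper.
 */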
1440 int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1441 unsigned int optlen)
1442 {
1443 int err;
1444
1445 if (level != SOL_IP)
1446 return -ENOPROTOOPT;
1447
1448 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1449 #if IS_ENABLED(CONFIG_BPFILTER_UMH)
1450 if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
1451 optname < BPFILTER_IPT_SET_MAX)
1452 err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
1453 #endif
1454 #ifdef CONFIG_NETFILTER
1455 /* we need to exclude all possible ENOPROTOOPTs except default case */
1456 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1457 optname != IP_IPSEC_POLICY &&
1458 optname != IP_XFRM_POLICY &&
1459 !ip_mroute_opt(optname))
1460 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1461 #endif
1462 return err;
1463 }
1464 EXPORT_SYMBOL(ip_setsockopt);
1465
1466 /*
1467 * Get the options. Note for future reference: the GET of IP options gets
1468 * the _received_ ones, while the SET sets the _sent_ ones.
1469 */
1470
1471 static bool getsockopt_needs_rtnl(int optname)
1472 {
1473 switch (optname) {
1474 case IP_MSFILTER:
1475 case MCAST_MSFILTER:
1476 return true;
1477 }
1478 return false;
1479 }
1480
1481 static int ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
1482 sockptr_t optlen, int len)
1483 {
1484 const int size0 = offsetof(struct group_filter, gf_slist_flex);
1485 struct group_filter gsf;
1486 int num, gsf_size;
1487 int err;
1488
1489 if (len < size0)
1490 return -EINVAL;
1491 if (copy_from_sockptr(&gsf, optval, size0))
1492 return -EFAULT;
1493
1494 num = gsf.gf_numsrc;
1495 err = ip_mc_gsfget(sk, &gsf, optval,
1496 offsetof(struct group_filter, gf_slist_flex));
1497 if (err)
1498 return err;
1499 if (gsf.gf_numsrc < num)
1500 num = gsf.gf_numsrc;
1501 gsf_size = GROUP_FILTER_SIZE(num);
1502 if (copy_to_sockptr(optlen, &gsf_size, sizeof(int)) ||
1503 copy_to_sockptr(optval, &gsf, size0))
1504 return -EFAULT;
1505 return 0;
1506 }
1507
1508 static int compat_ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
1509 sockptr_t optlen, int len)
1510 {
1511 const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
1512 struct compat_group_filter gf32;
1513 struct group_filter gf;
1514 int num;
1515 int err;
1516
1517 if (len < size0)
1518 return -EINVAL;
1519 if (copy_from_sockptr(&gf32, optval, size0))
1520 return -EFAULT;
1521
1522 gf.gf_interface = gf32.gf_interface;
1523 gf.gf_fmode = gf32.gf_fmode;
1524 num = gf.gf_numsrc = gf32.gf_numsrc;
1525 gf.gf_group = gf32.gf_group;
1526
1527 err = ip_mc_gsfget(sk, &gf, optval,
1528 offsetof(struct compat_group_filter, gf_slist_flex));
1529 if (err)
1530 return err;
1531 if (gf.gf_numsrc < num)
1532 num = gf.gf_numsrc;
1533 len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
1534 if (copy_to_sockptr(optlen, &len, sizeof(int)) ||
1535 copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_fmode),
1536 &gf.gf_fmode, sizeof(gf.gf_fmode)) ||
1537 copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_numsrc),
1538 &gf.gf_numsrc, sizeof(gf.gf_numsrc)))
1539 return -EFAULT;
1540 return 0;
1541 }
1542
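/* Core getsockopt(SOL_IP) handler.  Most options return an int, but when the
 * caller passes a buffer shorter than sizeof(int) and the value fits in a
 * byte, a single byte is returned instead (see the copy-out at the end).
 */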
1543 int do_ip_getsockopt(struct sock *sk, int level, int optname,
1544 sockptr_t optval, sockptr_t optlen)
1545 {
1546 struct inet_sock *inet = inet_sk(sk);
1547 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1548 int val, err = 0;
1549 int len;
1550
1551 if (level != SOL_IP)
1552 return -EOPNOTSUPP;
1553
1554 if (ip_mroute_opt(optname))
1555 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1556
1557 if (copy_from_sockptr(&len, optlen, sizeof(int)))
1558 return -EFAULT;
1559 if (len < 0)
1560 return -EINVAL;
1561
1562 if (needs_rtnl)
1563 rtnl_lock();
1564 sockopt_lock_sock(sk);
1565
1566 switch (optname) {
1567 case IP_OPTIONS:
1568 {
1569 unsigned char optbuf[sizeof(struct ip_options)+40];
1570 struct ip_options *opt = (struct ip_options *)optbuf;
1571 struct ip_options_rcu *inet_opt;
1572
1573 inet_opt = rcu_dereference_protected(inet->inet_opt,
1574 lockdep_sock_is_held(sk));
1575 opt->optlen = 0;
1576 if (inet_opt)
1577 memcpy(optbuf, &inet_opt->opt,
1578 sizeof(struct ip_options) +
1579 inet_opt->opt.optlen);
1580 sockopt_release_sock(sk);
1581
1582 if (opt->optlen == 0) {
1583 len = 0;
1584 return copy_to_sockptr(optlen, &len, sizeof(int));
1585 }
1586
1587 ip_options_undo(opt);
1588
1589 len = min_t(unsigned int, len, opt->optlen);
1590 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1591 return -EFAULT;
1592 if (copy_to_sockptr(optval, opt->__data, len))
1593 return -EFAULT;
1594 return 0;
1595 }
1596 case IP_PKTINFO:
1597 val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
1598 break;
1599 case IP_RECVTTL:
1600 val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
1601 break;
1602 case IP_RECVTOS:
1603 val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
1604 break;
1605 case IP_RECVOPTS:
1606 val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
1607 break;
1608 case IP_RETOPTS:
1609 val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
1610 break;
1611 case IP_PASSSEC:
1612 val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
1613 break;
1614 case IP_RECVORIGDSTADDR:
1615 val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
1616 break;
1617 case IP_CHECKSUM:
1618 val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
1619 break;
1620 case IP_RECVFRAGSIZE:
1621 val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0;
1622 break;
1623 case IP_TOS:
1624 val = inet->tos;
1625 break;
1626 case IP_TTL:
1627 {
1628 struct net *net = sock_net(sk);
1629 val = (inet->uc_ttl == -1 ?
1630 READ_ONCE(net->ipv4.sysctl_ip_default_ttl) :
1631 inet->uc_ttl);
1632 break;
1633 }
1634 case IP_HDRINCL:
1635 val = inet->hdrincl;
1636 break;
1637 case IP_NODEFRAG:
1638 val = inet->nodefrag;
1639 break;
1640 case IP_BIND_ADDRESS_NO_PORT:
1641 val = inet->bind_address_no_port;
1642 break;
1643 case IP_MTU_DISCOVER:
1644 val = inet->pmtudisc;
1645 break;
1646 case IP_MTU:
1647 {
1648 struct dst_entry *dst;
1649 val = 0;
1650 dst = sk_dst_get(sk);
1651 if (dst) {
1652 val = dst_mtu(dst);
1653 dst_release(dst);
1654 }
1655 if (!val) {
1656 sockopt_release_sock(sk);
1657 return -ENOTCONN;
1658 }
1659 break;
1660 }
1661 case IP_RECVERR:
1662 val = inet->recverr;
1663 break;
1664 case IP_RECVERR_RFC4884:
1665 val = inet->recverr_rfc4884;
1666 break;
1667 case IP_MULTICAST_TTL:
1668 val = inet->mc_ttl;
1669 break;
1670 case IP_MULTICAST_LOOP:
1671 val = inet->mc_loop;
1672 break;
1673 case IP_UNICAST_IF:
1674 val = (__force int)htonl((__u32) inet->uc_index);
1675 break;
1676 case IP_MULTICAST_IF:
1677 {
1678 struct in_addr addr;
1679 len = min_t(unsigned int, len, sizeof(struct in_addr));
1680 addr.s_addr = inet->mc_addr;
1681 sockopt_release_sock(sk);
1682
1683 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1684 return -EFAULT;
1685 if (copy_to_sockptr(optval, &addr, len))
1686 return -EFAULT;
1687 return 0;
1688 }
1689 case IP_MSFILTER:
1690 {
1691 struct ip_msfilter msf;
1692
1693 if (len < IP_MSFILTER_SIZE(0)) {
1694 err = -EINVAL;
1695 goto out;
1696 }
1697 if (copy_from_sockptr(&msf, optval, IP_MSFILTER_SIZE(0))) {
1698 err = -EFAULT;
1699 goto out;
1700 }
1701 err = ip_mc_msfget(sk, &msf, optval, optlen);
1702 goto out;
1703 }
1704 case MCAST_MSFILTER:
1705 if (in_compat_syscall())
1706 err = compat_ip_get_mcast_msfilter(sk, optval, optlen,
1707 len);
1708 else
1709 err = ip_get_mcast_msfilter(sk, optval, optlen, len);
1710 goto out;
1711 case IP_MULTICAST_ALL:
1712 val = inet->mc_all;
1713 break;
1714 case IP_PKTOPTIONS:
1715 {
1716 struct msghdr msg;
1717
1718 sockopt_release_sock(sk);
1719
1720 if (sk->sk_type != SOCK_STREAM)
1721 return -ENOPROTOOPT;
1722
1723 if (optval.is_kernel) {
1724 msg.msg_control_is_user = false;
1725 msg.msg_control = optval.kernel;
1726 } else {
1727 msg.msg_control_is_user = true;
1728 msg.msg_control_user = optval.user;
1729 }
1730 msg.msg_controllen = len;
1731 msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
1732
1733 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1734 struct in_pktinfo info;
1735
1736 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1737 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1738 info.ipi_ifindex = inet->mc_index;
1739 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1740 }
1741 if (inet->cmsg_flags & IP_CMSG_TTL) {
1742 int hlim = inet->mc_ttl;
1743 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1744 }
1745 if (inet->cmsg_flags & IP_CMSG_TOS) {
1746 int tos = inet->rcv_tos;
1747 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1748 }
1749 len -= msg.msg_controllen;
1750 return copy_to_sockptr(optlen, &len, sizeof(int));
1751 }
1752 case IP_FREEBIND:
1753 val = inet->freebind;
1754 break;
1755 case IP_TRANSPARENT:
1756 val = inet->transparent;
1757 break;
1758 case IP_MINTTL:
1759 val = inet->min_ttl;
1760 break;
1761 case IP_LOCAL_PORT_RANGE:
1762 val = inet->local_port_range.hi << 16 | inet->local_port_range.lo;
1763 break;
1764 default:
1765 sockopt_release_sock(sk);
1766 return -ENOPROTOOPT;
1767 }
1768 sockopt_release_sock(sk);
1769
1770 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1771 unsigned char ucval = (unsigned char)val;
1772 len = 1;
1773 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1774 return -EFAULT;
1775 if (copy_to_sockptr(optval, &ucval, 1))
1776 return -EFAULT;
1777 } else {
1778 len = min_t(unsigned int, sizeof(int), len);
1779 if (copy_to_sockptr(optlen, &len, sizeof(int)))
1780 return -EFAULT;
1781 if (copy_to_sockptr(optval, &val, len))
1782 return -EFAULT;
1783 }
1784 return 0;
1785
1786 out:
1787 sockopt_release_sock(sk);
1788 if (needs_rtnl)
1789 rtnl_unlock();
1790 return err;
1791 }
1792
1793 int ip_getsockopt(struct sock *sk, int level,
1794 int optname, char __user *optval, int __user *optlen)
1795 {
1796 int err;
1797
1798 err = do_ip_getsockopt(sk, level, optname,
1799 USER_SOCKPTR(optval), USER_SOCKPTR(optlen));
1800
1801 #if IS_ENABLED(CONFIG_BPFILTER_UMH)
1802 if (optname >= BPFILTER_IPT_SO_GET_INFO &&
1803 optname < BPFILTER_IPT_GET_MAX)
1804 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
1805 #endif
1806 #ifdef CONFIG_NETFILTER
1807 /* we need to exclude all possible ENOPROTOOPTs except default case */
1808 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1809 !ip_mroute_opt(optname)) {
1810 int len;
1811
1812 if (get_user(len, optlen))
1813 return -EFAULT;
1814
1815 err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
1816 if (err >= 0)
1817 err = put_user(len, optlen);
1818 return err;
1819 }
1820 #endif
1821 return err;
1822 }
1823 EXPORT_SYMBOL(ip_getsockopt);
1824