// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
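/* Return whether @pmctx is on the IPv4 (resp. IPv6) multicast router
 * port list, and report the remaining router timer value via @timer.
 */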
static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}
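/* Worst-case attribute size of a single MDBA_ROUTER_PORT nest. */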
static size_t __br_rports_one_size(void)
{
	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
	       nla_total_size(sizeof(u32)); /* MDBA_ROUTER_PATTR_VID */
}

size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();

#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}
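/* Fill a nested MDBA_ROUTER attribute with one MDBA_ROUTER_PORT nest
 * per port that currently acts as an IPv4 and/or IPv6 multicast router
 * in the given bridge (or per-VLAN) multicast context.
 */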
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
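/* Translate the internal MDB_PG_FLAGS_* port group flags into the
 * MDB_FLAGS_* values exposed in struct br_mdb_entry.
 */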
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}
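/* Dump the source list of port group @p as a nested
 * MDBA_MDB_EATTR_SRC_LIST attribute: one MDBA_MDB_SRCLIST_ENTRY nest
 * (address + timer) per source entry.
 */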
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
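/* Fill one MDBA_MDB_ENTRY_INFO nest describing MDB entry @mp. A NULL
 * @p denotes a host-joined group on the bridge device itself;
 * otherwise the port group @p is dumped, including its source list and
 * filter mode when IGMPv3/MLDv2 is active.
 */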
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP)) {
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (mp->addr.proto == htons(ETH_P_IPV6)) {
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	} else {
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
		e.state = MDB_PERMANENT;
	}
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}
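/* Dump the MDB of one bridge into MDBA_MDB/MDBA_MDB_ENTRY nests.
 * cb->args[1] and cb->args[2] store the entry and port group indices
 * used to resume an interrupted dump.
 */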
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}
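/* RTM_GETMDB dump handler: walk all bridge masters in the namespace
 * and emit their MDB entries and multicast router ports. cb->args[0]
 * remembers the device index at which the previous pass stopped.
 */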
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	for_each_netdev_rcu(net, dev) {
		if (netif_is_bridge_master(dev)) {
			struct net_bridge *br = netdev_priv(dev);
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, &br->multicast_ctx) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
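/* Build a complete RTM_NEWMDB/RTM_DELMDB notification for a single MDB
 * entry (and optional port group) into @skb.
 */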
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
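/* Size the notification skb for @pg, including the optional source
 * address, group mode and source list attributes that
 * __mdb_fill_info() may emit.
 */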
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}
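/* Notify switchdev drivers and RTNLGRP_MDB listeners about an MDB
 * change. @type is RTM_NEWMDB or RTM_DELMDB; @pg is NULL for host
 * joins.
 */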
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	br_switchdev_mdb_notify(dev, mp, pg, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}
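/* Notify RTNLGRP_MDB listeners that a port (or port VLAN) gained or
 * lost multicast router status. A NULL @pmctx is reported with ifindex
 * and VID of zero.
 */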
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static const struct nla_policy
br_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
	[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
						  sizeof(struct in_addr),
						  sizeof(struct in6_addr)),
};

static const struct nla_policy
br_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
	[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(br_mdbe_src_list_entry_pol),
};

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
	[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
						  MCAST_INCLUDE),
	[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(br_mdbe_src_list_pol),
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};
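/* Validate a user-supplied struct br_mdb_entry: attribute length,
 * ifindex, group address (IPv4, IPv6 or L2), entry state and VLAN id.
 */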
static int validate_mdb_entry(const struct nlattr *attr,
			      struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return -EINVAL;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return -EINVAL;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return -EINVAL;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return -EINVAL;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	return 0;
}

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}
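/* Select the multicast context for a new entry: the bridge-global one,
 * or the matching per-VLAN context when VLAN snooping is enabled.
 * Returns NULL with extack set if no usable context exists.
 */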
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags)
{
	unsigned long now = jiffies;

	pg->flags = flags;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	return 0;
}
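/* Join a port to an (S, G) group. The port list is kept sorted by port
 * pointer and an existing entry is only updated with NLM_F_REPLACE.
 * Existing (*, G) EXCLUDE ports are added to the new (S, G) entry so
 * replication stays correct.
 */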
static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_mcast *brmctx,
			       unsigned char flags,
			       struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(S, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
						       flags);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					MCAST_INCLUDE, cfg->rt_protocol, extack);
	if (unlikely(!p))
		return -ENOMEM;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);

	/* All of (*, G) EXCLUDE ports need to be added to the new (S, G) for
	 * proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto)) {
		struct net_bridge_mdb_entry *star_mp;
		struct br_ip star_group;

		star_group = p->key.addr;
		memset(&star_group.src, 0, sizeof(star_group.src));
		star_mp = br_mdb_ip_get(cfg->br, &star_group);
		if (star_mp)
			br_multicast_sg_add_exclude_ports(star_mp, p);
	}

	return 0;
}

static int br_mdb_add_group_src_fwd(const struct br_mdb_config *cfg,
				    struct br_ip *src_ip,
				    struct net_bridge_mcast *brmctx,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *sgmp;
	struct br_mdb_config sg_cfg;
	struct br_ip sg_ip;
	u8 flags = 0;

	sg_ip = cfg->group;
	sg_ip.src = src_ip->src;
	sgmp = br_multicast_new_group(cfg->br, &sg_ip);
	if (IS_ERR(sgmp)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to add (S, G) MDB entry");
		return PTR_ERR(sgmp);
	}

	if (cfg->entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;
	if (cfg->filter_mode == MCAST_EXCLUDE)
		flags |= MDB_PG_FLAGS_BLOCKED;

	memset(&sg_cfg, 0, sizeof(sg_cfg));
	sg_cfg.br = cfg->br;
	sg_cfg.p = cfg->p;
	sg_cfg.entry = cfg->entry;
	sg_cfg.group = sg_ip;
	sg_cfg.src_entry = true;
	sg_cfg.filter_mode = MCAST_INCLUDE;
	sg_cfg.rt_protocol = cfg->rt_protocol;
	sg_cfg.nlflags = cfg->nlflags;
	return br_mdb_add_group_sg(&sg_cfg, sgmp, brmctx, flags, extack);
}
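/* Add one source entry to a (*, G) port group (or update it with
 * NLM_F_REPLACE), arm its timer for temporary INCLUDE entries and
 * install the corresponding (S, G) forwarding entry.
 */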
static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
				struct net_bridge_port_group *pg,
				struct net_bridge_mcast *brmctx,
				struct br_mdb_src_entry *src,
				struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	int err;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (!ent) {
		ent = br_multicast_new_group_src(pg, &src->addr);
		if (!ent) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add new source entry");
			return -ENOSPC;
		}
	} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
		return -EEXIST;
	}

	if (cfg->filter_mode == MCAST_INCLUDE &&
	    cfg->entry->state == MDB_TEMPORARY)
		mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
	else
		del_timer(&ent->timer);

	/* Install a (S, G) forwarding entry for the source. */
	err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
	if (err)
		goto err_del_sg;

	ent->flags = BR_SGRP_F_INSTALLED | BR_SGRP_F_USER_ADDED;

	return 0;

err_del_sg:
	__br_multicast_del_group_src(ent);
	return err;
}

static void br_mdb_del_group_src(struct net_bridge_port_group *pg,
				 struct br_mdb_src_entry *src)
{
	struct net_bridge_group_src *ent;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (WARN_ON_ONCE(!ent))
		return;
	br_multicast_del_group_src(ent, false);
}

static int br_mdb_add_group_srcs(const struct br_mdb_config *cfg,
				 struct net_bridge_port_group *pg,
				 struct net_bridge_mcast *brmctx,
				 struct netlink_ext_ack *extack)
{
	int i, err;

	for (i = 0; i < cfg->num_src_entries; i++) {
		err = br_mdb_add_group_src(cfg, pg, brmctx,
					   &cfg->src_entries[i], extack);
		if (err)
			goto err_del_group_srcs;
	}

	return 0;

err_del_group_srcs:
	for (i--; i >= 0; i--)
		br_mdb_del_group_src(pg, &cfg->src_entries[i]);
	return err;
}
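/* Replace the source list of @pg with the user-supplied one: mark all
 * current entries for deletion, add the new set, then remove whatever
 * is still marked. On failure the old list is left untouched.
 */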
static int br_mdb_replace_group_srcs(const struct br_mdb_config *cfg,
				     struct net_bridge_port_group *pg,
				     struct net_bridge_mcast *brmctx,
				     struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int err;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	err = br_mdb_add_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		goto err_clear_delete;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_DELETE)
			br_multicast_del_group_src(ent, false);
	}

	return 0;

err_clear_delete:
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_DELETE;
	return err;
}

static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
				       struct net_bridge_mdb_entry *mp,
				       struct net_bridge_port_group *pg,
				       struct net_bridge_mcast *brmctx,
				       unsigned char flags,
				       struct netlink_ext_ack *extack)
{
	unsigned long now = jiffies;
	int err;

	err = br_mdb_replace_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		return err;

	pg->flags = flags;
	pg->filter_mode = cfg->filter_mode;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto))
		br_multicast_star_g_handle_mode(pg, cfg->filter_mode);

	return 0;
}
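/* Join a port to a (*, G) group, optionally replacing an existing
 * entry. A new EXCLUDE (*, G) port group is also propagated to the
 * matching (S, G) entries for proper replication.
 */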
static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags,
				   struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;
	int err;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(*, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
							   flags, extack);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					cfg->filter_mode, cfg->rt_protocol,
					extack);
	if (unlikely(!p))
		return -ENOMEM;

	err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
	if (err)
		goto err_del_port_group;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
	/* If we are adding a new EXCLUDE port group (*, G), it needs to be
	 * also added to all (S, G) entries for proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);

	return 0;

err_del_port_group:
	br_multicast_del_port_group(p);
	return err;
}
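/* Resolve the multicast context and MDB entry for @cfg, then dispatch
 * to the host join, (*, G) or (S, G) add path. Called with
 * multicast_lock held.
 */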
static int br_mdb_add_group(const struct br_mdb_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge_port *port = cfg->p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mcast *brmctx;
	struct br_ip group = cfg->group;
	unsigned char flags = 0;

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	mp = br_multicast_new_group(br, &group);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	if (br_multicast_is_star_g(&group))
		return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
	else
		return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
}

static int __br_mdb_add(const struct br_mdb_config *cfg,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&cfg->br->multicast_lock);
	ret = br_mdb_add_group(cfg, extack);
	spin_unlock_bh(&cfg->br->multicast_lock);

	return ret;
}

static int br_mdb_config_src_entry_init(struct nlattr *src_entry,
					struct br_mdb_src_entry *src,
					__be16 proto,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
			       br_mdbe_src_list_entry_pol, extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
		return -EINVAL;

	if (!is_valid_mdb_source(tb[MDBE_SRCATTR_ADDRESS], proto, extack))
		return -EINVAL;

	src->addr.proto = proto;
	nla_memcpy(&src->addr.src, tb[MDBE_SRCATTR_ADDRESS],
		   nla_len(tb[MDBE_SRCATTR_ADDRESS]));

	return 0;
}

static int br_mdb_config_src_list_init(struct nlattr *src_list,
				       struct br_mdb_config *cfg,
				       struct netlink_ext_ack *extack)
{
	struct nlattr *src_entry;
	int rem, err;
	int i = 0;

	nla_for_each_nested(src_entry, src_list, rem)
		cfg->num_src_entries++;

	if (cfg->num_src_entries >= PG_SRC_ENT_LIMIT) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Exceeded maximum number of source entries (%u)",
				       PG_SRC_ENT_LIMIT - 1);
		return -EINVAL;
	}

	cfg->src_entries = kcalloc(cfg->num_src_entries,
				   sizeof(struct br_mdb_src_entry), GFP_KERNEL);
	if (!cfg->src_entries)
		return -ENOMEM;

	nla_for_each_nested(src_entry, src_list, rem) {
		err = br_mdb_config_src_entry_init(src_entry,
						   &cfg->src_entries[i],
						   cfg->entry->addr.proto,
						   extack);
		if (err)
			goto err_src_entry_init;
		i++;
	}

	return 0;

err_src_entry_init:
	kfree(cfg->src_entries);
	return err;
}

static void br_mdb_config_src_list_fini(struct br_mdb_config *cfg)
{
	kfree(cfg->src_entries);
}

static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
				    struct br_mdb_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX, set_attrs,
			       br_mdbe_attrs_pol, extack);
	if (err)
		return err;

	if (mdb_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
				 cfg->entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(cfg->entry, &cfg->group, mdb_attrs);

	if (mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
			return -EINVAL;
		}
		cfg->filter_mode = nla_get_u8(mdb_attrs[MDBE_ATTR_GROUP_MODE]);
	} else {
		cfg->filter_mode = MCAST_EXCLUDE;
	}

	if (mdb_attrs[MDBE_ATTR_SRC_LIST]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
			return -EINVAL;
		}
		if (!mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
			return -EINVAL;
		}
		err = br_mdb_config_src_list_init(mdb_attrs[MDBE_ATTR_SRC_LIST],
						  cfg, extack);
		if (err)
			return err;
	}

	if (!cfg->num_src_entries && cfg->filter_mode == MCAST_INCLUDE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
		return -EINVAL;
	}

	if (mdb_attrs[MDBE_ATTR_RTPROT]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be set for host groups");
			return -EINVAL;
		}
		cfg->rt_protocol = nla_get_u8(mdb_attrs[MDBE_ATTR_RTPROT]);
	}

	return 0;
}

static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  validate_mdb_entry,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};
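/* Parse an RTM_NEWMDB/RTM_DELMDB request into a struct br_mdb_config:
 * resolve the bridge and optional port, validate the entry and decode
 * the optional MDBA_SET_ENTRY_ATTRS nest.
 */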
static int br_mdb_config_init(struct net *net, const struct nlmsghdr *nlh,
			      struct br_mdb_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
	if (err)
		return err;

	memset(cfg, 0, sizeof(*cfg));
	cfg->filter_mode = MCAST_EXCLUDE;
	cfg->rt_protocol = RTPROT_STATIC;
	cfg->nlflags = nlh->nlmsg_flags;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!netif_is_bridge_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	cfg->br = netdev_priv(dev);

	if (!netif_running(cfg->br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(cfg->br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);

	if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
		struct net_device *pdev;

		pdev = __dev_get_by_index(net, cfg->entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		cfg->p = br_port_get_rtnl(pdev);
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (cfg->p->br != cfg->br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	if (tb[MDBA_SET_ENTRY_ATTRS])
		return br_mdb_config_attrs_init(tb[MDBA_SET_ENTRY_ATTRS], cfg,
						extack);
	else
		__mdb_entry_to_br_ip(cfg->entry, &cfg->group, NULL);

	return 0;
}

static void br_mdb_config_fini(struct br_mdb_config *cfg)
{
	br_mdb_config_src_list_fini(cfg);
}
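/* RTM_NEWMDB handler. When VLAN filtering is enabled and no VLAN is
 * specified, the entry is installed on every VLAN configured on the
 * port.
 */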
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(net, nlh, &cfg, extack);
	if (err)
		return err;

	err = -EINVAL;
	/* host join errors which can happen before creating the group */
	if (!cfg.p && !br_group_is_l2(&cfg.group)) {
		/* don't allow any flags for host-joined IP groups */
		if (cfg.entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			goto out;
		}
		if (!br_multicast_is_star_g(&cfg.group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			goto out;
		}
	}

	if (br_group_is_l2(&cfg.group) && cfg.entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		goto out;
	}

	if (cfg.p) {
		if (cfg.p->state == BR_STATE_DISABLED && cfg.entry->state != MDB_PERMANENT) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
			goto out;
		}
		vg = nbp_vlan_group(cfg.p);
	} else {
		vg = br_vlan_group(cfg.br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_add(&cfg, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(&cfg, extack);
	}

out:
	br_mdb_config_fini(&cfg);
	return err;
}
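/* Delete a host-joined group or the port group matching @cfg, under
 * multicast_lock. Returns -EINVAL when no matching entry is found.
 */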
static int __br_mdb_del(const struct br_mdb_config *cfg)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip = cfg->group;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(net, nlh, &cfg, extack);
	if (err)
		return err;

	if (cfg.p)
		vg = nbp_vlan_group(cfg.p);
	else
		vg = br_vlan_group(cfg.br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_del(&cfg);
		}
	} else {
		err = __br_mdb_del(&cfg);
	}

	br_mdb_config_fini(&cfg);
	return err;
}
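/* Register/unregister the PF_BRIDGE RTM_{GET,NEW,DEL}MDB rtnetlink
 * handlers.
 */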
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}