Lines matching refs: idev (net/ipv6/mcast.c)

77 static void mld_ifc_event(struct inet6_dev *idev);
78 static bool mld_in_v1_mode(const struct inet6_dev *idev);
82 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
85 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
89 struct inet6_dev *idev);
111 #define mc_dereference(e, idev) \ argument
112 rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
128 for (psf = mc_dereference((mc)->mca_sources, mc->idev); \
130 psf = mc_dereference(psf->sf_next, mc->idev))
138 for (psf = mc_dereference((mc)->mca_tomb, mc->idev); \
140 psf = mc_dereference(psf->sf_next, mc->idev))
142 #define for_each_mc_mclock(idev, mc) \ argument
143 for (mc = mc_dereference((idev)->mc_list, idev); \
145 mc = mc_dereference(mc->next, idev))
147 #define for_each_mc_rcu(idev, mc) \ argument
148 for (mc = rcu_dereference((idev)->mc_list); \
152 #define for_each_mc_tomb(idev, mc) \ argument
153 for (mc = mc_dereference((idev)->mc_tomb, idev); \
155 mc = mc_dereference(mc->next, idev))
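
Taken together, the macros above encode the file's two access disciplines for the per-device multicast list: mutating walks hold idev->mc_lock, which is exactly what the lockdep_is_held() check inside mc_dereference() enforces, while datapath readers traverse the same list under RCU. A minimal sketch of both disciplines; the helper names are illustrative, not from the file:

    /* Illustrative sketch, not from mcast.c: the two legal ways to walk
     * idev->mc_list given the macros above.
     */
    static void walk_as_writer(struct inet6_dev *idev)
    {
            struct ifmcaddr6 *mc;

            mutex_lock(&idev->mc_lock);     /* satisfies mc_dereference()'s lockdep check */
            for_each_mc_mclock(idev, mc) {
                    /* may read and modify mc; the list cannot change underneath us */
            }
            mutex_unlock(&idev->mc_lock);
    }

    static void walk_as_reader(struct inet6_dev *idev)
    {
            struct ifmcaddr6 *mc;

            rcu_read_lock();        /* read side: cheap, but no sleeping, no writes */
            for_each_mc_rcu(idev, mc) {
                    /* read-only; entries may be unlinked concurrently */
            }
            rcu_read_unlock();
    }
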
157 static int unsolicited_report_interval(struct inet6_dev *idev) in unsolicited_report_interval() argument
161 if (mld_in_v1_mode(idev)) in unsolicited_report_interval()
162 iv = idev->cnf.mldv1_unsolicited_report_interval; in unsolicited_report_interval()
164 iv = idev->cnf.mldv2_unsolicited_report_interval; in unsolicited_report_interval()
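
Only the idev-bearing lines of unsolicited_report_interval() match above. Reconstructed around them (hedged: the non-matching lines are elided by the search, and the final clamp is an assumption), the helper just picks the per-interface sysctl for whichever MLD version is active:

    static int unsolicited_report_interval(struct inet6_dev *idev)
    {
            int iv;

            if (mld_in_v1_mode(idev))
                    iv = idev->cnf.mldv1_unsolicited_report_interval;
            else
                    iv = idev->cnf.mldv2_unsolicited_report_interval;

            return iv > 0 ? iv : 1; /* assumed clamp: never hand back a zero interval */
    }
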
271 struct inet6_dev *idev = __in6_dev_get(dev); in ipv6_sock_mc_drop() local
273 ip6_mc_leave_src(sk, mc_lst, idev); in ipv6_sock_mc_drop()
274 if (idev) in ipv6_sock_mc_drop()
275 __ipv6_dev_mc_dec(idev, &mc_lst->addr); in ipv6_sock_mc_drop()
295 struct inet6_dev *idev = NULL; in ip6_mc_find_dev_rtnl() local
310 idev = __in6_dev_get(dev); in ip6_mc_find_dev_rtnl()
311 if (!idev) in ip6_mc_find_dev_rtnl()
313 if (idev->dead) in ip6_mc_find_dev_rtnl()
315 return idev; in ip6_mc_find_dev_rtnl()
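
The matches in ip6_mc_find_dev_rtnl() outline an RTNL-protected lookup: resolve the device, take its inet6_dev without a reference (RTNL keeps both alive), and refuse a device already marked dead. A sketch of that shape, under the stated assumption that resolution is purely by ifindex; the real function may also fall back to a route lookup on the group address:

    /* Sketch only. ASSERT_RTNL() documents the caller's locking contract;
     * __dev_get_by_index() and __in6_dev_get() take no references because
     * RTNL pins both objects for the duration of the call.
     */
    static struct inet6_dev *find_idev_rtnl(struct net *net, int ifindex)
    {
            struct net_device *dev;
            struct inet6_dev *idev;

            ASSERT_RTNL();

            dev = __dev_get_by_index(net, ifindex);
            if (!dev)
                    return NULL;

            idev = __in6_dev_get(dev);
            if (!idev || idev->dead)
                    return NULL;    /* device going away: treat as absent */

            return idev;
    }
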
333 struct inet6_dev *idev = __in6_dev_get(dev); in __ipv6_sock_mc_close() local
335 ip6_mc_leave_src(sk, mc_lst, idev); in __ipv6_sock_mc_close()
336 if (idev) in __ipv6_sock_mc_close()
337 __ipv6_dev_mc_dec(idev, &mc_lst->addr); in __ipv6_sock_mc_close()
366 struct inet6_dev *idev; in ip6_mc_source() local
380 idev = ip6_mc_find_dev_rtnl(net, group, pgsr->gsr_interface); in ip6_mc_source()
381 if (!idev) in ip6_mc_source()
386 mutex_lock(&idev->mc_lock); in ip6_mc_source()
405 ip6_mc_add_src(idev, group, omode, 0, NULL, 0); in ip6_mc_source()
406 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); in ip6_mc_source()
430 ip6_mc_del_src(idev, group, omode, 1, source, 1); in ip6_mc_source()
480 ip6_mc_add_src(idev, group, omode, 1, source, 1); in ip6_mc_source()
482 mutex_unlock(&idev->mc_lock); in ip6_mc_source()
493 struct inet6_dev *idev; in ip6_mc_msfilter() local
508 idev = ip6_mc_find_dev_rtnl(net, group, gsf->gf_interface); in ip6_mc_msfilter()
509 if (!idev) in ip6_mc_msfilter()
544 mutex_lock(&idev->mc_lock); in ip6_mc_msfilter()
545 err = ip6_mc_add_src(idev, group, gsf->gf_fmode, in ip6_mc_msfilter()
548 mutex_unlock(&idev->mc_lock); in ip6_mc_msfilter()
553 mutex_unlock(&idev->mc_lock); in ip6_mc_msfilter()
556 mutex_lock(&idev->mc_lock); in ip6_mc_msfilter()
557 ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0); in ip6_mc_msfilter()
558 mutex_unlock(&idev->mc_lock); in ip6_mc_msfilter()
561 mutex_lock(&idev->mc_lock); in ip6_mc_msfilter()
564 ip6_mc_del_src(idev, group, pmc->sfmode, in ip6_mc_msfilter()
569 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); in ip6_mc_msfilter()
572 mutex_unlock(&idev->mc_lock); in ip6_mc_msfilter()
670 struct net_device *dev = mc->idev->dev; in igmp6_group_added()
686 if (mld_in_v1_mode(mc->idev)) { in igmp6_group_added()
697 mc->mca_crcount = mc->idev->mc_qrv; in igmp6_group_added()
699 mld_ifc_event(mc->idev); in igmp6_group_added()
705 struct net_device *dev = mc->idev->dev; in igmp6_group_dropped()
721 if (!mc->idev->dead) in igmp6_group_dropped()
732 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) in mld_add_delrec() argument
746 pmc->idev = im->idev; in mld_add_delrec()
747 in6_dev_hold(idev); in mld_add_delrec()
749 pmc->mca_crcount = idev->mc_qrv; in mld_add_delrec()
755 mc_dereference(im->mca_tomb, idev)); in mld_add_delrec()
757 mc_dereference(im->mca_sources, idev)); in mld_add_delrec()
765 rcu_assign_pointer(pmc->next, idev->mc_tomb); in mld_add_delrec()
766 rcu_assign_pointer(idev->mc_tomb, pmc); in mld_add_delrec()
770 static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) in mld_del_delrec() argument
777 for_each_mc_tomb(idev, pmc) { in mld_del_delrec()
786 rcu_assign_pointer(idev->mc_tomb, pmc->next); in mld_del_delrec()
790 im->idev = pmc->idev; in mld_del_delrec()
793 mc_dereference(pmc->mca_tomb, pmc->idev), in mld_del_delrec()
794 lockdep_is_held(&im->idev->mc_lock)); in mld_del_delrec()
798 mc_dereference(pmc->mca_sources, pmc->idev), in mld_del_delrec()
799 lockdep_is_held(&im->idev->mc_lock)); in mld_del_delrec()
802 psf->sf_crcount = idev->mc_qrv; in mld_del_delrec()
804 im->mca_crcount = idev->mc_qrv; in mld_del_delrec()
806 in6_dev_put(pmc->idev); in mld_del_delrec()
813 static void mld_clear_delrec(struct inet6_dev *idev) in mld_clear_delrec() argument
817 pmc = mc_dereference(idev->mc_tomb, idev); in mld_clear_delrec()
818 RCU_INIT_POINTER(idev->mc_tomb, NULL); in mld_clear_delrec()
821 nextpmc = mc_dereference(pmc->next, idev); in mld_clear_delrec()
823 in6_dev_put(pmc->idev); in mld_clear_delrec()
828 for_each_mc_mclock(idev, pmc) { in mld_clear_delrec()
831 psf = mc_dereference(pmc->mca_tomb, idev); in mld_clear_delrec()
834 psf_next = mc_dereference(psf->sf_next, idev); in mld_clear_delrec()
840 static void mld_clear_query(struct inet6_dev *idev) in mld_clear_query() argument
844 spin_lock_bh(&idev->mc_query_lock); in mld_clear_query()
845 while ((skb = __skb_dequeue(&idev->mc_query_queue))) in mld_clear_query()
847 spin_unlock_bh(&idev->mc_query_lock); in mld_clear_query()
850 static void mld_clear_report(struct inet6_dev *idev) in mld_clear_report() argument
854 spin_lock_bh(&idev->mc_report_lock); in mld_clear_report()
855 while ((skb = __skb_dequeue(&idev->mc_report_queue))) in mld_clear_report()
857 spin_unlock_bh(&idev->mc_report_lock); in mld_clear_report()
868 in6_dev_put(mc->idev); in ma_put()
874 static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, in mca_alloc() argument
887 mc->idev = idev; /* reference taken by caller */ in mca_alloc()
910 struct inet6_dev *idev; in __ipv6_dev_mc_inc() local
915 idev = in6_dev_get(dev); in __ipv6_dev_mc_inc()
917 if (!idev) in __ipv6_dev_mc_inc()
920 if (idev->dead) { in __ipv6_dev_mc_inc()
921 in6_dev_put(idev); in __ipv6_dev_mc_inc()
925 mutex_lock(&idev->mc_lock); in __ipv6_dev_mc_inc()
926 for_each_mc_mclock(idev, mc) { in __ipv6_dev_mc_inc()
929 ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0); in __ipv6_dev_mc_inc()
930 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_inc()
931 in6_dev_put(idev); in __ipv6_dev_mc_inc()
936 mc = mca_alloc(idev, addr, mode); in __ipv6_dev_mc_inc()
938 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_inc()
939 in6_dev_put(idev); in __ipv6_dev_mc_inc()
943 rcu_assign_pointer(mc->next, idev->mc_list); in __ipv6_dev_mc_inc()
944 rcu_assign_pointer(idev->mc_list, mc); in __ipv6_dev_mc_inc()
948 mld_del_delrec(idev, mc); in __ipv6_dev_mc_inc()
950 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_inc()
964 int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) in __ipv6_dev_mc_dec() argument
970 mutex_lock(&idev->mc_lock); in __ipv6_dev_mc_dec()
971 for (map = &idev->mc_list; in __ipv6_dev_mc_dec()
972 (ma = mc_dereference(*map, idev)); in __ipv6_dev_mc_dec()
980 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_dec()
985 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_dec()
990 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_dec()
996 struct inet6_dev *idev; in ipv6_dev_mc_dec() local
1001 idev = __in6_dev_get(dev); in ipv6_dev_mc_dec()
1002 if (!idev) in ipv6_dev_mc_dec()
1005 err = __ipv6_dev_mc_dec(idev, addr); in ipv6_dev_mc_dec()
1017 struct inet6_dev *idev; in ipv6_chk_mcast_addr() local
1022 idev = __in6_dev_get(dev); in ipv6_chk_mcast_addr()
1023 if (idev) { in ipv6_chk_mcast_addr()
1024 for_each_mc_rcu(idev, mc) { in ipv6_chk_mcast_addr()
1051 static void mld_gq_start_work(struct inet6_dev *idev) in mld_gq_start_work() argument
1053 unsigned long tv = get_random_u32_below(idev->mc_maxdelay); in mld_gq_start_work()
1055 idev->mc_gq_running = 1; in mld_gq_start_work()
1056 if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2)) in mld_gq_start_work()
1057 in6_dev_hold(idev); in mld_gq_start_work()
1061 static void mld_gq_stop_work(struct inet6_dev *idev) in mld_gq_stop_work() argument
1063 idev->mc_gq_running = 0; in mld_gq_stop_work()
1064 if (cancel_delayed_work(&idev->mc_gq_work)) in mld_gq_stop_work()
1065 __in6_dev_put(idev); in mld_gq_stop_work()
1069 static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay) in mld_ifc_start_work() argument
1073 if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2)) in mld_ifc_start_work()
1074 in6_dev_hold(idev); in mld_ifc_start_work()
1078 static void mld_ifc_stop_work(struct inet6_dev *idev) in mld_ifc_stop_work() argument
1080 idev->mc_ifc_count = 0; in mld_ifc_stop_work()
1081 if (cancel_delayed_work(&idev->mc_ifc_work)) in mld_ifc_stop_work()
1082 __in6_dev_put(idev); in mld_ifc_stop_work()
1086 static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay) in mld_dad_start_work() argument
1090 if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2)) in mld_dad_start_work()
1091 in6_dev_hold(idev); in mld_dad_start_work()
1094 static void mld_dad_stop_work(struct inet6_dev *idev) in mld_dad_stop_work() argument
1096 if (cancel_delayed_work(&idev->mc_dad_work)) in mld_dad_stop_work()
1097 __in6_dev_put(idev); in mld_dad_stop_work()
1100 static void mld_query_stop_work(struct inet6_dev *idev) in mld_query_stop_work() argument
1102 spin_lock_bh(&idev->mc_query_lock); in mld_query_stop_work()
1103 if (cancel_delayed_work(&idev->mc_query_work)) in mld_query_stop_work()
1104 __in6_dev_put(idev); in mld_query_stop_work()
1105 spin_unlock_bh(&idev->mc_query_lock); in mld_query_stop_work()
1108 static void mld_report_stop_work(struct inet6_dev *idev) in mld_report_stop_work() argument
1110 if (cancel_delayed_work_sync(&idev->mc_report_work)) in mld_report_stop_work()
1111 __in6_dev_put(idev); in mld_report_stop_work()
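
Every start helper above pairs mod_delayed_work() with in6_dev_hold(), and every stop helper pairs cancel_delayed_work() with __in6_dev_put(). The invariant: each pending work item owns exactly one idev reference, dropped either by the work function when it runs or by whoever cancels it. An illustrative restatement (sketch names, not from the file):

    static void sketch_start_work(struct inet6_dev *idev, unsigned long delay)
    {
            /* mod_delayed_work() returns false iff the work was idle and has
             * just been queued, so only then is a fresh reference needed; a
             * still-pending item keeps the reference it already owns.
             */
            if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, delay))
                    in6_dev_hold(idev);
    }

    static void sketch_stop_work(struct inet6_dev *idev)
    {
            /* cancel_delayed_work() returns true iff it removed a pending
             * item, which will now never run to drop its own reference.
             */
            if (cancel_delayed_work(&idev->mc_gq_work))
                    __in6_dev_put(idev);
    }

The work functions close the loop by ending in in6_dev_put(idev) (see 2643 and 2662 below); line 1110 strengthens the report path with cancel_delayed_work_sync(), which additionally waits out an already-running instance.
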
1203 static int mld_force_mld_version(const struct inet6_dev *idev) in mld_force_mld_version() argument
1210 if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0) in mld_force_mld_version()
1211 return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version; in mld_force_mld_version()
1213 return idev->cnf.force_mld_version; in mld_force_mld_version()
1216 static bool mld_in_v2_mode_only(const struct inet6_dev *idev) in mld_in_v2_mode_only() argument
1218 return mld_force_mld_version(idev) == 2; in mld_in_v2_mode_only()
1221 static bool mld_in_v1_mode_only(const struct inet6_dev *idev) in mld_in_v1_mode_only() argument
1223 return mld_force_mld_version(idev) == 1; in mld_in_v1_mode_only()
1226 static bool mld_in_v1_mode(const struct inet6_dev *idev) in mld_in_v1_mode() argument
1228 if (mld_in_v2_mode_only(idev)) in mld_in_v1_mode()
1230 if (mld_in_v1_mode_only(idev)) in mld_in_v1_mode()
1232 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen)) in mld_in_v1_mode()
1238 static void mld_set_v1_mode(struct inet6_dev *idev) in mld_set_v1_mode() argument
1248 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri; in mld_set_v1_mode()
1250 idev->mc_v1_seen = jiffies + switchback; in mld_set_v1_mode()
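
The return statements of mld_in_v1_mode() sit on non-matching lines; reconstructed (an assumption, though the ordering visible above forces it), the precedence is forced-v2, then forced-v1, then the compatibility window armed by mld_set_v1_mode(). The arithmetic at 1248 is RFC 3810's Older Version Querier Present Timeout, (Robustness Variable × Query Interval) + Query Response Interval:

    static bool mld_in_v1_mode(const struct inet6_dev *idev)
    {
            if (mld_in_v2_mode_only(idev))  /* forced v2 always wins */
                    return false;
            if (mld_in_v1_mode_only(idev))  /* then forced v1 */
                    return true;
            /* otherwise honour the v1 window until it expires */
            if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
                    return true;
            return false;
    }
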
1253 static void mld_update_qrv(struct inet6_dev *idev, in mld_update_qrv() argument
1266 WARN_ON(idev->mc_qrv == 0); in mld_update_qrv()
1269 idev->mc_qrv = mlh2->mld2q_qrv; in mld_update_qrv()
1271 if (unlikely(idev->mc_qrv < min_qrv)) { in mld_update_qrv()
1273 idev->mc_qrv, min_qrv); in mld_update_qrv()
1274 idev->mc_qrv = min_qrv; in mld_update_qrv()
1278 static void mld_update_qi(struct inet6_dev *idev, in mld_update_qi() argument
1300 idev->mc_qi = mc_qqi * HZ; in mld_update_qi()
1303 static void mld_update_qri(struct inet6_dev *idev, in mld_update_qri() argument
1310 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2)); in mld_update_qri()
1313 static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, in mld_process_v1() argument
1319 if (mld_in_v2_mode_only(idev)) in mld_process_v1()
1347 mld_set_v1_mode(idev); in mld_process_v1()
1350 mld_gq_stop_work(idev); in mld_process_v1()
1352 mld_ifc_stop_work(idev); in mld_process_v1()
1354 mld_clear_delrec(idev); in mld_process_v1()
1359 static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld, in mld_process_v2() argument
1364 mld_update_qrv(idev, mld); in mld_process_v2()
1365 mld_update_qi(idev, mld); in mld_process_v2()
1366 mld_update_qri(idev, mld); in mld_process_v2()
1368 idev->mc_maxdelay = *max_delay; in mld_process_v2()
1376 struct inet6_dev *idev = __in6_dev_get(skb->dev); in igmp6_event_query() local
1378 if (!idev || idev->dead) in igmp6_event_query()
1381 spin_lock_bh(&idev->mc_query_lock); in igmp6_event_query()
1382 if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) { in igmp6_event_query()
1383 __skb_queue_tail(&idev->mc_query_queue, skb); in igmp6_event_query()
1384 if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0)) in igmp6_event_query()
1385 in6_dev_hold(idev); in igmp6_event_query()
1388 spin_unlock_bh(&idev->mc_query_lock); in igmp6_event_query()
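
igmp6_event_query() runs in softirq context, as does its twin igmp6_event_report() at 1544, so each only queues the skb and defers parsing to process context; MLD_MAX_SKBS bounds the backlog so a query flood cannot pin unbounded memory. A sketch of the hand-off, with the overflow path made explicit (an assumption: skbs that are not queued get freed):

    /* Sketch of the softirq-to-worker hand-off used above. */
    static void mld_queue_skb(struct inet6_dev *idev, struct sk_buff *skb)
    {
            spin_lock_bh(&idev->mc_query_lock);
            if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
                    __skb_queue_tail(&idev->mc_query_queue, skb);
                    /* delay 0: run the parser as soon as a worker is free */
                    if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
                            in6_dev_hold(idev);
                    skb = NULL;     /* ownership passed to the queue */
            }
            spin_unlock_bh(&idev->mc_query_lock);
            kfree_skb(skb);         /* assumed drop on overflow; no-op once queued */
    }

The consumer, mld_query_work() at 1509, drains the queue under the same spinlock, takes idev->mc_lock only for the actual processing, and re-queues itself at 1535 if it stopped early.
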
1398 struct inet6_dev *idev; in __mld_query_work() local
1425 idev = in6_dev_get(skb->dev); in __mld_query_work()
1426 if (!idev) in __mld_query_work()
1439 } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) { in __mld_query_work()
1440 err = mld_process_v1(idev, mld, &max_delay, in __mld_query_work()
1453 mld_process_v2(idev, mlh2, &max_delay); in __mld_query_work()
1459 mld_gq_start_work(idev); in __mld_query_work()
1476 for_each_mc_mclock(idev, ma) { in __mld_query_work()
1480 for_each_mc_mclock(idev, ma) { in __mld_query_work()
1502 in6_dev_put(idev); in __mld_query_work()
1509 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_query_work() local
1519 spin_lock_bh(&idev->mc_query_lock); in mld_query_work()
1520 while ((skb = __skb_dequeue(&idev->mc_query_queue))) { in mld_query_work()
1528 spin_unlock_bh(&idev->mc_query_lock); in mld_query_work()
1530 mutex_lock(&idev->mc_lock); in mld_query_work()
1533 mutex_unlock(&idev->mc_lock); in mld_query_work()
1535 if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0)) in mld_query_work()
1538 in6_dev_put(idev); in mld_query_work()
1544 struct inet6_dev *idev = __in6_dev_get(skb->dev); in igmp6_event_report() local
1546 if (!idev || idev->dead) in igmp6_event_report()
1549 spin_lock_bh(&idev->mc_report_lock); in igmp6_event_report()
1550 if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) { in igmp6_event_report()
1551 __skb_queue_tail(&idev->mc_report_queue, skb); in igmp6_event_report()
1552 if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0)) in igmp6_event_report()
1553 in6_dev_hold(idev); in igmp6_event_report()
1556 spin_unlock_bh(&idev->mc_report_lock); in igmp6_event_report()
1563 struct inet6_dev *idev; in __mld_report_work() local
1588 idev = in6_dev_get(skb->dev); in __mld_report_work()
1589 if (!idev) in __mld_report_work()
1596 for_each_mc_mclock(idev, ma) { in __mld_report_work()
1606 in6_dev_put(idev); in __mld_report_work()
1613 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_report_work() local
1622 spin_lock_bh(&idev->mc_report_lock); in mld_report_work()
1623 while ((skb = __skb_dequeue(&idev->mc_report_queue))) { in mld_report_work()
1631 spin_unlock_bh(&idev->mc_report_lock); in mld_report_work()
1633 mutex_lock(&idev->mc_lock); in mld_report_work()
1636 mutex_unlock(&idev->mc_lock); in mld_report_work()
1638 if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0)) in mld_report_work()
1641 in6_dev_put(idev); in mld_report_work()
1727 static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) in mld_newpack() argument
1731 struct net_device *dev = idev->dev; in mld_newpack()
1786 struct inet6_dev *idev; in mld_sendpack() local
1793 idev = __in6_dev_get(skb->dev); in mld_sendpack()
1794 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); in mld_sendpack()
1825 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); in mld_sendpack()
1826 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); in mld_sendpack()
1828 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); in mld_sendpack()
1851 skb = mld_newpack(pmc->idev, mtu); in add_grhead()
1876 struct inet6_dev *idev = pmc->idev; in add_grec() local
1877 struct net_device *dev = idev->dev; in add_grec()
1909 skb = mld_newpack(idev, mtu); in add_grec()
1914 for (psf = mc_dereference(*psf_list, idev); in add_grec()
1919 psf_next = mc_dereference(psf->sf_next, idev); in add_grec()
1947 skb = mld_newpack(idev, mtu); in add_grec()
1967 mc_dereference(psf->sf_next, idev)); in add_grec()
1970 mc_dereference(psf->sf_next, idev)); in add_grec()
2001 static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) in mld_send_report() argument
2007 for_each_mc_mclock(idev, pmc) { in mld_send_report()
2031 static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev) in mld_clear_zeros() argument
2036 for (psf = mc_dereference(*ppsf, idev); in mld_clear_zeros()
2039 psf_next = mc_dereference(psf->sf_next, idev); in mld_clear_zeros()
2043 mc_dereference(psf->sf_next, idev)); in mld_clear_zeros()
2046 mc_dereference(psf->sf_next, idev)); in mld_clear_zeros()
2055 static void mld_send_cr(struct inet6_dev *idev) in mld_send_cr() argument
2063 for (pmc = mc_dereference(idev->mc_tomb, idev); in mld_send_cr()
2066 pmc_next = mc_dereference(pmc->next, idev); in mld_send_cr()
2080 mld_clear_zeros(&pmc->mca_tomb, idev); in mld_send_cr()
2081 mld_clear_zeros(&pmc->mca_sources, idev); in mld_send_cr()
2090 rcu_assign_pointer(idev->mc_tomb, pmc_next); in mld_send_cr()
2091 in6_dev_put(pmc->idev); in mld_send_cr()
2098 for_each_mc_mclock(idev, pmc) { in mld_send_cr()
2128 struct inet6_dev *idev; in igmp6_send() local
2190 idev = __in6_dev_get(skb->dev); in igmp6_send()
2207 ICMP6MSGOUT_INC_STATS(net, idev, type); in igmp6_send()
2208 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); in igmp6_send()
2210 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); in igmp6_send()
2221 static void mld_send_initial_cr(struct inet6_dev *idev) in mld_send_initial_cr() argument
2227 if (mld_in_v1_mode(idev)) in mld_send_initial_cr()
2231 for_each_mc_mclock(idev, pmc) { in mld_send_initial_cr()
2242 void ipv6_mc_dad_complete(struct inet6_dev *idev) in ipv6_mc_dad_complete() argument
2244 mutex_lock(&idev->mc_lock); in ipv6_mc_dad_complete()
2245 idev->mc_dad_count = idev->mc_qrv; in ipv6_mc_dad_complete()
2246 if (idev->mc_dad_count) { in ipv6_mc_dad_complete()
2247 mld_send_initial_cr(idev); in ipv6_mc_dad_complete()
2248 idev->mc_dad_count--; in ipv6_mc_dad_complete()
2249 if (idev->mc_dad_count) in ipv6_mc_dad_complete()
2250 mld_dad_start_work(idev, in ipv6_mc_dad_complete()
2251 unsolicited_report_interval(idev)); in ipv6_mc_dad_complete()
2253 mutex_unlock(&idev->mc_lock); in ipv6_mc_dad_complete()
2258 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_dad_work() local
2261 mutex_lock(&idev->mc_lock); in mld_dad_work()
2262 mld_send_initial_cr(idev); in mld_dad_work()
2263 if (idev->mc_dad_count) { in mld_dad_work()
2264 idev->mc_dad_count--; in mld_dad_work()
2265 if (idev->mc_dad_count) in mld_dad_work()
2266 mld_dad_start_work(idev, in mld_dad_work()
2267 unsolicited_report_interval(idev)); in mld_dad_work()
2269 mutex_unlock(&idev->mc_lock); in mld_dad_work()
2270 in6_dev_put(idev); in mld_dad_work()
2292 struct inet6_dev *idev = pmc->idev; in ip6_mc_del1_src() local
2297 mc_dereference(psf->sf_next, idev)); in ip6_mc_del1_src()
2300 mc_dereference(psf->sf_next, idev)); in ip6_mc_del1_src()
2303 !mld_in_v1_mode(idev)) { in ip6_mc_del1_src()
2304 psf->sf_crcount = idev->mc_qrv; in ip6_mc_del1_src()
2306 mc_dereference(pmc->mca_tomb, idev)); in ip6_mc_del1_src()
2317 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, in ip6_mc_del_src() argument
2325 if (!idev) in ip6_mc_del_src()
2328 for_each_mc_mclock(idev, pmc) { in ip6_mc_del_src()
2357 pmc->mca_crcount = idev->mc_qrv; in ip6_mc_del_src()
2358 idev->mc_ifc_count = pmc->mca_crcount; in ip6_mc_del_src()
2361 mld_ifc_event(pmc->idev); in ip6_mc_del_src()
2363 mld_ifc_event(pmc->idev); in ip6_mc_del_src()
2422 int qrv = pmc->idev->mc_qrv; in sf_setstate()
2446 pmc->idev)); in sf_setstate()
2450 pmc->idev)); in sf_setstate()
2473 mc_dereference(pmc->mca_tomb, pmc->idev)); in sf_setstate()
2487 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, in ip6_mc_add_src() argument
2495 if (!idev) in ip6_mc_add_src()
2498 for_each_mc_mclock(idev, pmc) { in ip6_mc_add_src()
2532 pmc->mca_crcount = idev->mc_qrv; in ip6_mc_add_src()
2533 idev->mc_ifc_count = pmc->mca_crcount; in ip6_mc_add_src()
2536 mld_ifc_event(idev); in ip6_mc_add_src()
2538 mld_ifc_event(idev); in ip6_mc_add_src()
2548 for (psf = mc_dereference(pmc->mca_tomb, pmc->idev); in ip6_mc_clear_src()
2551 nextpsf = mc_dereference(psf->sf_next, pmc->idev); in ip6_mc_clear_src()
2555 for (psf = mc_dereference(pmc->mca_sources, pmc->idev); in ip6_mc_clear_src()
2558 nextpsf = mc_dereference(psf->sf_next, pmc->idev); in ip6_mc_clear_src()
2575 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT); in igmp6_join_group()
2577 delay = get_random_u32_below(unsolicited_report_interval(ma->idev)); in igmp6_join_group()
2590 struct inet6_dev *idev) in ip6_mc_leave_src() argument
2597 if (idev) in ip6_mc_leave_src()
2598 mutex_lock(&idev->mc_lock); in ip6_mc_leave_src()
2602 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0); in ip6_mc_leave_src()
2604 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, in ip6_mc_leave_src()
2612 if (idev) in ip6_mc_leave_src()
2613 mutex_unlock(&idev->mc_lock); in ip6_mc_leave_src()
2621 if (mld_in_v1_mode(ma->idev)) { in igmp6_leave_group()
2623 igmp6_send(&ma->mca_addr, ma->idev->dev, in igmp6_leave_group()
2627 mld_add_delrec(ma->idev, ma); in igmp6_leave_group()
2628 mld_ifc_event(ma->idev); in igmp6_leave_group()
2634 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_gq_work() local
2638 mutex_lock(&idev->mc_lock); in mld_gq_work()
2639 mld_send_report(idev, NULL); in mld_gq_work()
2640 idev->mc_gq_running = 0; in mld_gq_work()
2641 mutex_unlock(&idev->mc_lock); in mld_gq_work()
2643 in6_dev_put(idev); in mld_gq_work()
2648 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_ifc_work() local
2652 mutex_lock(&idev->mc_lock); in mld_ifc_work()
2653 mld_send_cr(idev); in mld_ifc_work()
2655 if (idev->mc_ifc_count) { in mld_ifc_work()
2656 idev->mc_ifc_count--; in mld_ifc_work()
2657 if (idev->mc_ifc_count) in mld_ifc_work()
2658 mld_ifc_start_work(idev, in mld_ifc_work()
2659 unsolicited_report_interval(idev)); in mld_ifc_work()
2661 mutex_unlock(&idev->mc_lock); in mld_ifc_work()
2662 in6_dev_put(idev); in mld_ifc_work()
2666 static void mld_ifc_event(struct inet6_dev *idev) in mld_ifc_event() argument
2668 if (mld_in_v1_mode(idev)) in mld_ifc_event()
2671 idev->mc_ifc_count = idev->mc_qrv; in mld_ifc_event()
2672 mld_ifc_start_work(idev, 1); in mld_ifc_event()
2680 mutex_lock(&ma->idev->mc_lock); in mld_mca_work()
2681 if (mld_in_v1_mode(ma->idev)) in mld_mca_work()
2682 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT); in mld_mca_work()
2684 mld_send_report(ma->idev, ma); in mld_mca_work()
2687 mutex_unlock(&ma->idev->mc_lock); in mld_mca_work()
2694 void ipv6_mc_unmap(struct inet6_dev *idev) in ipv6_mc_unmap() argument
2700 mutex_lock(&idev->mc_lock); in ipv6_mc_unmap()
2701 for_each_mc_mclock(idev, i) in ipv6_mc_unmap()
2703 mutex_unlock(&idev->mc_lock); in ipv6_mc_unmap()
2706 void ipv6_mc_remap(struct inet6_dev *idev) in ipv6_mc_remap() argument
2708 ipv6_mc_up(idev); in ipv6_mc_remap()
2712 void ipv6_mc_down(struct inet6_dev *idev) in ipv6_mc_down() argument
2716 mutex_lock(&idev->mc_lock); in ipv6_mc_down()
2718 for_each_mc_mclock(idev, i) in ipv6_mc_down()
2720 mutex_unlock(&idev->mc_lock); in ipv6_mc_down()
2726 mld_query_stop_work(idev); in ipv6_mc_down()
2727 mld_report_stop_work(idev); in ipv6_mc_down()
2728 mld_ifc_stop_work(idev); in ipv6_mc_down()
2729 mld_gq_stop_work(idev); in ipv6_mc_down()
2730 mld_dad_stop_work(idev); in ipv6_mc_down()
2733 static void ipv6_mc_reset(struct inet6_dev *idev) in ipv6_mc_reset() argument
2735 idev->mc_qrv = sysctl_mld_qrv; in ipv6_mc_reset()
2736 idev->mc_qi = MLD_QI_DEFAULT; in ipv6_mc_reset()
2737 idev->mc_qri = MLD_QRI_DEFAULT; in ipv6_mc_reset()
2738 idev->mc_v1_seen = 0; in ipv6_mc_reset()
2739 idev->mc_maxdelay = unsolicited_report_interval(idev); in ipv6_mc_reset()
2744 void ipv6_mc_up(struct inet6_dev *idev) in ipv6_mc_up() argument
2750 ipv6_mc_reset(idev); in ipv6_mc_up()
2751 mutex_lock(&idev->mc_lock); in ipv6_mc_up()
2752 for_each_mc_mclock(idev, i) { in ipv6_mc_up()
2753 mld_del_delrec(idev, i); in ipv6_mc_up()
2756 mutex_unlock(&idev->mc_lock); in ipv6_mc_up()
2761 void ipv6_mc_init_dev(struct inet6_dev *idev) in ipv6_mc_init_dev() argument
2763 idev->mc_gq_running = 0; in ipv6_mc_init_dev()
2764 INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work); in ipv6_mc_init_dev()
2765 RCU_INIT_POINTER(idev->mc_tomb, NULL); in ipv6_mc_init_dev()
2766 idev->mc_ifc_count = 0; in ipv6_mc_init_dev()
2767 INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work); in ipv6_mc_init_dev()
2768 INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work); in ipv6_mc_init_dev()
2769 INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work); in ipv6_mc_init_dev()
2770 INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work); in ipv6_mc_init_dev()
2771 skb_queue_head_init(&idev->mc_query_queue); in ipv6_mc_init_dev()
2772 skb_queue_head_init(&idev->mc_report_queue); in ipv6_mc_init_dev()
2773 spin_lock_init(&idev->mc_query_lock); in ipv6_mc_init_dev()
2774 spin_lock_init(&idev->mc_report_lock); in ipv6_mc_init_dev()
2775 mutex_init(&idev->mc_lock); in ipv6_mc_init_dev()
2776 ipv6_mc_reset(idev); in ipv6_mc_init_dev()
2783 void ipv6_mc_destroy_dev(struct inet6_dev *idev) in ipv6_mc_destroy_dev() argument
2788 ipv6_mc_down(idev); in ipv6_mc_destroy_dev()
2789 mutex_lock(&idev->mc_lock); in ipv6_mc_destroy_dev()
2790 mld_clear_delrec(idev); in ipv6_mc_destroy_dev()
2791 mutex_unlock(&idev->mc_lock); in ipv6_mc_destroy_dev()
2792 mld_clear_query(idev); in ipv6_mc_destroy_dev()
2793 mld_clear_report(idev); in ipv6_mc_destroy_dev()
2800 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes); in ipv6_mc_destroy_dev()
2802 if (idev->cnf.forwarding) in ipv6_mc_destroy_dev()
2803 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters); in ipv6_mc_destroy_dev()
2805 mutex_lock(&idev->mc_lock); in ipv6_mc_destroy_dev()
2806 while ((i = mc_dereference(idev->mc_list, idev))) { in ipv6_mc_destroy_dev()
2807 rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev)); in ipv6_mc_destroy_dev()
2812 mutex_unlock(&idev->mc_lock); in ipv6_mc_destroy_dev()
2815 static void ipv6_mc_rejoin_groups(struct inet6_dev *idev) in ipv6_mc_rejoin_groups() argument
2821 mutex_lock(&idev->mc_lock); in ipv6_mc_rejoin_groups()
2822 if (mld_in_v1_mode(idev)) { in ipv6_mc_rejoin_groups()
2823 for_each_mc_mclock(idev, pmc) in ipv6_mc_rejoin_groups()
2826 mld_send_report(idev, NULL); in ipv6_mc_rejoin_groups()
2828 mutex_unlock(&idev->mc_lock); in ipv6_mc_rejoin_groups()
2836 struct inet6_dev *idev = __in6_dev_get(dev); in ipv6_mc_netdev_event() local
2840 if (idev) in ipv6_mc_netdev_event()
2841 ipv6_mc_rejoin_groups(idev); in ipv6_mc_netdev_event()
2858 struct inet6_dev *idev; member
2869 state->idev = NULL; in igmp6_mc_get_first()
2871 struct inet6_dev *idev; in igmp6_mc_get_first() local
2872 idev = __in6_dev_get(state->dev); in igmp6_mc_get_first()
2873 if (!idev) in igmp6_mc_get_first()
2876 im = rcu_dereference(idev->mc_list); in igmp6_mc_get_first()
2878 state->idev = idev; in igmp6_mc_get_first()
2893 state->idev = NULL; in igmp6_mc_get_next()
2896 state->idev = __in6_dev_get(state->dev); in igmp6_mc_get_next()
2897 if (!state->idev) in igmp6_mc_get_next()
2899 im = rcu_dereference(state->idev->mc_list); in igmp6_mc_get_next()
2933 if (likely(state->idev)) in igmp6_mc_seq_stop()
2934 state->idev = NULL; in igmp6_mc_seq_stop()
2964 struct inet6_dev *idev; member
2977 state->idev = NULL; in igmp6_mcf_get_first()
2980 struct inet6_dev *idev; in igmp6_mcf_get_first() local
2981 idev = __in6_dev_get(state->dev); in igmp6_mcf_get_first()
2982 if (unlikely(idev == NULL)) in igmp6_mcf_get_first()
2985 im = rcu_dereference(idev->mc_list); in igmp6_mcf_get_first()
2990 state->idev = idev; in igmp6_mcf_get_first()
3008 state->idev = NULL; in igmp6_mcf_get_next()
3011 state->idev = __in6_dev_get(state->dev); in igmp6_mcf_get_next()
3012 if (!state->idev) in igmp6_mcf_get_next()
3014 state->im = rcu_dereference(state->idev->mc_list); in igmp6_mcf_get_next()
3058 if (likely(state->idev)) in igmp6_mcf_seq_stop()
3059 state->idev = NULL; in igmp6_mcf_seq_stop()