Lines matching refs: vxlan

Each entry below is a cross-reference hit for the identifier vxlan in the Linux kernel's VXLAN driver: the file line number, the matching source line, and, where known, the enclosing function plus whether the reference is a function argument or a local variable.

60 static int vxlan_sock_add(struct vxlan_dev *vxlan);
62 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
225 if (node->vxlan->default_dst.remote_vni != vni) in vxlan_vs_find_vni()
229 const struct vxlan_config *cfg = &node->vxlan->cfg; in vxlan_vs_find_vni()
236 return node->vxlan; in vxlan_vs_find_vni()
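
The three hits above (lines 225-236) are vxlan_vs_find_vni(), which maps the VNI of an arriving packet back to the vxlan_dev bound to that VNI on the receiving socket, consulting the device cfg in metadata mode (line 229). Below is a minimal, self-contained userspace sketch of that kind of VNI-to-device lookup; the struct and names are illustrative, not the driver's, and a plain per-socket list stands in for the kernel's hashed list.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the driver's per-socket device list. */
struct toy_vxlan_dev {
    uint32_t vni;                 /* plays the role of default_dst.remote_vni */
    const char *name;
    struct toy_vxlan_dev *next;   /* next device bound to this socket */
};

/* Walk the per-socket device list and return the device owning @vni. */
static struct toy_vxlan_dev *toy_vs_find_vni(struct toy_vxlan_dev *head,
                                             uint32_t vni)
{
    for (struct toy_vxlan_dev *d = head; d; d = d->next)
        if (d->vni == vni)
            return d;
    return NULL;
}

int main(void)
{
    struct toy_vxlan_dev b = { .vni = 200, .name = "vxlan200", .next = NULL };
    struct toy_vxlan_dev a = { .vni = 100, .name = "vxlan100", .next = &b };
    struct toy_vxlan_dev *hit = toy_vs_find_vni(&a, 200);

    printf("VNI 200 -> %s\n", hit ? hit->name : "(none)");
    return 0;
}
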
257 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, in vxlan_fdb_info() argument
299 ndm->ndm_ifindex = vxlan->dev->ifindex; in vxlan_fdb_info()
305 if (!net_eq(dev_net(vxlan->dev), vxlan->net) && in vxlan_fdb_info()
307 peernet2id(dev_net(vxlan->dev), vxlan->net))) in vxlan_fdb_info()
321 rdst->remote_port != vxlan->cfg.dst_port && in vxlan_fdb_info()
324 if (rdst->remote_vni != vxlan->default_dst.remote_vni && in vxlan_fdb_info()
332 if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni && in vxlan_fdb_info()
365 static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in __vxlan_fdb_notify() argument
368 struct net *net = dev_net(vxlan->dev); in __vxlan_fdb_notify()
376 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); in __vxlan_fdb_notify()
391 static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_notifier_info() argument
397 fdb_info->info.dev = vxlan->dev; in vxlan_fdb_switchdev_notifier_info()
409 static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_call_notifiers() argument
424 vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info); in vxlan_fdb_switchdev_call_notifiers()
425 ret = call_switchdev_notifiers(notifier_type, vxlan->dev, in vxlan_fdb_switchdev_call_notifiers()
430 static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_notify() argument
439 err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
445 vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
451 __vxlan_fdb_notify(vxlan, fdb, rd, type); in vxlan_fdb_notify()
457 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_ip_miss() local
466 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_ip_miss()
469 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) in vxlan_fdb_miss() argument
478 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_fdb_miss()
503 static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni) in fdb_head_index() argument
505 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) in fdb_head_index()
512 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, in vxlan_fdb_head() argument
515 return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)]; in vxlan_fdb_head()
519 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, in __vxlan_find_mac() argument
522 struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni); in __vxlan_find_mac()
527 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { in __vxlan_find_mac()
539 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, in vxlan_find_mac() argument
544 f = __vxlan_find_mac(vxlan, mac, vni); in vxlan_find_mac()
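
Lines 503-544 show the FDB lookup chain: fdb_head_index() picks a hash bucket, keyed on the MAC alone or on MAC plus VNI when VXLAN_F_COLLECT_METADATA is set (line 505), vxlan_fdb_head() returns that bucket (line 515), and __vxlan_find_mac() walks it with an extra VNI comparison in metadata mode (line 527). A simplified, runnable sketch of the same bucket-then-walk shape follows; the hash function and bucket count are placeholders, not the kernel's.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TOY_FDB_BUCKETS 256
#define ETH_ALEN 6

struct toy_fdb {
    uint8_t mac[ETH_ALEN];
    uint32_t vni;
    struct toy_fdb *next;
};

static struct toy_fdb *fdb_head[TOY_FDB_BUCKETS];
static int collect_metadata;   /* mirrors the VXLAN_F_COLLECT_METADATA check */

/* Placeholder hash: fold the MAC (and optionally the VNI) into a bucket. */
static unsigned int toy_head_index(const uint8_t *mac, uint32_t vni)
{
    unsigned int h = 0;

    for (int i = 0; i < ETH_ALEN; i++)
        h = h * 31 + mac[i];
    if (collect_metadata)            /* metadata mode keys on MAC + VNI */
        h ^= vni * 2654435761u;
    return h % TOY_FDB_BUCKETS;
}

static struct toy_fdb *toy_find_mac(const uint8_t *mac, uint32_t vni)
{
    struct toy_fdb *f = fdb_head[toy_head_index(mac, vni)];

    for (; f; f = f->next) {
        if (memcmp(f->mac, mac, ETH_ALEN))
            continue;
        if (collect_metadata && f->vni != vni)   /* VNI must also match */
            continue;
        return f;
    }
    return NULL;
}

int main(void)
{
    struct toy_fdb f = { .mac = { 0, 1, 2, 3, 4, 5 }, .vni = 100 };

    fdb_head[toy_head_index(f.mac, f.vni)] = &f;
    printf("found: %s\n", toy_find_mac(f.mac, 100) ? "yes" : "no");
    return 0;
}
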
572 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_find_uc() local
586 f = __vxlan_find_mac(vxlan, eth_addr, vni); in vxlan_fdb_find_uc()
593 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info); in vxlan_fdb_find_uc()
602 const struct vxlan_dev *vxlan, in vxlan_fdb_notify_one() argument
610 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info); in vxlan_fdb_notify_one()
620 struct vxlan_dev *vxlan; in vxlan_fdb_replay() local
628 vxlan = netdev_priv(dev); in vxlan_fdb_replay()
631 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
632 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) { in vxlan_fdb_replay()
635 rc = vxlan_fdb_notify_one(nb, vxlan, in vxlan_fdb_replay()
643 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
648 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
655 struct vxlan_dev *vxlan; in vxlan_fdb_clear_offload() local
662 vxlan = netdev_priv(dev); in vxlan_fdb_clear_offload()
665 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_clear_offload()
666 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) in vxlan_fdb_clear_offload()
670 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_clear_offload()
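
vxlan_fdb_replay() and vxlan_fdb_clear_offload() (lines 620-670) both sweep the whole table one bucket at a time, taking that bucket's hash_lock around the hlist walk so concurrent FDB updates stay consistent. The sketch below mimics the per-bucket-lock sweep in userspace with pthread mutexes; the bucket count and entry layout are illustrative, not the driver's.

#include <stdio.h>
#include <pthread.h>

#define TOY_FDB_BUCKETS 4   /* the driver uses a much larger table */

struct toy_fdb {
    int offloaded;
    struct toy_fdb *next;
};

static struct toy_fdb *fdb_head[TOY_FDB_BUCKETS];
static pthread_mutex_t hash_lock[TOY_FDB_BUCKETS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Clear the offload mark on every entry, one locked bucket at a time. */
static void toy_fdb_clear_offload(void)
{
    for (int h = 0; h < TOY_FDB_BUCKETS; h++) {
        pthread_mutex_lock(&hash_lock[h]);
        for (struct toy_fdb *f = fdb_head[h]; f; f = f->next)
            f->offloaded = 0;
        pthread_mutex_unlock(&hash_lock[h]);
    }
}

int main(void)
{
    struct toy_fdb f = { .offloaded = 1, .next = NULL };

    fdb_head[2] = &f;
    toy_fdb_clear_offload();
    printf("offloaded after sweep: %d\n", f.offloaded);
    return 0;
}
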
828 static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac, in vxlan_fdb_alloc() argument
842 RCU_INIT_POINTER(f->vdev, vxlan); in vxlan_fdb_alloc()
850 static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac, in vxlan_fdb_insert() argument
853 ++vxlan->addrcnt; in vxlan_fdb_insert()
855 vxlan_fdb_head(vxlan, mac, src_vni)); in vxlan_fdb_insert()
858 static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_nh_update() argument
868 nh = nexthop_find_by_id(vxlan->net, nhid); in vxlan_fdb_nh_update()
891 switch (vxlan->default_dst.remote_ip.sa.sa_family) { in vxlan_fdb_nh_update()
922 static int vxlan_fdb_create(struct vxlan_dev *vxlan, in vxlan_fdb_create() argument
933 if (vxlan->cfg.addrmax && in vxlan_fdb_create()
934 vxlan->addrcnt >= vxlan->cfg.addrmax) in vxlan_fdb_create()
937 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_create()
938 f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); in vxlan_fdb_create()
943 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_create()
984 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_destroy() argument
989 netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); in vxlan_fdb_destroy()
991 --vxlan->addrcnt; in vxlan_fdb_destroy()
994 vxlan_fdb_notify(vxlan, f, NULL, RTM_DELNEIGH, in vxlan_fdb_destroy()
998 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, in vxlan_fdb_destroy()
1015 static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, in vxlan_fdb_update_existing() argument
1065 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_update_existing()
1095 err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, in vxlan_fdb_update_existing()
1115 static int vxlan_fdb_update_create(struct vxlan_dev *vxlan, in vxlan_fdb_update_create() argument
1132 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_update_create()
1133 rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, in vxlan_fdb_update_create()
1138 vxlan_fdb_insert(vxlan, mac, src_vni, f); in vxlan_fdb_update_create()
1139 rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, in vxlan_fdb_update_create()
1147 vxlan_fdb_destroy(vxlan, f, false, false); in vxlan_fdb_update_create()
1152 static int vxlan_fdb_update(struct vxlan_dev *vxlan, in vxlan_fdb_update() argument
1162 f = __vxlan_find_mac(vxlan, mac, src_vni); in vxlan_fdb_update()
1165 netdev_dbg(vxlan->dev, in vxlan_fdb_update()
1170 return vxlan_fdb_update_existing(vxlan, ip, state, flags, port, in vxlan_fdb_update()
1177 return vxlan_fdb_update_create(vxlan, mac, ip, state, flags, in vxlan_fdb_update()
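
vxlan_fdb_update() (lines 1152-1177) is the upsert path: if __vxlan_find_mac() finds the MAC, the entry is refreshed through vxlan_fdb_update_existing(); otherwise a new entry is created, via vxlan_fdb_update_create(), only when the caller asked for creation. A compact sketch of that decision, with a toy flag standing in for NLM_F_CREATE and toy types throughout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define TOY_F_CREATE 0x1   /* stands in for NLM_F_CREATE */

struct toy_entry {
    char mac[18];
    char remote_ip[16];
    struct toy_entry *next;
};

static struct toy_entry *table;

static struct toy_entry *toy_find(const char *mac)
{
    for (struct toy_entry *e = table; e; e = e->next)
        if (!strcmp(e->mac, mac))
            return e;
    return NULL;
}

/* Update an existing entry, or create one only if TOY_F_CREATE was given. */
static int toy_fdb_update(const char *mac, const char *ip, int flags)
{
    struct toy_entry *e = toy_find(mac);

    if (e) {
        snprintf(e->remote_ip, sizeof(e->remote_ip), "%s", ip);
        return 0;
    }
    if (!(flags & TOY_F_CREATE))
        return -ENOENT;

    e = calloc(1, sizeof(*e));
    if (!e)
        return -ENOMEM;
    snprintf(e->mac, sizeof(e->mac), "%s", mac);
    snprintf(e->remote_ip, sizeof(e->remote_ip), "%s", ip);
    e->next = table;
    table = e;
    return 0;
}

int main(void)
{
    printf("no-create: %d\n", toy_fdb_update("02:00:00:00:00:01", "10.0.0.2", 0));
    printf("create:    %d\n", toy_fdb_update("02:00:00:00:00:01", "10.0.0.2", TOY_F_CREATE));
    printf("update:    %d\n", toy_fdb_update("02:00:00:00:00:01", "10.0.0.3", 0));
    return 0;
}
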
1184 static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_dst_destroy() argument
1188 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL); in vxlan_fdb_dst_destroy()
1192 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, in vxlan_fdb_parse() argument
1196 struct net *net = dev_net(vxlan->dev); in vxlan_fdb_parse()
1208 union vxlan_addr *remote = &vxlan->default_dst.remote_ip; in vxlan_fdb_parse()
1226 *port = vxlan->cfg.dst_port; in vxlan_fdb_parse()
1234 *vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1242 *src_vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1272 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_add() local
1290 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_add()
1295 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) in vxlan_fdb_add()
1298 hash_index = fdb_head_index(vxlan, addr, src_vni); in vxlan_fdb_add()
1299 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_add()
1300 err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, in vxlan_fdb_add()
1304 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_add()
1309 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, in __vxlan_fdb_delete() argument
1318 f = vxlan_find_mac(vxlan, addr, src_vni); in __vxlan_fdb_delete()
1332 vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify); in __vxlan_fdb_delete()
1336 vxlan_fdb_destroy(vxlan, f, true, swdev_notify); in __vxlan_fdb_delete()
1347 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_delete() local
1355 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_delete()
1360 hash_index = fdb_head_index(vxlan, addr, src_vni); in vxlan_fdb_delete()
1361 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete()
1362 err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, in vxlan_fdb_delete()
1364 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete()
1374 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_dump() local
1382 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { in vxlan_fdb_dump()
1388 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1406 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1432 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_get() local
1440 vni = vxlan->default_dst.remote_vni; in vxlan_fdb_get()
1444 f = __vxlan_find_mac(vxlan, addr, vni); in vxlan_fdb_get()
1451 err = vxlan_fdb_info(skb, vxlan, f, portid, seq, in vxlan_fdb_get()
1466 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_snoop() local
1476 f = vxlan_find_mac(vxlan, src_mac, vni); in vxlan_snoop()
1499 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); in vxlan_snoop()
1501 u32 hash_index = fdb_head_index(vxlan, src_mac, vni); in vxlan_snoop()
1504 spin_lock(&vxlan->hash_lock[hash_index]); in vxlan_snoop()
1508 vxlan_fdb_update(vxlan, src_mac, src_ip, in vxlan_snoop()
1511 vxlan->cfg.dst_port, in vxlan_snoop()
1513 vxlan->default_dst.remote_vni, in vxlan_snoop()
1515 spin_unlock(&vxlan->hash_lock[hash_index]); in vxlan_snoop()
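
vxlan_snoop() (lines 1466-1515) implements source-address learning: the source MAC of a decapsulated frame is looked up, and if it now appears behind a different VTEP the entry's remote IP is migrated (unless the entry is static), while an unknown MAC gets a fresh entry created under the bucket lock. A one-entry sketch of that logic, with toy types and a print where the real code would rewrite and notify:

#include <stdio.h>
#include <string.h>

struct toy_fdb {
    char mac[18];
    char remote_ip[16];
    int is_static;       /* admin-installed entries are never moved */
};

/* A one-entry "table" keeps the sketch short. */
static struct toy_fdb entry = { "02:00:00:00:00:01", "10.0.0.2", 0 };

/*
 * With learning enabled: if the source MAC is known but now arrives from a
 * different outer source IP, migrate it (unless it is static); if it is
 * unknown, a real implementation would create it under the bucket lock.
 */
static void toy_snoop(const char *src_mac, const char *outer_src_ip)
{
    if (strcmp(entry.mac, src_mac) != 0) {
        printf("unknown %s: would create entry -> %s\n", src_mac, outer_src_ip);
        return;
    }
    if (entry.is_static || strcmp(entry.remote_ip, outer_src_ip) == 0)
        return;
    printf("%s moved %s -> %s\n", src_mac, entry.remote_ip, outer_src_ip);
    snprintf(entry.remote_ip, sizeof(entry.remote_ip), "%s", outer_src_ip);
}

int main(void)
{
    toy_snoop("02:00:00:00:00:01", "10.0.0.9");   /* migration */
    toy_snoop("02:00:00:00:00:02", "10.0.0.7");   /* unknown MAC */
    return 0;
}
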
1524 struct vxlan_dev *vxlan; in vxlan_group_used() local
1544 list_for_each_entry(vxlan, &vn->vxlan_list, next) { in vxlan_group_used()
1545 if (!netif_running(vxlan->dev) || vxlan == dev) in vxlan_group_used()
1549 rtnl_dereference(vxlan->vn4_sock) != sock4) in vxlan_group_used()
1553 rtnl_dereference(vxlan->vn6_sock) != sock6) in vxlan_group_used()
1557 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip, in vxlan_group_used()
1561 if (vxlan->default_dst.remote_ifindex != in vxlan_group_used()
1592 static void vxlan_sock_release(struct vxlan_dev *vxlan) in vxlan_sock_release() argument
1594 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_sock_release()
1596 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_sock_release()
1598 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_release()
1601 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_release()
1604 vxlan_vs_del_dev(vxlan); in vxlan_sock_release()
1622 static int vxlan_igmp_join(struct vxlan_dev *vxlan) in vxlan_igmp_join() argument
1625 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; in vxlan_igmp_join()
1626 int ifindex = vxlan->default_dst.remote_ifindex; in vxlan_igmp_join()
1630 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_igmp_join()
1642 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_igmp_join()
1656 static int vxlan_igmp_leave(struct vxlan_dev *vxlan) in vxlan_igmp_leave() argument
1659 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; in vxlan_igmp_leave()
1660 int ifindex = vxlan->default_dst.remote_ifindex; in vxlan_igmp_leave()
1664 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_igmp_leave()
1676 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_igmp_leave()
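
vxlan_igmp_join() and vxlan_igmp_leave() (lines 1622-1676) join or leave the multicast group named by default_dst.remote_ip on the device's kernel UDP socket, scoped to default_dst.remote_ifindex. The same operation expressed with the ordinary userspace socket API looks like the sketch below; the group address is made up and 4789 is simply the IANA-assigned VXLAN port, so treat this as an analogy for what the driver does in-kernel, not its code.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
    /* Join 239.1.1.1 on a UDP socket bound to the default VXLAN port. */
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    struct sockaddr_in local = {
        .sin_family = AF_INET,
        .sin_port = htons(4789),                 /* IANA VXLAN port */
        .sin_addr.s_addr = htonl(INADDR_ANY),
    };
    struct ip_mreqn mreq = {
        .imr_multiaddr.s_addr = inet_addr("239.1.1.1"),
        .imr_ifindex = 0,                        /* 0: let the kernel choose */
    };

    if (fd < 0 || bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0 ||
        setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0) {
        perror("vxlan-style multicast join");
        return 1;
    }
    printf("joined 239.1.1.1 on UDP/4789\n");
    close(fd);
    return 0;
}
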
1770 static bool vxlan_set_mac(struct vxlan_dev *vxlan, in vxlan_set_mac() argument
1778 skb->protocol = eth_type_trans(skb, vxlan->dev); in vxlan_set_mac()
1782 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) in vxlan_set_mac()
1796 if ((vxlan->cfg.flags & VXLAN_F_LEARN) && in vxlan_set_mac()
1830 struct vxlan_dev *vxlan; in vxlan_rcv() local
1862 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); in vxlan_rcv()
1863 if (!vxlan) in vxlan_rcv()
1876 !net_eq(vxlan->net, dev_net(vxlan->dev)))) in vxlan_rcv()
1918 if (!vxlan_set_mac(vxlan, vs, skb, vni)) in vxlan_rcv()
1922 skb->dev = vxlan->dev; in vxlan_rcv()
1930 ++vxlan->dev->stats.rx_frame_errors; in vxlan_rcv()
1931 ++vxlan->dev->stats.rx_errors; in vxlan_rcv()
1937 if (unlikely(!(vxlan->dev->flags & IFF_UP))) { in vxlan_rcv()
1939 atomic_long_inc(&vxlan->dev->rx_dropped); in vxlan_rcv()
1943 dev_sw_netstats_rx_add(vxlan->dev, skb->len); in vxlan_rcv()
1944 gro_cells_receive(&vxlan->gro_cells, skb); in vxlan_rcv()
1959 struct vxlan_dev *vxlan; in vxlan_err_lookup() local
1977 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); in vxlan_err_lookup()
1978 if (!vxlan) in vxlan_err_lookup()
1986 struct vxlan_dev *vxlan = netdev_priv(dev); in arp_reduce() local
2031 f = vxlan_find_mac(vxlan, n->ha, vni); in arp_reduce()
2053 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in arp_reduce()
2160 struct vxlan_dev *vxlan = netdev_priv(dev); in neigh_reduce() local
2191 f = vxlan_find_mac(vxlan, n->ha, vni); in neigh_reduce()
2209 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in neigh_reduce()
2227 struct vxlan_dev *vxlan = netdev_priv(dev); in route_shortcircuit() local
2243 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2264 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2393 static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, in vxlan_get_route() argument
2425 rt = ip_route_output_key(vxlan->net, &fl4); in vxlan_get_route()
2444 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, in vxlan6_get_route() argument
2480 ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, in vxlan6_get_route()
2556 struct vxlan_dev *vxlan, in encap_bypass_if_local() argument
2575 dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni, in encap_bypass_if_local()
2577 vxlan->cfg.flags); in encap_bypass_if_local()
2584 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni, true); in encap_bypass_if_local()
2597 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit_one() local
2609 u32 flags = vxlan->cfg.flags; in vxlan_xmit_one()
2611 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); in vxlan_xmit_one()
2620 vxlan_encap_bypass(skb, vxlan, vxlan, in vxlan_xmit_one()
2627 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; in vxlan_xmit_one()
2630 local_ip = vxlan->cfg.saddr; in vxlan_xmit_one()
2636 ttl = vxlan->cfg.ttl; in vxlan_xmit_one()
2641 tos = vxlan->cfg.tos; in vxlan_xmit_one()
2649 label = vxlan->cfg.label; in vxlan_xmit_one()
2665 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_xmit_one()
2679 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_xmit_one()
2680 vxlan->cfg.port_max, true); in vxlan_xmit_one()
2684 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_xmit_one()
2691 rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos, in vxlan_xmit_one()
2703 err = encap_bypass_if_local(skb, dev, vxlan, dst, in vxlan_xmit_one()
2709 if (vxlan->cfg.df == VXLAN_DF_SET) { in vxlan_xmit_one()
2711 } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) { in vxlan_xmit_one()
2742 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
2759 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_xmit_one()
2764 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos, in vxlan_xmit_one()
2778 err = encap_bypass_if_local(skb, dev, vxlan, dst, in vxlan_xmit_one()
2804 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
2883 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit() local
2895 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { in vxlan_xmit()
2908 if (vxlan->cfg.flags & VXLAN_F_PROXY) { in vxlan_xmit()
2927 f = vxlan_find_mac(vxlan, eth->h_dest, vni); in vxlan_xmit()
2930 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) && in vxlan_xmit()
2935 f = vxlan_find_mac(vxlan, eth->h_dest, vni); in vxlan_xmit()
2939 f = vxlan_find_mac(vxlan, all_zeros_mac, vni); in vxlan_xmit()
2941 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) && in vxlan_xmit()
2943 vxlan_fdb_miss(vxlan, eth->h_dest); in vxlan_xmit()
2953 (vni ? : vxlan->default_dst.remote_vni), did_rsc); in vxlan_xmit()
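
In vxlan_xmit() (lines 2883-2953) the inner destination MAC is resolved through vxlan_find_mac(); on a miss the driver retries with the all-zeros wildcard MAC, i.e. the default remote (line 2939), and only if that also fails does it raise an L2MISS notification when VXLAN_F_L2MISS is set (lines 2941-2943) and drop the frame. A sketch of that resolution order with a toy two-entry table; keeping the wildcard entry populated is what lets unknown-unicast and broadcast traffic fall through to the configured default remote.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct toy_fdb {
    unsigned char mac[ETH_ALEN];
    const char *remote_ip;
};

static const unsigned char all_zeros_mac[ETH_ALEN];

/* Tiny two-entry table: one learned MAC plus the all-zeros default entry. */
static struct toy_fdb table[] = {
    { { 0x02, 0, 0, 0, 0, 0x01 }, "10.0.0.2" },
    { { 0 },                      "10.0.0.254" },   /* default remote */
};

static struct toy_fdb *toy_find(const unsigned char *mac)
{
    for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (!memcmp(table[i].mac, mac, ETH_ALEN))
            return &table[i];
    return NULL;
}

static void toy_xmit(const unsigned char *dst_mac, int l2miss)
{
    struct toy_fdb *f = toy_find(dst_mac);

    if (!f)
        f = toy_find(all_zeros_mac);          /* wildcard default entry */
    if (!f) {
        if (l2miss)
            printf("L2MISS notification for %02x:..:%02x\n",
                   dst_mac[0], dst_mac[5]);
        printf("no destination: drop\n");
        return;
    }
    printf("encapsulate towards %s\n", f->remote_ip);
}

int main(void)
{
    unsigned char known[ETH_ALEN]   = { 0x02, 0, 0, 0, 0, 0x01 };
    unsigned char unknown[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x42 };

    toy_xmit(known, 1);
    toy_xmit(unknown, 1);   /* falls back to the default remote */
    return 0;
}
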
2978 struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer); in vxlan_cleanup() local
2982 if (!netif_running(vxlan->dev)) in vxlan_cleanup()
2988 spin_lock(&vxlan->hash_lock[h]); in vxlan_cleanup()
2989 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { in vxlan_cleanup()
3000 timeout = f->used + vxlan->cfg.age_interval * HZ; in vxlan_cleanup()
3002 netdev_dbg(vxlan->dev, in vxlan_cleanup()
3006 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_cleanup()
3010 spin_unlock(&vxlan->hash_lock[h]); in vxlan_cleanup()
3013 mod_timer(&vxlan->age_timer, next_timer); in vxlan_cleanup()
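
vxlan_cleanup() (lines 2978-3013) is the aging pass run from age_timer: each locked bucket is scanned, dynamic entries whose last use plus cfg.age_interval lies in the past are destroyed, and the timer is re-armed for the earliest remaining deadline. The expiry arithmetic, sketched with wall-clock time and a toy table in place of the jiffies-based kernel version:

#include <stdio.h>
#include <time.h>

struct toy_fdb {
    const char *mac;
    time_t last_used;
    int permanent;       /* static/permanent entries never age out */
};

/*
 * Return the next time the cleanup should run: entries idle for longer than
 * @age_interval seconds would be destroyed, everything else pushes the next
 * deadline out.
 */
static time_t toy_cleanup(struct toy_fdb *tab, int n, time_t now,
                          int age_interval)
{
    time_t next = now + age_interval;

    for (int i = 0; i < n; i++) {
        time_t timeout = tab[i].last_used + age_interval;

        if (tab[i].permanent)
            continue;
        if (timeout <= now) {
            printf("aging out %s\n", tab[i].mac);
            continue;                 /* a real table would unlink it here */
        }
        if (timeout < next)
            next = timeout;
    }
    return next;
}

int main(void)
{
    time_t now = time(NULL);
    struct toy_fdb tab[] = {
        { "02:00:00:00:00:01", now - 400, 0 },   /* stale */
        { "02:00:00:00:00:02", now - 10,  0 },   /* still fresh */
        { "02:00:00:00:00:03", now - 900, 1 },   /* permanent */
    };
    time_t next = toy_cleanup(tab, 3, now, 300);

    printf("re-arm timer in %ld s\n", (long)(next - now));
    return 0;
}
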
3016 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) in vxlan_vs_del_dev() argument
3018 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_vs_del_dev()
3021 hlist_del_init_rcu(&vxlan->hlist4.hlist); in vxlan_vs_del_dev()
3023 hlist_del_init_rcu(&vxlan->hlist6.hlist); in vxlan_vs_del_dev()
3028 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, in vxlan_vs_add_dev() argument
3031 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_vs_add_dev()
3032 __be32 vni = vxlan->default_dst.remote_vni; in vxlan_vs_add_dev()
3034 node->vxlan = vxlan; in vxlan_vs_add_dev()
3043 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_init() local
3050 err = gro_cells_init(&vxlan->gro_cells, dev); in vxlan_init()
3059 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) in vxlan_fdb_delete_default() argument
3062 u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni); in vxlan_fdb_delete_default()
3064 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete_default()
3065 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); in vxlan_fdb_delete_default()
3067 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_fdb_delete_default()
3068 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete_default()
3073 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_uninit() local
3075 gro_cells_destroy(&vxlan->gro_cells); in vxlan_uninit()
3077 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); in vxlan_uninit()
3085 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_open() local
3088 ret = vxlan_sock_add(vxlan); in vxlan_open()
3092 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { in vxlan_open()
3093 ret = vxlan_igmp_join(vxlan); in vxlan_open()
3097 vxlan_sock_release(vxlan); in vxlan_open()
3102 if (vxlan->cfg.age_interval) in vxlan_open()
3103 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); in vxlan_open()
3109 static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) in vxlan_flush() argument
3116 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_flush()
3117 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { in vxlan_flush()
3124 f->vni == vxlan->cfg.vni) in vxlan_flush()
3126 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_flush()
3128 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_flush()
3135 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_stop() local
3136 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_stop()
3139 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && in vxlan_stop()
3140 !vxlan_group_used(vn, vxlan)) in vxlan_stop()
3141 ret = vxlan_igmp_leave(vxlan); in vxlan_stop()
3143 del_timer_sync(&vxlan->age_timer); in vxlan_stop()
3145 vxlan_flush(vxlan, false); in vxlan_stop()
3146 vxlan_sock_release(vxlan); in vxlan_stop()
3158 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_change_mtu() local
3159 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_change_mtu()
3160 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_change_mtu()
3162 bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6); in vxlan_change_mtu()
3180 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_metadata_dst() local
3184 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_fill_metadata_dst()
3185 vxlan->cfg.port_max, true); in vxlan_fill_metadata_dst()
3186 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_fill_metadata_dst()
3189 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_fill_metadata_dst()
3192 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, in vxlan_fill_metadata_dst()
3201 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_fill_metadata_dst()
3204 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, in vxlan_fill_metadata_dst()
3288 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_setup() local
3313 INIT_LIST_HEAD(&vxlan->next); in vxlan_setup()
3315 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE); in vxlan_setup()
3317 vxlan->dev = dev; in vxlan_setup()
3320 spin_lock_init(&vxlan->hash_lock[h]); in vxlan_setup()
3321 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); in vxlan_setup()
3451 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_ksettings() local
3452 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_get_link_ksettings()
3453 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_get_link_ksettings()
3554 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) in __vxlan_sock_add() argument
3556 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in __vxlan_sock_add()
3561 if (vxlan->cfg.remote_ifindex) in __vxlan_sock_add()
3563 vxlan->net, vxlan->cfg.remote_ifindex); in __vxlan_sock_add()
3565 if (!vxlan->cfg.no_share) { in __vxlan_sock_add()
3567 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, in __vxlan_sock_add()
3568 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3577 vs = vxlan_socket_create(vxlan->net, ipv6, in __vxlan_sock_add()
3578 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3584 rcu_assign_pointer(vxlan->vn6_sock, vs); in __vxlan_sock_add()
3585 node = &vxlan->hlist6; in __vxlan_sock_add()
3589 rcu_assign_pointer(vxlan->vn4_sock, vs); in __vxlan_sock_add()
3590 node = &vxlan->hlist4; in __vxlan_sock_add()
3592 vxlan_vs_add_dev(vs, vxlan, node); in __vxlan_sock_add()
3596 static int vxlan_sock_add(struct vxlan_dev *vxlan) in vxlan_sock_add() argument
3598 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; in vxlan_sock_add()
3599 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata; in vxlan_sock_add()
3603 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_add()
3605 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_add()
3607 ret = __vxlan_sock_add(vxlan, true); in vxlan_sock_add()
3613 ret = __vxlan_sock_add(vxlan, false); in vxlan_sock_add()
3615 vxlan_sock_release(vxlan); in vxlan_sock_add()
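
vxlan_sock_add() (lines 3596-3615) decides which underlying UDP sockets to open: metadata collection implies IPv6 handling in addition to IPv4 (lines 3598-3599), and a failure releases whatever was already opened (line 3615); __vxlan_sock_add() additionally reuses an existing socket with the same port and flags unless no_share is set (line 3565). The boolean decision part only, sketched with stand-in flags (the sharing and error-code subtleties are omitted):

#include <stdio.h>

struct toy_cfg {
    int flag_ipv6;        /* stands in for VXLAN_F_IPV6 */
    int flag_metadata;    /* stands in for VXLAN_F_COLLECT_METADATA */
    int ipv6_build;       /* stands in for an IPv6-capable build */
};

/* Pretend to create or share a kernel UDP socket; always succeeds here. */
static int toy_sock_add_one(int ipv6)
{
    printf("open %s vxlan socket\n", ipv6 ? "IPv6" : "IPv4");
    return 0;
}

static int toy_sock_add(const struct toy_cfg *cfg)
{
    int metadata = cfg->flag_metadata;
    int ipv6 = cfg->flag_ipv6 || metadata;   /* metadata mode needs both */
    int ret = 0;

    if (ipv6 && cfg->ipv6_build)
        ret = toy_sock_add_one(1);
    if (!ret && (!ipv6 || metadata))
        ret = toy_sock_add_one(0);
    if (ret)
        printf("failure: release any socket opened so far\n");
    return ret;
}

int main(void)
{
    struct toy_cfg metadata_dev = { .flag_metadata = 1, .ipv6_build = 1 };
    struct toy_cfg plain_v4_dev = { 0 };

    toy_sock_add(&metadata_dev);   /* opens IPv6 and IPv4 */
    toy_sock_add(&plain_v4_dev);   /* opens IPv4 only */
    return 0;
}
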
3788 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_config_apply() local
3789 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_config_apply()
3803 vxlan->net = src_net; in vxlan_config_apply()
3839 memcpy(&vxlan->cfg, conf, sizeof(*conf)); in vxlan_config_apply()
3846 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dev_configure() local
3850 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack); in vxlan_dev_configure()
3864 struct vxlan_dev *vxlan = netdev_priv(dev); in __vxlan_dev_create() local
3871 dst = &vxlan->default_dst; in __vxlan_dev_create()
3880 err = vxlan_fdb_create(vxlan, all_zeros_mac, in __vxlan_dev_create()
3883 vxlan->cfg.dst_port, in __vxlan_dev_create()
3914 vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f); in __vxlan_dev_create()
3917 err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), in __vxlan_dev_create()
3920 vxlan_fdb_destroy(vxlan, f, false, false); in __vxlan_dev_create()
3927 list_add(&vxlan->next, &vn->vxlan_list); in __vxlan_dev_create()
3979 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_nl2conf() local
3986 memcpy(conf, &vxlan->cfg, sizeof(*conf)); in vxlan_nl2conf()
4250 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_changelink() local
4256 dst = &vxlan->default_dst; in vxlan_changelink()
4261 err = vxlan_config_validate(vxlan->net, &conf, &lowerdev, in vxlan_changelink()
4262 vxlan, extack); in vxlan_changelink()
4276 u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni); in vxlan_changelink()
4278 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4280 err = vxlan_fdb_update(vxlan, all_zeros_mac, in vxlan_changelink()
4284 vxlan->cfg.dst_port, in vxlan_changelink()
4289 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4296 __vxlan_fdb_delete(vxlan, all_zeros_mac, in vxlan_changelink()
4298 vxlan->cfg.dst_port, in vxlan_changelink()
4303 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4306 if (conf.age_interval != vxlan->cfg.age_interval) in vxlan_changelink()
4307 mod_timer(&vxlan->age_timer, jiffies); in vxlan_changelink()
4312 vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); in vxlan_changelink()
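
When vxlan_changelink() (lines 4250-4312) sees a new default remote, it rewrites the all-zeros-MAC default FDB entry under its bucket lock: first install the entry for the new remote (line 4280), then delete the one for the old remote (line 4296). A sketch of that replace-then-remove ordering, with toy helpers in place of the FDB calls:

#include <stdio.h>
#include <string.h>

struct toy_default_remote {
    char ip[16];
    unsigned int vni;
};

static void toy_fdb_update(const struct toy_default_remote *r)
{
    printf("install default entry: 00:00:00:00:00:00 vni %u -> %s\n",
           r->vni, r->ip);
}

static void toy_fdb_delete(const struct toy_default_remote *r)
{
    printf("drop old default entry: vni %u -> %s\n", r->vni, r->ip);
}

/* Apply a new default remote: install the new mapping, then retire the old. */
static void toy_changelink(struct toy_default_remote *cur,
                           const struct toy_default_remote *conf)
{
    int changed = strcmp(cur->ip, conf->ip) || cur->vni != conf->vni;

    if (!changed)
        return;
    toy_fdb_update(conf);     /* done under the bucket lock in the driver */
    toy_fdb_delete(cur);
    *cur = *conf;
}

int main(void)
{
    struct toy_default_remote cur  = { "10.0.0.254", 100 };
    struct toy_default_remote conf = { "10.0.1.254", 100 };

    toy_changelink(&cur, &conf);
    return 0;
}
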
4318 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dellink() local
4320 vxlan_flush(vxlan, true); in vxlan_dellink()
4322 list_del(&vxlan->next); in vxlan_dellink()
4324 if (vxlan->default_dst.remote_dev) in vxlan_dellink()
4325 netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev); in vxlan_dellink()
4360 const struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_info() local
4361 const struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_fill_info()
4363 .low = htons(vxlan->cfg.port_min), in vxlan_fill_info()
4364 .high = htons(vxlan->cfg.port_max), in vxlan_fill_info()
4387 if (!vxlan_addr_any(&vxlan->cfg.saddr)) { in vxlan_fill_info()
4388 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { in vxlan_fill_info()
4390 vxlan->cfg.saddr.sin.sin_addr.s_addr)) in vxlan_fill_info()
4395 &vxlan->cfg.saddr.sin6.sin6_addr)) in vxlan_fill_info()
4401 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || in vxlan_fill_info()
4403 !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) || in vxlan_fill_info()
4404 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || in vxlan_fill_info()
4405 nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) || in vxlan_fill_info()
4406 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || in vxlan_fill_info()
4408 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) || in vxlan_fill_info()
4410 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) || in vxlan_fill_info()
4412 !!(vxlan->cfg.flags & VXLAN_F_RSC)) || in vxlan_fill_info()
4414 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) || in vxlan_fill_info()
4416 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) || in vxlan_fill_info()
4418 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) || in vxlan_fill_info()
4419 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || in vxlan_fill_info()
4420 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || in vxlan_fill_info()
4421 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || in vxlan_fill_info()
4423 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) || in vxlan_fill_info()
4425 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || in vxlan_fill_info()
4427 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) || in vxlan_fill_info()
4429 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) || in vxlan_fill_info()
4431 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX))) in vxlan_fill_info()
4437 if (vxlan->cfg.flags & VXLAN_F_GBP && in vxlan_fill_info()
4441 if (vxlan->cfg.flags & VXLAN_F_GPE && in vxlan_fill_info()
4445 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL && in vxlan_fill_info()
4457 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_net() local
4459 return vxlan->net; in vxlan_get_link_net()
4514 struct vxlan_dev *vxlan, *next; in vxlan_handle_lowerdev_unregister() local
4517 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { in vxlan_handle_lowerdev_unregister()
4518 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_handle_lowerdev_unregister()
4527 vxlan_dellink(vxlan->dev, &list_kill); in vxlan_handle_lowerdev_unregister()
4557 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_offloaded_set() local
4562 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_offloaded_set()
4564 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_offloaded_set()
4566 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_offloaded_set()
4580 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_offloaded_set()
4587 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_add() local
4592 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_add()
4595 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_add()
4596 err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip, in vxlan_fdb_external_learn_add()
4605 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_add()
4614 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_del() local
4619 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_del()
4620 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_del()
4622 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_del()
4626 err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr, in vxlan_fdb_external_learn_del()
4634 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_del()
4682 struct vxlan_dev *vxlan; in vxlan_fdb_nh_flush() local
4687 vxlan = rcu_dereference(fdb->vdev); in vxlan_fdb_nh_flush()
4688 WARN_ON(!vxlan); in vxlan_fdb_nh_flush()
4689 hash_index = fdb_head_index(vxlan, fdb->eth_addr, in vxlan_fdb_nh_flush()
4690 vxlan->default_dst.remote_vni); in vxlan_fdb_nh_flush()
4691 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_nh_flush()
4693 vxlan_fdb_destroy(vxlan, fdb, false, false); in vxlan_fdb_nh_flush()
4694 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_nh_flush()
4736 struct vxlan_dev *vxlan, *next; in vxlan_destroy_tunnels() local
4743 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { in vxlan_destroy_tunnels()
4747 if (!net_eq(dev_net(vxlan->dev), net)) in vxlan_destroy_tunnels()
4748 unregister_netdevice_queue(vxlan->dev, head); in vxlan_destroy_tunnels()