Lines matching refs:vxlan

63 static int vxlan_sock_add(struct vxlan_dev *vxlan);
65 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
114 if (!node->vxlan) in vxlan_vs_find_vni()
117 if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) { in vxlan_vs_find_vni()
118 vnode = vxlan_vnifilter_lookup(node->vxlan, vni); in vxlan_vs_find_vni()
121 } else if (node->vxlan->default_dst.remote_vni != vni) { in vxlan_vs_find_vni()
126 const struct vxlan_config *cfg = &node->vxlan->cfg; in vxlan_vs_find_vni()
135 return node->vxlan; in vxlan_vs_find_vni()
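
The vxlan_vs_find_vni() lines above show the per-socket dispatch: a device with VXLAN_F_VNIFILTER set is matched through its VNI filter, otherwise against its single default VNI. The userspace sketch below illustrates that decision only; every toy_* name is hypothetical and none of this is the driver's code.

/*
 * Illustrative stand-in for the per-socket VNI dispatch: a device either
 * carries an explicit VNI filter or is matched against its default VNI.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_F_VNIFILTER (1u << 0)

struct toy_vxlan_dev {
	uint32_t flags;          /* TOY_F_VNIFILTER, ... */
	uint32_t default_vni;    /* used when no VNI filter is attached */
	const uint32_t *vni_set; /* allowed VNIs when filtering is on */
	size_t vni_cnt;
};

/* Stand-in for vxlan_vnifilter_lookup(): linear scan of the allowed set. */
static bool toy_vnifilter_match(const struct toy_vxlan_dev *dev, uint32_t vni)
{
	for (size_t i = 0; i < dev->vni_cnt; i++)
		if (dev->vni_set[i] == vni)
			return true;
	return false;
}

/* Stand-in for vxlan_vs_find_vni(): pick the first device accepting @vni. */
static const struct toy_vxlan_dev *
toy_find_vni(const struct toy_vxlan_dev *devs, size_t n, uint32_t vni)
{
	for (size_t i = 0; i < n; i++) {
		const struct toy_vxlan_dev *dev = &devs[i];

		if (dev->flags & TOY_F_VNIFILTER) {
			if (toy_vnifilter_match(dev, vni))
				return dev;
		} else if (dev->default_vni == vni) {
			return dev;
		}
	}
	return NULL;
}

int main(void)
{
	static const uint32_t filter[] = { 10, 20, 30 };
	const struct toy_vxlan_dev filtered = {
		.flags = TOY_F_VNIFILTER, .vni_set = filter, .vni_cnt = 3,
	};
	const struct toy_vxlan_dev plain = { .default_vni = 42 };
	const struct toy_vxlan_dev devs[] = { filtered, plain };

	printf("vni 20 matched: %d\n", toy_find_vni(devs, 2, 20) != NULL);
	printf("vni 42 matched: %d\n", toy_find_vni(devs, 2, 42) != NULL);
	printf("vni 99 matched: %d\n", toy_find_vni(devs, 2, 99) != NULL);
	return 0;
}
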
156 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, in vxlan_fdb_info() argument
198 ndm->ndm_ifindex = vxlan->dev->ifindex; in vxlan_fdb_info()
204 if (!net_eq(dev_net(vxlan->dev), vxlan->net) && in vxlan_fdb_info()
206 peernet2id(dev_net(vxlan->dev), vxlan->net))) in vxlan_fdb_info()
220 rdst->remote_port != vxlan->cfg.dst_port && in vxlan_fdb_info()
223 if (rdst->remote_vni != vxlan->default_dst.remote_vni && in vxlan_fdb_info()
231 if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->key.vni && in vxlan_fdb_info()
264 static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in __vxlan_fdb_notify() argument
267 struct net *net = dev_net(vxlan->dev); in __vxlan_fdb_notify()
275 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); in __vxlan_fdb_notify()
289 static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_notifier_info() argument
295 fdb_info->info.dev = vxlan->dev; in vxlan_fdb_switchdev_notifier_info()
307 static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_call_notifiers() argument
322 vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info); in vxlan_fdb_switchdev_call_notifiers()
323 ret = call_switchdev_notifiers(notifier_type, vxlan->dev, in vxlan_fdb_switchdev_call_notifiers()
328 static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_notify() argument
337 err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
343 vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
349 __vxlan_fdb_notify(vxlan, fdb, rd, type); in vxlan_fdb_notify()
355 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_ip_miss() local
364 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_ip_miss()
367 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) in vxlan_fdb_miss() argument
376 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_fdb_miss()
380 static struct vxlan_fdb *vxlan_find_mac_rcu(struct vxlan_dev *vxlan, in vxlan_find_mac_rcu() argument
387 if (!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) in vxlan_find_mac_rcu()
388 key.vni = vxlan->default_dst.remote_vni; in vxlan_find_mac_rcu()
392 return rhashtable_lookup(&vxlan->fdb_hash_tbl, &key, in vxlan_find_mac_rcu()
396 static struct vxlan_fdb *vxlan_find_mac_tx(struct vxlan_dev *vxlan, in vxlan_find_mac_tx() argument
401 f = vxlan_find_mac_rcu(vxlan, mac, vni); in vxlan_find_mac_tx()
412 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, in vxlan_find_mac() argument
417 lockdep_assert_held_once(&vxlan->hash_lock); in vxlan_find_mac()
420 f = vxlan_find_mac_rcu(vxlan, mac, vni); in vxlan_find_mac()
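
vxlan_find_mac_rcu() above builds its lookup key from (MAC, VNI), pinning the VNI to the device's default when VXLAN_F_COLLECT_METADATA is not set. Below is a minimal userspace sketch of that key construction, assuming a flat table instead of the driver's RCU-protected rhashtable; all toy_* names are invented.

/*
 * FDB lookup keyed on (MAC, VNI). Without per-packet tunnel metadata the
 * device has exactly one VNI, so the lookup key is forced to it.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_F_COLLECT_METADATA (1u << 1)
#define TOY_ETH_ALEN 6

struct toy_fdb_key {
	uint8_t eth_addr[TOY_ETH_ALEN];
	uint32_t vni;
};

struct toy_fdb_entry {
	struct toy_fdb_key key;
	uint32_t remote_ip; /* simplified remote: a single IPv4 address */
};

static const struct toy_fdb_entry *
toy_fdb_lookup(const struct toy_fdb_entry *tbl, size_t n,
	       uint32_t dev_flags, uint32_t default_vni,
	       const uint8_t *mac, uint32_t vni)
{
	struct toy_fdb_key key;

	memcpy(key.eth_addr, mac, TOY_ETH_ALEN);
	/* Non-metadata devices always look up under their default VNI. */
	key.vni = (dev_flags & TOY_F_COLLECT_METADATA) ? vni : default_vni;

	for (size_t i = 0; i < n; i++)
		if (!memcmp(tbl[i].key.eth_addr, key.eth_addr, TOY_ETH_ALEN) &&
		    tbl[i].key.vni == key.vni)
			return &tbl[i];
	return NULL;
}

int main(void)
{
	const uint8_t mac[TOY_ETH_ALEN] = { 0x52, 0x54, 0x00, 0xaa, 0xbb, 0xcc };
	const struct toy_fdb_entry tbl[] = {
		{ .key = { { 0x52, 0x54, 0x00, 0xaa, 0xbb, 0xcc }, 100 },
		  .remote_ip = 0x0a000001 },
	};

	/* Non-metadata device: the caller's VNI argument is ignored. */
	printf("hit: %d\n", toy_fdb_lookup(tbl, 1, 0, 100, mac, 999) != NULL);
	return 0;
}
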
447 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_find_uc() local
461 f = vxlan_find_mac_rcu(vxlan, eth_addr, vni); in vxlan_fdb_find_uc()
468 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info); in vxlan_fdb_find_uc()
477 const struct vxlan_dev *vxlan, in vxlan_fdb_notify_one() argument
485 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info); in vxlan_fdb_notify_one()
495 struct vxlan_dev *vxlan; in vxlan_fdb_replay() local
502 vxlan = netdev_priv(dev); in vxlan_fdb_replay()
504 spin_lock_bh(&vxlan->hash_lock); in vxlan_fdb_replay()
505 hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) { in vxlan_fdb_replay()
508 rc = vxlan_fdb_notify_one(nb, vxlan, f, rdst, in vxlan_fdb_replay()
515 spin_unlock_bh(&vxlan->hash_lock); in vxlan_fdb_replay()
519 spin_unlock_bh(&vxlan->hash_lock); in vxlan_fdb_replay()
526 struct vxlan_dev *vxlan; in vxlan_fdb_clear_offload() local
532 vxlan = netdev_priv(dev); in vxlan_fdb_clear_offload()
534 spin_lock_bh(&vxlan->hash_lock); in vxlan_fdb_clear_offload()
535 hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) { in vxlan_fdb_clear_offload()
541 spin_unlock_bh(&vxlan->hash_lock); in vxlan_fdb_clear_offload()
770 static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac, in vxlan_fdb_alloc() argument
785 RCU_INIT_POINTER(f->vdev, vxlan); in vxlan_fdb_alloc()
793 static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_nh_update() argument
803 nh = nexthop_find_by_id(vxlan->net, nhid); in vxlan_fdb_nh_update()
825 switch (vxlan->default_dst.remote_ip.sa.sa_family) { in vxlan_fdb_nh_update()
855 int vxlan_fdb_create(struct vxlan_dev *vxlan, in vxlan_fdb_create() argument
866 if (vxlan->cfg.addrmax && in vxlan_fdb_create()
867 vxlan->addrcnt >= vxlan->cfg.addrmax) in vxlan_fdb_create()
870 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_create()
871 f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); in vxlan_fdb_create()
876 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_create()
882 rc = rhashtable_lookup_insert_fast(&vxlan->fdb_hash_tbl, &f->rhnode, in vxlan_fdb_create()
887 ++vxlan->addrcnt; in vxlan_fdb_create()
888 hlist_add_head_rcu(&f->fdb_node, &vxlan->fdb_list); in vxlan_fdb_create()
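
The vxlan_fdb_create() lines above check the configured address limit before allocating a new entry. Here is a small stand-alone sketch of that cap with illustrative names; the real function also allocates the entry, resolves an optional nexthop and inserts into the rhashtable.

/* Refuse a new FDB entry once the per-device address count hits the limit. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct toy_fdb_table {
	uint32_t addrmax; /* 0 means "no limit" */
	uint32_t addrcnt;
};

static int toy_fdb_create(struct toy_fdb_table *tbl)
{
	if (tbl->addrmax && tbl->addrcnt >= tbl->addrmax)
		return -ENOSPC;

	/* ...allocate the entry, insert it into the hash, link it... */
	tbl->addrcnt++;
	return 0;
}

int main(void)
{
	struct toy_fdb_table tbl = { .addrmax = 2 };

	for (int i = 0; i < 3; i++)
		printf("create #%d -> %d\n", i, toy_fdb_create(&tbl));
	return 0;
}
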
934 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_destroy() argument
939 netdev_dbg(vxlan->dev, "delete %pM\n", f->key.eth_addr); in vxlan_fdb_destroy()
941 --vxlan->addrcnt; in vxlan_fdb_destroy()
944 vxlan_fdb_notify(vxlan, f, NULL, RTM_DELNEIGH, in vxlan_fdb_destroy()
948 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, in vxlan_fdb_destroy()
953 rhashtable_remove_fast(&vxlan->fdb_hash_tbl, &f->rhnode, in vxlan_fdb_destroy()
967 static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, in vxlan_fdb_update_existing() argument
1015 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_update_existing()
1046 err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, in vxlan_fdb_update_existing()
1066 static int vxlan_fdb_update_create(struct vxlan_dev *vxlan, in vxlan_fdb_update_create() argument
1083 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_update_create()
1084 rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, in vxlan_fdb_update_create()
1089 rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, in vxlan_fdb_update_create()
1097 vxlan_fdb_destroy(vxlan, f, false, false); in vxlan_fdb_update_create()
1102 int vxlan_fdb_update(struct vxlan_dev *vxlan, in vxlan_fdb_update() argument
1112 f = vxlan_find_mac(vxlan, mac, src_vni); in vxlan_fdb_update()
1115 netdev_dbg(vxlan->dev, in vxlan_fdb_update()
1120 return vxlan_fdb_update_existing(vxlan, ip, state, flags, port, in vxlan_fdb_update()
1127 return vxlan_fdb_update_create(vxlan, mac, ip, state, flags, in vxlan_fdb_update()
1134 static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_dst_destroy() argument
1138 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL); in vxlan_fdb_dst_destroy()
1142 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, in vxlan_fdb_parse() argument
1147 struct net *net = dev_net(vxlan->dev); in vxlan_fdb_parse()
1163 union vxlan_addr *remote = &vxlan->default_dst.remote_ip; in vxlan_fdb_parse()
1183 *port = vxlan->cfg.dst_port; in vxlan_fdb_parse()
1193 *vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1203 *src_vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1234 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_add() local
1251 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_add()
1256 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) in vxlan_fdb_add()
1259 spin_lock_bh(&vxlan->hash_lock); in vxlan_fdb_add()
1260 err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, in vxlan_fdb_add()
1264 spin_unlock_bh(&vxlan->hash_lock); in vxlan_fdb_add()
1272 int __vxlan_fdb_delete(struct vxlan_dev *vxlan, in __vxlan_fdb_delete() argument
1281 f = vxlan_find_mac(vxlan, addr, src_vni); in __vxlan_fdb_delete()
1295 vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify); in __vxlan_fdb_delete()
1299 vxlan_fdb_destroy(vxlan, f, true, swdev_notify); in __vxlan_fdb_delete()
1311 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_delete() local
1318 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_delete()
1323 spin_lock_bh(&vxlan->hash_lock); in vxlan_fdb_delete()
1324 err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, in vxlan_fdb_delete()
1326 spin_unlock_bh(&vxlan->hash_lock); in vxlan_fdb_delete()
1340 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_dump() local
1345 hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) { in vxlan_fdb_dump()
1351 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1368 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1392 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_get() local
1400 vni = vxlan->default_dst.remote_vni; in vxlan_fdb_get()
1404 f = vxlan_find_mac_rcu(vxlan, addr, vni); in vxlan_fdb_get()
1411 err = vxlan_fdb_info(skb, vxlan, f, portid, seq, in vxlan_fdb_get()
1426 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_snoop() local
1440 f = vxlan_find_mac_rcu(vxlan, src_mac, vni); in vxlan_snoop()
1466 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); in vxlan_snoop()
1469 spin_lock(&vxlan->hash_lock); in vxlan_snoop()
1473 vxlan_fdb_update(vxlan, src_mac, src_ip, in vxlan_snoop()
1476 vxlan->cfg.dst_port, in vxlan_snoop()
1478 vxlan->default_dst.remote_vni, in vxlan_snoop()
1480 spin_unlock(&vxlan->hash_lock); in vxlan_snoop()
1504 static void vxlan_sock_release(struct vxlan_dev *vxlan) in vxlan_sock_release() argument
1506 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_sock_release()
1508 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_sock_release()
1510 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_release()
1513 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_release()
1516 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) in vxlan_sock_release()
1517 vxlan_vs_del_vnigrp(vxlan); in vxlan_sock_release()
1519 vxlan_vs_del_dev(vxlan); in vxlan_sock_release()
1586 static enum skb_drop_reason vxlan_set_mac(struct vxlan_dev *vxlan, in vxlan_set_mac() argument
1594 skb->protocol = eth_type_trans(skb, vxlan->dev); in vxlan_set_mac()
1598 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) in vxlan_set_mac()
1612 if (!(vxlan->cfg.flags & VXLAN_F_LEARN)) in vxlan_set_mac()
1647 struct vxlan_dev *vxlan; in vxlan_rcv() local
1679 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode); in vxlan_rcv()
1680 if (!vxlan) { in vxlan_rcv()
1685 if (vh->vx_flags & vxlan->cfg.reserved_bits.vx_flags || in vxlan_rcv()
1686 vh->vx_vni & vxlan->cfg.reserved_bits.vx_vni) { in vxlan_rcv()
1696 DEV_STATS_INC(vxlan->dev, rx_frame_errors); in vxlan_rcv()
1697 DEV_STATS_INC(vxlan->dev, rx_errors); in vxlan_rcv()
1698 vxlan_vnifilter_count(vxlan, vni, vninode, in vxlan_rcv()
1703 if (vxlan->cfg.flags & VXLAN_F_GPE) { in vxlan_rcv()
1710 !net_eq(vxlan->net, dev_net(vxlan->dev)))) { in vxlan_rcv()
1715 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_RX) { in vxlan_rcv()
1716 reason = vxlan_remcsum(skb, vxlan->cfg.flags); in vxlan_rcv()
1741 if (vxlan->cfg.flags & VXLAN_F_GBP) in vxlan_rcv()
1742 vxlan_parse_gbp_hdr(skb, vxlan->cfg.flags, md); in vxlan_rcv()
1748 reason = vxlan_set_mac(vxlan, vs, skb, vni); in vxlan_rcv()
1753 skb->dev = vxlan->dev; in vxlan_rcv()
1767 DEV_STATS_INC(vxlan->dev, rx_length_errors); in vxlan_rcv()
1768 DEV_STATS_INC(vxlan->dev, rx_errors); in vxlan_rcv()
1769 vxlan_vnifilter_count(vxlan, vni, vninode, in vxlan_rcv()
1779 DEV_STATS_INC(vxlan->dev, rx_frame_errors); in vxlan_rcv()
1780 DEV_STATS_INC(vxlan->dev, rx_errors); in vxlan_rcv()
1781 vxlan_vnifilter_count(vxlan, vni, vninode, in vxlan_rcv()
1788 if (unlikely(!(vxlan->dev->flags & IFF_UP))) { in vxlan_rcv()
1790 dev_dstats_rx_dropped(vxlan->dev); in vxlan_rcv()
1791 vxlan_vnifilter_count(vxlan, vni, vninode, in vxlan_rcv()
1797 dev_dstats_rx_add(vxlan->dev, skb->len); in vxlan_rcv()
1798 vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len); in vxlan_rcv()
1799 gro_cells_receive(&vxlan->gro_cells, skb); in vxlan_rcv()
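
vxlan_rcv() above validates the header's reserved bits against cfg.reserved_bits before accepting a frame. The sketch below shows the shape of such a check for the RFC 7348 header layout (one mandatory "I" flag plus a 24-bit VNI, everything else reserved); it is an illustration only, not the driver's parser, and it ignores the configurable tolerance the driver supports.

/* Reserved-bits check on a toy copy of the 8-byte VXLAN header. */
#include <arpa/inet.h> /* ntohl(), htonl() */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vxlanhdr {
	uint32_t vx_flags; /* network byte order */
	uint32_t vx_vni;   /* network byte order, VNI in the upper 24 bits */
};

#define TOY_VXLAN_I_FLAG 0x08000000u /* "valid VNI" bit from RFC 7348 */

static bool toy_vxlan_hdr_ok(const struct toy_vxlanhdr *vxh, uint32_t *vni)
{
	uint32_t flags = ntohl(vxh->vx_flags);
	uint32_t vni_field = ntohl(vxh->vx_vni);

	if (!(flags & TOY_VXLAN_I_FLAG))
		return false;            /* VNI not marked valid */
	if (flags & ~TOY_VXLAN_I_FLAG)
		return false;            /* reserved flag bits set */
	if (vni_field & 0xffu)
		return false;            /* reserved low byte of the VNI word */

	*vni = vni_field >> 8;
	return true;
}

int main(void)
{
	struct toy_vxlanhdr good = {
		.vx_flags = htonl(TOY_VXLAN_I_FLAG),
		.vx_vni = htonl(100u << 8),
	};
	struct toy_vxlanhdr bad = good;
	uint32_t vni = 0;

	bad.vx_vni = htonl((100u << 8) | 0x5); /* dirty reserved byte */

	printf("good: %d (vni %u)\n", toy_vxlan_hdr_ok(&good, &vni), vni);
	printf("bad:  %d\n", toy_vxlan_hdr_ok(&bad, &vni));
	return 0;
}
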
1814 struct vxlan_dev *vxlan; in vxlan_err_lookup() local
1832 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, NULL); in vxlan_err_lookup()
1833 if (!vxlan) in vxlan_err_lookup()
1841 struct vxlan_dev *vxlan = netdev_priv(dev); in arp_reduce() local
1852 vxlan_vnifilter_count(vxlan, vni, NULL, in arp_reduce()
1889 f = vxlan_find_mac_tx(vxlan, n->ha, vni); in arp_reduce()
1913 vxlan_vnifilter_count(vxlan, vni, NULL, in arp_reduce()
1917 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in arp_reduce()
2024 struct vxlan_dev *vxlan = netdev_priv(dev); in neigh_reduce() local
2055 f = vxlan_find_mac_tx(vxlan, n->ha, vni); in neigh_reduce()
2072 vxlan_vnifilter_count(vxlan, vni, NULL, in neigh_reduce()
2075 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in neigh_reduce()
2093 struct vxlan_dev *vxlan = netdev_priv(dev); in route_shortcircuit() local
2109 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2130 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2290 struct vxlan_dev *vxlan, in encap_bypass_if_local() argument
2306 vxlan->cfg.flags & VXLAN_F_LOCALBYPASS) { in encap_bypass_if_local()
2310 dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni, in encap_bypass_if_local()
2312 vxlan->cfg.flags); in encap_bypass_if_local()
2315 vxlan_vnifilter_count(vxlan, vni, NULL, in encap_bypass_if_local()
2321 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni, true); in encap_bypass_if_local()
2335 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit_one() local
2346 u32 flags = vxlan->cfg.flags; in vxlan_xmit_one()
2349 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); in vxlan_xmit_one()
2372 vxlan_encap_bypass(skb, vxlan, vxlan, in vxlan_xmit_one()
2379 addr_family = vxlan->cfg.saddr.sa.sa_family; in vxlan_xmit_one()
2380 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; in vxlan_xmit_one()
2385 key.u.ipv4.src = vxlan->cfg.saddr.sin.sin_addr.s_addr; in vxlan_xmit_one()
2388 key.u.ipv6.src = vxlan->cfg.saddr.sin6.sin6_addr; in vxlan_xmit_one()
2397 ttl = vxlan->cfg.ttl; in vxlan_xmit_one()
2401 tos = vxlan->cfg.tos; in vxlan_xmit_one()
2412 switch (vxlan->cfg.label_policy) { in vxlan_xmit_one()
2414 key.label = vxlan->cfg.label; in vxlan_xmit_one()
2432 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_xmit_one()
2445 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_xmit_one()
2446 vxlan->cfg.port_max, true); in vxlan_xmit_one()
2450 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_xmit_one()
2459 rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, ifindex, in vxlan_xmit_one()
2473 err = encap_bypass_if_local(skb, dev, vxlan, AF_INET, in vxlan_xmit_one()
2479 if (vxlan->cfg.df == VXLAN_DF_SET) { in vxlan_xmit_one()
2481 } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) { in vxlan_xmit_one()
2510 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
2530 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_xmit_one()
2537 ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock, in vxlan_xmit_one()
2554 err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6, in vxlan_xmit_one()
2578 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
2599 vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len); in vxlan_xmit_one()
2606 vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); in vxlan_xmit_one()
2618 vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); in vxlan_xmit_one()
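
On transmit, vxlan_xmit_one() above derives the UDP source port from a flow hash constrained to cfg.port_min..port_max, so a single flow keeps one source port while different flows spread across ECMP paths. Below is a hedged userspace approximation of that scaling; the hash function and constants are made up, and the kernel instead uses the skb's precomputed flow hash.

/* Scale a 32-bit flow hash into a configured source-port window. */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_flow_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	uint32_t h = saddr ^ daddr ^ ((uint32_t)sport << 16 | dport);

	h ^= h >> 16;
	h *= 0x7feb352du;
	h ^= h >> 15;
	return h;
}

static uint16_t toy_src_port(uint32_t hash, uint16_t port_min, uint16_t port_max)
{
	uint32_t range = (uint32_t)port_max - port_min;

	/* Map the hash uniformly into [port_min, port_max). */
	return (uint16_t)(((uint64_t)hash * range >> 32) + port_min);
}

int main(void)
{
	uint32_t h1 = toy_flow_hash(0x0a000001, 0x0a000002, 12345, 80);
	uint32_t h2 = toy_flow_hash(0x0a000001, 0x0a000003, 12345, 80);

	printf("flow1 -> sport %u\n", toy_src_port(h1, 32768, 61000));
	printf("flow2 -> sport %u\n", toy_src_port(h2, 32768, 61000));
	return 0;
}
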
2655 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit_nhid() local
2673 if (vxlan->cfg.saddr.sa.sa_family != nh_rdst.remote_ip.sa.sa_family) in vxlan_xmit_nhid()
2699 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit() local
2712 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { in vxlan_xmit()
2726 if (vxlan->cfg.flags & VXLAN_F_PROXY) { in vxlan_xmit()
2747 if (vxlan->cfg.flags & VXLAN_F_MDB) { in vxlan_xmit()
2751 mdb_entry = vxlan_mdb_entry_skb_get(vxlan, skb, vni); in vxlan_xmit()
2755 ret = vxlan_mdb_xmit(vxlan, mdb_entry, skb); in vxlan_xmit()
2764 f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni); in vxlan_xmit()
2767 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) && in vxlan_xmit()
2772 f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni); in vxlan_xmit()
2776 f = vxlan_find_mac_tx(vxlan, all_zeros_mac, vni); in vxlan_xmit()
2778 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) && in vxlan_xmit()
2780 vxlan_fdb_miss(vxlan, eth->h_dest); in vxlan_xmit()
2783 vxlan_vnifilter_count(vxlan, vni, NULL, in vxlan_xmit()
2792 (vni ? : vxlan->default_dst.remote_vni), did_rsc); in vxlan_xmit()
2819 struct vxlan_dev *vxlan = timer_container_of(vxlan, t, age_timer); in vxlan_cleanup() local
2823 if (!netif_running(vxlan->dev)) in vxlan_cleanup()
2827 hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) { in vxlan_cleanup()
2836 timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ; in vxlan_cleanup()
2838 spin_lock(&vxlan->hash_lock); in vxlan_cleanup()
2840 netdev_dbg(vxlan->dev, "garbage collect %pM\n", in vxlan_cleanup()
2843 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_cleanup()
2845 spin_unlock(&vxlan->hash_lock); in vxlan_cleanup()
2852 mod_timer(&vxlan->age_timer, next_timer); in vxlan_cleanup()
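
vxlan_cleanup() above ages out learned entries whose last update is older than cfg.age_interval and re-arms the timer for the earliest remaining deadline. A simplified userspace pass over a toy table follows; names and time handling are illustrative, whereas the driver walks the FDB under RCU with jiffies and a deferrable timer.

/* One aging pass: expire stale entries, report the next wake-up time. */
#include <stddef.h>
#include <stdio.h>
#include <time.h>

struct toy_fdb {
	const char *mac;   /* NULL once the entry has been destroyed */
	time_t updated;    /* last time the entry was used or refreshed */
	int permanent;     /* static entries are never aged out */
};

/* Returns the next wake-up time, or 0 if nothing is left to age. */
static time_t toy_age_pass(struct toy_fdb *tbl, size_t n,
			   time_t now, time_t age_interval)
{
	time_t next = 0;

	for (size_t i = 0; i < n; i++) {
		time_t timeout;

		if (tbl[i].permanent || !tbl[i].mac)
			continue;

		timeout = tbl[i].updated + age_interval;
		if (timeout <= now) {
			printf("garbage collect %s\n", tbl[i].mac);
			tbl[i].mac = NULL;	/* "destroy" the entry */
		} else if (!next || timeout < next) {
			next = timeout;
		}
	}
	return next;
}

int main(void)
{
	time_t now = time(NULL);
	struct toy_fdb tbl[] = {
		{ "52:54:00:aa:bb:cc", now - 400, 0 },
		{ "52:54:00:dd:ee:ff", now - 10,  0 },
		{ "00:00:00:00:00:00", now - 999, 1 },
	};
	time_t next = toy_age_pass(tbl, 3, now, 300);

	printf("re-arm in %ld s\n", next ? (long)(next - now) : -1L);
	return 0;
}
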
2855 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) in vxlan_vs_del_dev() argument
2859 hlist_del_init_rcu(&vxlan->hlist4.hlist); in vxlan_vs_del_dev()
2861 hlist_del_init_rcu(&vxlan->hlist6.hlist); in vxlan_vs_del_dev()
2865 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, in vxlan_vs_add_dev() argument
2868 __be32 vni = vxlan->default_dst.remote_vni; in vxlan_vs_add_dev()
2872 node->vxlan = vxlan; in vxlan_vs_add_dev()
2879 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_init() local
2882 err = rhashtable_init(&vxlan->fdb_hash_tbl, &vxlan_fdb_rht_params); in vxlan_init()
2886 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) { in vxlan_init()
2887 err = vxlan_vnigroup_init(vxlan); in vxlan_init()
2892 err = gro_cells_init(&vxlan->gro_cells, dev); in vxlan_init()
2896 err = vxlan_mdb_init(vxlan); in vxlan_init()
2904 gro_cells_destroy(&vxlan->gro_cells); in vxlan_init()
2906 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) in vxlan_init()
2907 vxlan_vnigroup_uninit(vxlan); in vxlan_init()
2909 rhashtable_destroy(&vxlan->fdb_hash_tbl); in vxlan_init()
2915 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_uninit() local
2917 vxlan_mdb_fini(vxlan); in vxlan_uninit()
2919 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) in vxlan_uninit()
2920 vxlan_vnigroup_uninit(vxlan); in vxlan_uninit()
2922 gro_cells_destroy(&vxlan->gro_cells); in vxlan_uninit()
2924 rhashtable_destroy(&vxlan->fdb_hash_tbl); in vxlan_uninit()
2930 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_open() local
2933 ret = vxlan_sock_add(vxlan); in vxlan_open()
2937 ret = vxlan_multicast_join(vxlan); in vxlan_open()
2939 vxlan_sock_release(vxlan); in vxlan_open()
2943 if (vxlan->cfg.age_interval) in vxlan_open()
2944 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); in vxlan_open()
2963 const struct vxlan_dev *vxlan) in vxlan_fdb_is_default_entry() argument
2966 f->key.vni == vxlan->cfg.vni; in vxlan_fdb_is_default_entry()
2977 const struct vxlan_dev *vxlan, in vxlan_fdb_flush_matches() argument
2986 if (desc->ignore_default_entry && vxlan_fdb_is_default_entry(f, vxlan)) in vxlan_fdb_flush_matches()
3022 vxlan_fdb_flush_match_remotes(struct vxlan_fdb *f, struct vxlan_dev *vxlan, in vxlan_fdb_flush_match_remotes() argument
3033 vxlan_fdb_dst_destroy(vxlan, f, rd, true); in vxlan_fdb_flush_match_remotes()
3041 static void vxlan_flush(struct vxlan_dev *vxlan, in vxlan_flush() argument
3048 hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) { in vxlan_flush()
3049 if (!vxlan_fdb_flush_matches(f, vxlan, desc)) in vxlan_flush()
3052 spin_lock_bh(&vxlan->hash_lock); in vxlan_flush()
3059 vxlan_fdb_flush_match_remotes(f, vxlan, desc, in vxlan_flush()
3066 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_flush()
3068 spin_unlock_bh(&vxlan->hash_lock); in vxlan_flush()
3092 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_delete_bulk() local
3148 vxlan_flush(vxlan, &desc); in vxlan_fdb_delete_bulk()
3156 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_stop() local
3164 vxlan_multicast_leave(vxlan); in vxlan_stop()
3166 timer_delete_sync(&vxlan->age_timer); in vxlan_stop()
3168 vxlan_flush(vxlan, &desc); in vxlan_stop()
3169 vxlan_sock_release(vxlan); in vxlan_stop()
3181 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_change_mtu() local
3182 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_change_mtu()
3183 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_change_mtu()
3190 int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags); in vxlan_change_mtu()
3201 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_metadata_dst() local
3205 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_fill_metadata_dst()
3206 vxlan->cfg.port_max, true); in vxlan_fill_metadata_dst()
3207 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_fill_metadata_dst()
3210 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_fill_metadata_dst()
3216 rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, 0, in vxlan_fill_metadata_dst()
3226 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_fill_metadata_dst()
3232 ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock, in vxlan_fill_metadata_dst()
3320 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_setup() local
3346 INIT_LIST_HEAD(&vxlan->next); in vxlan_setup()
3347 spin_lock_init(&vxlan->hash_lock); in vxlan_setup()
3349 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE); in vxlan_setup()
3351 vxlan->dev = dev; in vxlan_setup()
3353 INIT_HLIST_HEAD(&vxlan->fdb_list); in vxlan_setup()
3488 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_ksettings() local
3489 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_get_link_ksettings()
3490 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_get_link_ksettings()
3595 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) in __vxlan_sock_add() argument
3597 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; in __vxlan_sock_add()
3604 if (vxlan->cfg.remote_ifindex) in __vxlan_sock_add()
3606 vxlan->net, vxlan->cfg.remote_ifindex); in __vxlan_sock_add()
3608 if (!vxlan->cfg.no_share) { in __vxlan_sock_add()
3610 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, in __vxlan_sock_add()
3611 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3620 vs = vxlan_socket_create(vxlan->net, ipv6, in __vxlan_sock_add()
3621 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3627 rcu_assign_pointer(vxlan->vn6_sock, vs); in __vxlan_sock_add()
3628 node = &vxlan->hlist6; in __vxlan_sock_add()
3632 rcu_assign_pointer(vxlan->vn4_sock, vs); in __vxlan_sock_add()
3633 node = &vxlan->hlist4; in __vxlan_sock_add()
3636 if (metadata && (vxlan->cfg.flags & VXLAN_F_VNIFILTER)) in __vxlan_sock_add()
3637 vxlan_vs_add_vnigrp(vxlan, vs, ipv6); in __vxlan_sock_add()
3639 vxlan_vs_add_dev(vs, vxlan, node); in __vxlan_sock_add()
3644 static int vxlan_sock_add(struct vxlan_dev *vxlan) in vxlan_sock_add() argument
3646 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; in vxlan_sock_add()
3647 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata; in vxlan_sock_add()
3651 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_add()
3653 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_add()
3655 ret = __vxlan_sock_add(vxlan, true); in vxlan_sock_add()
3661 ret = __vxlan_sock_add(vxlan, false); in vxlan_sock_add()
3663 vxlan_sock_release(vxlan); in vxlan_sock_add()
3667 int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan, in vxlan_vni_in_use() argument
3674 if (tmp == vxlan) in vxlan_vni_in_use()
3856 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_config_apply() local
3857 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_config_apply()
3871 vxlan->net = src_net; in vxlan_config_apply()
3904 memcpy(&vxlan->cfg, conf, sizeof(*conf)); in vxlan_config_apply()
3911 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dev_configure() local
3915 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack); in vxlan_dev_configure()
3929 struct vxlan_dev *vxlan = netdev_priv(dev); in __vxlan_dev_create() local
3934 dst = &vxlan->default_dst; in __vxlan_dev_create()
3965 spin_lock_bh(&vxlan->hash_lock); in __vxlan_dev_create()
3966 err = vxlan_fdb_update(vxlan, all_zeros_mac, in __vxlan_dev_create()
3970 vxlan->cfg.dst_port, in __vxlan_dev_create()
3975 spin_unlock_bh(&vxlan->hash_lock); in __vxlan_dev_create()
3980 list_add(&vxlan->next, &vn->vxlan_list); in __vxlan_dev_create()
4028 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_nl2conf() local
4035 memcpy(conf, &vxlan->cfg, sizeof(*conf)); in vxlan_nl2conf()
4381 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_changelink() local
4388 dst = &vxlan->default_dst; in vxlan_changelink()
4393 err = vxlan_config_validate(vxlan->net, &conf, &lowerdev, in vxlan_changelink()
4394 vxlan, extack); in vxlan_changelink()
4407 change_igmp = vxlan->dev->flags & IFF_UP && in vxlan_changelink()
4413 spin_lock_bh(&vxlan->hash_lock); in vxlan_changelink()
4415 err = vxlan_fdb_update(vxlan, all_zeros_mac, in vxlan_changelink()
4419 vxlan->cfg.dst_port, in vxlan_changelink()
4424 spin_unlock_bh(&vxlan->hash_lock); in vxlan_changelink()
4431 __vxlan_fdb_delete(vxlan, all_zeros_mac, in vxlan_changelink()
4433 vxlan->cfg.dst_port, in vxlan_changelink()
4438 spin_unlock_bh(&vxlan->hash_lock); in vxlan_changelink()
4443 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) { in vxlan_changelink()
4444 err = vxlan_vnilist_update_group(vxlan, &dst->remote_ip, in vxlan_changelink()
4455 err = vxlan_multicast_leave(vxlan); in vxlan_changelink()
4457 if (conf.age_interval != vxlan->cfg.age_interval) in vxlan_changelink()
4458 mod_timer(&vxlan->age_timer, jiffies); in vxlan_changelink()
4463 vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); in vxlan_changelink()
4467 err = vxlan_multicast_join(vxlan); in vxlan_changelink()
4474 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dellink() local
4477 vxlan_flush(vxlan, &desc); in vxlan_dellink()
4479 list_del(&vxlan->next); in vxlan_dellink()
4481 if (vxlan->default_dst.remote_dev) in vxlan_dellink()
4482 netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev); in vxlan_dellink()
4525 const struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_info() local
4526 const struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_fill_info()
4528 .low = htons(vxlan->cfg.port_min), in vxlan_fill_info()
4529 .high = htons(vxlan->cfg.port_max), in vxlan_fill_info()
4552 if (!vxlan_addr_any(&vxlan->cfg.saddr)) { in vxlan_fill_info()
4553 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { in vxlan_fill_info()
4555 vxlan->cfg.saddr.sin.sin_addr.s_addr)) in vxlan_fill_info()
4560 &vxlan->cfg.saddr.sin6.sin6_addr)) in vxlan_fill_info()
4566 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || in vxlan_fill_info()
4568 !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) || in vxlan_fill_info()
4569 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || in vxlan_fill_info()
4570 nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) || in vxlan_fill_info()
4571 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || in vxlan_fill_info()
4572 nla_put_u32(skb, IFLA_VXLAN_LABEL_POLICY, vxlan->cfg.label_policy) || in vxlan_fill_info()
4574 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) || in vxlan_fill_info()
4576 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) || in vxlan_fill_info()
4578 !!(vxlan->cfg.flags & VXLAN_F_RSC)) || in vxlan_fill_info()
4580 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) || in vxlan_fill_info()
4582 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) || in vxlan_fill_info()
4584 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) || in vxlan_fill_info()
4585 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || in vxlan_fill_info()
4586 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || in vxlan_fill_info()
4587 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || in vxlan_fill_info()
4589 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) || in vxlan_fill_info()
4591 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || in vxlan_fill_info()
4593 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) || in vxlan_fill_info()
4595 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) || in vxlan_fill_info()
4597 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)) || in vxlan_fill_info()
4599 !!(vxlan->cfg.flags & VXLAN_F_LOCALBYPASS))) in vxlan_fill_info()
4605 if (vxlan->cfg.flags & VXLAN_F_GBP && in vxlan_fill_info()
4609 if (vxlan->cfg.flags & VXLAN_F_GPE && in vxlan_fill_info()
4613 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL && in vxlan_fill_info()
4617 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER && in vxlan_fill_info()
4619 !!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))) in vxlan_fill_info()
4623 sizeof(vxlan->cfg.reserved_bits), in vxlan_fill_info()
4624 &vxlan->cfg.reserved_bits)) in vxlan_fill_info()
4635 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_net() local
4637 return READ_ONCE(vxlan->net); in vxlan_get_link_net()
4692 struct vxlan_dev *vxlan, *next; in vxlan_handle_lowerdev_unregister() local
4695 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { in vxlan_handle_lowerdev_unregister()
4696 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_handle_lowerdev_unregister()
4705 vxlan_dellink(vxlan->dev, &list_kill); in vxlan_handle_lowerdev_unregister()
4735 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_offloaded_set() local
4739 spin_lock_bh(&vxlan->hash_lock); in vxlan_fdb_offloaded_set()
4741 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_offloaded_set()
4755 spin_unlock_bh(&vxlan->hash_lock); in vxlan_fdb_offloaded_set()
4762 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_add() local
4768 spin_lock_bh(&vxlan->hash_lock); in vxlan_fdb_external_learn_add()
4769 err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip, in vxlan_fdb_external_learn_add()
4778 spin_unlock_bh(&vxlan->hash_lock); in vxlan_fdb_external_learn_add()
4787 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_del() local
4791 spin_lock_bh(&vxlan->hash_lock); in vxlan_fdb_external_learn_del()
4793 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_del()
4797 err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr, in vxlan_fdb_external_learn_del()
4805 spin_unlock_bh(&vxlan->hash_lock); in vxlan_fdb_external_learn_del()
4853 struct vxlan_dev *vxlan; in vxlan_fdb_nh_flush() local
4857 vxlan = rcu_dereference(fdb->vdev); in vxlan_fdb_nh_flush()
4858 WARN_ON(!vxlan); in vxlan_fdb_nh_flush()
4859 spin_lock_bh(&vxlan->hash_lock); in vxlan_fdb_nh_flush()
4861 vxlan_fdb_destroy(vxlan, fdb, false, false); in vxlan_fdb_nh_flush()
4862 spin_unlock_bh(&vxlan->hash_lock); in vxlan_fdb_nh_flush()
4903 struct vxlan_dev *vxlan, *next; in vxlan_destroy_tunnels() local
4905 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) in vxlan_destroy_tunnels()
4906 vxlan_dellink(vxlan->dev, dev_to_kill); in vxlan_destroy_tunnels()