Lines matching refs:vxlan in the Linux kernel VXLAN driver. Each entry gives the source line number, the matched line, and the enclosing function; the "argument" and "local" tags mark lines where the vxlan pointer is declared as a function argument or a local variable.

60 static int vxlan_sock_add(struct vxlan_dev *vxlan);
62 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
224 if (node->vxlan->default_dst.remote_vni != vni) in vxlan_vs_find_vni()
228 const struct vxlan_config *cfg = &node->vxlan->cfg; in vxlan_vs_find_vni()
235 return node->vxlan; in vxlan_vs_find_vni()
256 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, in vxlan_fdb_info() argument
298 ndm->ndm_ifindex = vxlan->dev->ifindex; in vxlan_fdb_info()
304 if (!net_eq(dev_net(vxlan->dev), vxlan->net) && in vxlan_fdb_info()
306 peernet2id(dev_net(vxlan->dev), vxlan->net))) in vxlan_fdb_info()
320 rdst->remote_port != vxlan->cfg.dst_port && in vxlan_fdb_info()
323 if (rdst->remote_vni != vxlan->default_dst.remote_vni && in vxlan_fdb_info()
331 if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni && in vxlan_fdb_info()
364 static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in __vxlan_fdb_notify() argument
367 struct net *net = dev_net(vxlan->dev); in __vxlan_fdb_notify()
375 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); in __vxlan_fdb_notify()
390 static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_notifier_info() argument
396 fdb_info->info.dev = vxlan->dev; in vxlan_fdb_switchdev_notifier_info()
408 static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_call_notifiers() argument
423 vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info); in vxlan_fdb_switchdev_call_notifiers()
424 ret = call_switchdev_notifiers(notifier_type, vxlan->dev, in vxlan_fdb_switchdev_call_notifiers()
429 static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_notify() argument
438 err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
444 vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
450 __vxlan_fdb_notify(vxlan, fdb, rd, type); in vxlan_fdb_notify()
456 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_ip_miss() local
465 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_ip_miss()
468 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) in vxlan_fdb_miss() argument
477 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_fdb_miss()
502 static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni) in fdb_head_index() argument
504 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) in fdb_head_index()
511 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, in vxlan_fdb_head() argument
514 return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)]; in vxlan_fdb_head()
518 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, in __vxlan_find_mac() argument
521 struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni); in __vxlan_find_mac()
526 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { in __vxlan_find_mac()
538 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, in vxlan_find_mac() argument
543 f = __vxlan_find_mac(vxlan, mac, vni); in vxlan_find_mac()
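
A note on the bucket selection visible above: fdb_head_index() picks one hash bucket from the MAC alone, or from MAC plus VNI when VXLAN_F_COLLECT_METADATA is set, and __vxlan_find_mac() then walks only that bucket, also comparing the VNI in metadata mode. The user-space sketch below illustrates that shape; fdb_hash() and FDB_HASH_SIZE are illustrative stand-ins, not the driver's eth_hash()/eth_vni_hash() helpers or its real table size.

    /* Simplified user-space analogue of the FDB bucket lookup shown above. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN      6
    #define FDB_HASH_SIZE 256          /* power of two, illustrative only */

    struct fdb_entry {
        uint8_t  mac[ETH_ALEN];
        uint32_t vni;
        struct fdb_entry *next;        /* per-bucket singly linked list */
    };

    struct fdb_table {
        bool collect_metadata;         /* mirrors VXLAN_F_COLLECT_METADATA */
        struct fdb_entry *head[FDB_HASH_SIZE];
    };

    /* FNV-1a over the MAC, optionally folding in the VNI (stand-in hash). */
    static uint32_t fdb_hash(const uint8_t *mac, uint32_t vni, bool with_vni)
    {
        uint32_t h = 2166136261u;
        for (int i = 0; i < ETH_ALEN; i++)
            h = (h ^ mac[i]) * 16777619u;
        if (with_vni)
            h = (h ^ vni) * 16777619u;
        return h & (FDB_HASH_SIZE - 1);
    }

    static struct fdb_entry *fdb_find(struct fdb_table *t,
                                      const uint8_t *mac, uint32_t vni)
    {
        uint32_t idx = fdb_hash(mac, vni, t->collect_metadata);

        for (struct fdb_entry *f = t->head[idx]; f; f = f->next) {
            if (memcmp(f->mac, mac, ETH_ALEN))
                continue;
            /* Only a metadata-mode table keys on the VNI as well. */
            if (t->collect_metadata && f->vni != vni)
                continue;
            return f;
        }
        return NULL;
    }

    int main(void)
    {
        struct fdb_table t = { .collect_metadata = true };
        struct fdb_entry e = { .mac = {0, 0x11, 0x22, 0x33, 0x44, 0x55}, .vni = 42 };
        uint32_t idx = fdb_hash(e.mac, e.vni, true);

        e.next = t.head[idx];
        t.head[idx] = &e;

        printf("hit: %d\n", fdb_find(&t, e.mac, 42) != NULL);
        printf("miss (wrong vni): %d\n", fdb_find(&t, e.mac, 7) != NULL);
        return 0;
    }
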
571 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_find_uc() local
585 f = __vxlan_find_mac(vxlan, eth_addr, vni); in vxlan_fdb_find_uc()
592 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info); in vxlan_fdb_find_uc()
601 const struct vxlan_dev *vxlan, in vxlan_fdb_notify_one() argument
609 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info); in vxlan_fdb_notify_one()
619 struct vxlan_dev *vxlan; in vxlan_fdb_replay() local
627 vxlan = netdev_priv(dev); in vxlan_fdb_replay()
630 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
631 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) { in vxlan_fdb_replay()
634 rc = vxlan_fdb_notify_one(nb, vxlan, in vxlan_fdb_replay()
642 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
647 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
654 struct vxlan_dev *vxlan; in vxlan_fdb_clear_offload() local
661 vxlan = netdev_priv(dev); in vxlan_fdb_clear_offload()
664 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_clear_offload()
665 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) in vxlan_fdb_clear_offload()
669 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_clear_offload()
827 static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac, in vxlan_fdb_alloc() argument
841 RCU_INIT_POINTER(f->vdev, vxlan); in vxlan_fdb_alloc()
849 static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac, in vxlan_fdb_insert() argument
852 ++vxlan->addrcnt; in vxlan_fdb_insert()
854 vxlan_fdb_head(vxlan, mac, src_vni)); in vxlan_fdb_insert()
857 static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_nh_update() argument
867 nh = nexthop_find_by_id(vxlan->net, nhid); in vxlan_fdb_nh_update()
890 switch (vxlan->default_dst.remote_ip.sa.sa_family) { in vxlan_fdb_nh_update()
921 static int vxlan_fdb_create(struct vxlan_dev *vxlan, in vxlan_fdb_create() argument
932 if (vxlan->cfg.addrmax && in vxlan_fdb_create()
933 vxlan->addrcnt >= vxlan->cfg.addrmax) in vxlan_fdb_create()
936 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_create()
937 f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); in vxlan_fdb_create()
942 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_create()
983 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_destroy() argument
988 netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); in vxlan_fdb_destroy()
990 --vxlan->addrcnt; in vxlan_fdb_destroy()
993 vxlan_fdb_notify(vxlan, f, NULL, RTM_DELNEIGH, in vxlan_fdb_destroy()
997 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, in vxlan_fdb_destroy()
1014 static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, in vxlan_fdb_update_existing() argument
1064 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_update_existing()
1094 err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, in vxlan_fdb_update_existing()
1114 static int vxlan_fdb_update_create(struct vxlan_dev *vxlan, in vxlan_fdb_update_create() argument
1131 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_update_create()
1132 rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, in vxlan_fdb_update_create()
1137 vxlan_fdb_insert(vxlan, mac, src_vni, f); in vxlan_fdb_update_create()
1138 rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, in vxlan_fdb_update_create()
1146 vxlan_fdb_destroy(vxlan, f, false, false); in vxlan_fdb_update_create()
1151 static int vxlan_fdb_update(struct vxlan_dev *vxlan, in vxlan_fdb_update() argument
1161 f = __vxlan_find_mac(vxlan, mac, src_vni); in vxlan_fdb_update()
1164 netdev_dbg(vxlan->dev, in vxlan_fdb_update()
1169 return vxlan_fdb_update_existing(vxlan, ip, state, flags, port, in vxlan_fdb_update()
1176 return vxlan_fdb_update_create(vxlan, mac, ip, state, flags, in vxlan_fdb_update()
1183 static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_dst_destroy() argument
1187 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL); in vxlan_fdb_dst_destroy()
1191 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, in vxlan_fdb_parse() argument
1195 struct net *net = dev_net(vxlan->dev); in vxlan_fdb_parse()
1207 union vxlan_addr *remote = &vxlan->default_dst.remote_ip; in vxlan_fdb_parse()
1225 *port = vxlan->cfg.dst_port; in vxlan_fdb_parse()
1233 *vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1241 *src_vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1271 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_add() local
1289 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_add()
1294 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) in vxlan_fdb_add()
1297 hash_index = fdb_head_index(vxlan, addr, src_vni); in vxlan_fdb_add()
1298 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_add()
1299 err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, in vxlan_fdb_add()
1303 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_add()
1308 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, in __vxlan_fdb_delete() argument
1317 f = vxlan_find_mac(vxlan, addr, src_vni); in __vxlan_fdb_delete()
1331 vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify); in __vxlan_fdb_delete()
1335 vxlan_fdb_destroy(vxlan, f, true, swdev_notify); in __vxlan_fdb_delete()
1346 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_delete() local
1354 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_delete()
1359 hash_index = fdb_head_index(vxlan, addr, src_vni); in vxlan_fdb_delete()
1360 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete()
1361 err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, in vxlan_fdb_delete()
1363 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete()
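
vxlan_fdb_add() and vxlan_fdb_delete() above share one locking shape: compute the bucket with fdb_head_index(), take only that bucket's lock (spin_lock_bh on vxlan->hash_lock[hash_index]), perform the update or delete, and unlock. A minimal user-space analogue of that shape, with pthread mutexes standing in for the per-bucket spinlocks and a simplified entry layout standing in for struct vxlan_fdb:

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN      6
    #define FDB_HASH_SIZE 256

    struct fdb_entry {
        uint8_t  mac[ETH_ALEN];
        uint32_t vni;
        struct fdb_entry *next;
    };

    static struct fdb_entry *fdb_head[FDB_HASH_SIZE];
    static pthread_mutex_t   fdb_lock[FDB_HASH_SIZE];

    /* Stand-in for fdb_head_index(); any stable hash of (mac, vni) works. */
    static uint32_t fdb_head_index(const uint8_t *mac, uint32_t vni)
    {
        uint32_t h = vni;
        for (int i = 0; i < ETH_ALEN; i++)
            h = h * 31 + mac[i];
        return h & (FDB_HASH_SIZE - 1);
    }

    static void fdb_init(void)
    {
        for (int i = 0; i < FDB_HASH_SIZE; i++)
            pthread_mutex_init(&fdb_lock[i], NULL);
    }

    /* Same shape as vxlan_fdb_add(): pick the bucket, lock it, link, unlock. */
    static void fdb_add(struct fdb_entry *f)
    {
        uint32_t idx = fdb_head_index(f->mac, f->vni);

        pthread_mutex_lock(&fdb_lock[idx]);
        f->next = fdb_head[idx];
        fdb_head[idx] = f;
        pthread_mutex_unlock(&fdb_lock[idx]);
    }

    /* Same shape as vxlan_fdb_delete()/__vxlan_fdb_delete(): same bucket, same lock. */
    static int fdb_delete(const uint8_t *mac, uint32_t vni)
    {
        uint32_t idx = fdb_head_index(mac, vni);
        int found = 0;

        pthread_mutex_lock(&fdb_lock[idx]);
        for (struct fdb_entry **pp = &fdb_head[idx]; *pp; pp = &(*pp)->next) {
            if (!memcmp((*pp)->mac, mac, ETH_ALEN) && (*pp)->vni == vni) {
                *pp = (*pp)->next;
                found = 1;
                break;
            }
        }
        pthread_mutex_unlock(&fdb_lock[idx]);
        return found;
    }

    int main(void)
    {
        static struct fdb_entry e = { .mac = {2, 0, 0, 0, 0, 1}, .vni = 100 };

        fdb_init();
        fdb_add(&e);
        return !fdb_delete(e.mac, 100);   /* exit 0 on the expected path */
    }
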
1373 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_dump() local
1381 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { in vxlan_fdb_dump()
1387 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1405 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1431 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_get() local
1439 vni = vxlan->default_dst.remote_vni; in vxlan_fdb_get()
1443 f = __vxlan_find_mac(vxlan, addr, vni); in vxlan_fdb_get()
1450 err = vxlan_fdb_info(skb, vxlan, f, portid, seq, in vxlan_fdb_get()
1465 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_snoop() local
1475 f = vxlan_find_mac(vxlan, src_mac, vni); in vxlan_snoop()
1498 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); in vxlan_snoop()
1500 u32 hash_index = fdb_head_index(vxlan, src_mac, vni); in vxlan_snoop()
1503 spin_lock(&vxlan->hash_lock[hash_index]); in vxlan_snoop()
1507 vxlan_fdb_update(vxlan, src_mac, src_ip, in vxlan_snoop()
1510 vxlan->cfg.dst_port, in vxlan_snoop()
1512 vxlan->default_dst.remote_vni, in vxlan_snoop()
1514 spin_unlock(&vxlan->hash_lock[hash_index]); in vxlan_snoop()
1523 struct vxlan_dev *vxlan; in vxlan_group_used() local
1543 list_for_each_entry(vxlan, &vn->vxlan_list, next) { in vxlan_group_used()
1544 if (!netif_running(vxlan->dev) || vxlan == dev) in vxlan_group_used()
1548 rtnl_dereference(vxlan->vn4_sock) != sock4) in vxlan_group_used()
1552 rtnl_dereference(vxlan->vn6_sock) != sock6) in vxlan_group_used()
1556 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip, in vxlan_group_used()
1560 if (vxlan->default_dst.remote_ifindex != in vxlan_group_used()
1591 static void vxlan_sock_release(struct vxlan_dev *vxlan) in vxlan_sock_release() argument
1593 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_sock_release()
1595 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_sock_release()
1597 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_release()
1600 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_release()
1603 vxlan_vs_del_dev(vxlan); in vxlan_sock_release()
1621 static int vxlan_igmp_join(struct vxlan_dev *vxlan) in vxlan_igmp_join() argument
1624 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; in vxlan_igmp_join()
1625 int ifindex = vxlan->default_dst.remote_ifindex; in vxlan_igmp_join()
1629 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_igmp_join()
1641 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_igmp_join()
1655 static int vxlan_igmp_leave(struct vxlan_dev *vxlan) in vxlan_igmp_leave() argument
1658 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; in vxlan_igmp_leave()
1659 int ifindex = vxlan->default_dst.remote_ifindex; in vxlan_igmp_leave()
1663 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_igmp_leave()
1675 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_igmp_leave()
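
vxlan_igmp_join()/vxlan_igmp_leave() above join or leave the multicast group given by default_dst.remote_ip on the shared underlay socket, scoped to default_dst.remote_ifindex. A user-space sketch of the same membership operations on a plain UDP socket follows; the group address and interface name are placeholders, and the driver performs the join in-kernel on its own vxlan socket rather than through setsockopt():

    #include <arpa/inet.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct ip_mreqn mreq;

        if (fd < 0) {
            perror("socket");
            return 1;
        }

        memset(&mreq, 0, sizeof(mreq));
        inet_pton(AF_INET, "239.1.1.1", &mreq.imr_multiaddr);  /* placeholder group   */
        mreq.imr_ifindex = if_nametoindex("eth0");             /* placeholder lowerdev */

        /* Join on open, as vxlan_open() does for a multicast default remote. */
        if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
            perror("IP_ADD_MEMBERSHIP");

        /* Leave on stop; the driver only does this when no other vxlan device
         * still uses the group (see vxlan_group_used() above). */
        if (setsockopt(fd, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
            perror("IP_DROP_MEMBERSHIP");

        close(fd);
        return 0;
    }
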
1769 static bool vxlan_set_mac(struct vxlan_dev *vxlan, in vxlan_set_mac() argument
1777 skb->protocol = eth_type_trans(skb, vxlan->dev); in vxlan_set_mac()
1781 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) in vxlan_set_mac()
1795 if ((vxlan->cfg.flags & VXLAN_F_LEARN) && in vxlan_set_mac()
1829 struct vxlan_dev *vxlan; in vxlan_rcv() local
1861 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); in vxlan_rcv()
1862 if (!vxlan) in vxlan_rcv()
1875 !net_eq(vxlan->net, dev_net(vxlan->dev)))) in vxlan_rcv()
1917 if (!vxlan_set_mac(vxlan, vs, skb, vni)) in vxlan_rcv()
1921 skb->dev = vxlan->dev; in vxlan_rcv()
1929 ++vxlan->dev->stats.rx_frame_errors; in vxlan_rcv()
1930 ++vxlan->dev->stats.rx_errors; in vxlan_rcv()
1936 if (unlikely(!(vxlan->dev->flags & IFF_UP))) { in vxlan_rcv()
1938 atomic_long_inc(&vxlan->dev->rx_dropped); in vxlan_rcv()
1942 dev_sw_netstats_rx_add(vxlan->dev, skb->len); in vxlan_rcv()
1943 gro_cells_receive(&vxlan->gro_cells, skb); in vxlan_rcv()
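
The receive path above (vxlan_rcv) validates the encapsulation header, extracts the 24-bit VNI and resolves it, together with the ingress ifindex, to a vxlan_dev via vxlan_vs_find_vni() before handing the inner frame to the device. Below is a sketch of that initial header parse, following the RFC 7348 layout; the helper name and the sample packet are illustrative:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct vxlanhdr {               /* 8 bytes, network byte order */
        uint8_t flags;              /* bit 0x08 = "VNI present" (I flag) */
        uint8_t reserved1[3];
        uint8_t vni[3];             /* 24-bit VXLAN Network Identifier   */
        uint8_t reserved2;
    };

    /* Returns the VNI, or -1 if the mandatory I flag is missing. */
    static long vxlan_parse_vni(const uint8_t *udp_payload, size_t len)
    {
        struct vxlanhdr hdr;

        if (len < sizeof(hdr))
            return -1;
        memcpy(&hdr, udp_payload, sizeof(hdr));
        if (!(hdr.flags & 0x08))
            return -1;              /* the driver drops such packets too */
        return ((long)hdr.vni[0] << 16) | (hdr.vni[1] << 8) | hdr.vni[2];
    }

    int main(void)
    {
        /* Example encapsulation header carrying VNI 42 (0x00002a). */
        uint8_t pkt[8] = { 0x08, 0, 0, 0, 0x00, 0x00, 0x2a, 0 };

        printf("vni = %ld\n", vxlan_parse_vni(pkt, sizeof(pkt)));
        return 0;
    }
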
1958 struct vxlan_dev *vxlan; in vxlan_err_lookup() local
1976 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); in vxlan_err_lookup()
1977 if (!vxlan) in vxlan_err_lookup()
1985 struct vxlan_dev *vxlan = netdev_priv(dev); in arp_reduce() local
2030 f = vxlan_find_mac(vxlan, n->ha, vni); in arp_reduce()
2052 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in arp_reduce()
2159 struct vxlan_dev *vxlan = netdev_priv(dev); in neigh_reduce() local
2190 f = vxlan_find_mac(vxlan, n->ha, vni); in neigh_reduce()
2208 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in neigh_reduce()
2226 struct vxlan_dev *vxlan = netdev_priv(dev); in route_shortcircuit() local
2242 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2263 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2392 static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, in vxlan_get_route() argument
2424 rt = ip_route_output_key(vxlan->net, &fl4); in vxlan_get_route()
2443 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, in vxlan6_get_route() argument
2479 ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, in vxlan6_get_route()
2555 struct vxlan_dev *vxlan, in encap_bypass_if_local() argument
2574 dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni, in encap_bypass_if_local()
2576 vxlan->cfg.flags); in encap_bypass_if_local()
2583 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni, true); in encap_bypass_if_local()
2596 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit_one() local
2608 u32 flags = vxlan->cfg.flags; in vxlan_xmit_one()
2610 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); in vxlan_xmit_one()
2619 vxlan_encap_bypass(skb, vxlan, vxlan, in vxlan_xmit_one()
2626 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; in vxlan_xmit_one()
2629 local_ip = vxlan->cfg.saddr; in vxlan_xmit_one()
2635 ttl = vxlan->cfg.ttl; in vxlan_xmit_one()
2640 tos = vxlan->cfg.tos; in vxlan_xmit_one()
2648 label = vxlan->cfg.label; in vxlan_xmit_one()
2664 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_xmit_one()
2678 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_xmit_one()
2679 vxlan->cfg.port_max, true); in vxlan_xmit_one()
2683 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_xmit_one()
2690 rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos, in vxlan_xmit_one()
2702 err = encap_bypass_if_local(skb, dev, vxlan, dst, in vxlan_xmit_one()
2708 if (vxlan->cfg.df == VXLAN_DF_SET) { in vxlan_xmit_one()
2710 } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) { in vxlan_xmit_one()
2741 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
2758 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_xmit_one()
2763 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos, in vxlan_xmit_one()
2777 err = encap_bypass_if_local(skb, dev, vxlan, dst, in vxlan_xmit_one()
2803 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
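
In vxlan_xmit_one() above the UDP source port comes from udp_flow_src_port(), which hashes the inner flow and maps the hash into the configured [port_min, port_max] range so underlay ECMP/RSS can spread tunnelled flows. A sketch of that mapping; flow_hash() stands in for the kernel's skb hash, and the fallback ephemeral range used when no range is configured is an assumption here:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t flow_hash(const uint8_t *key, size_t len)
    {
        uint32_t h = 2166136261u;            /* FNV-1a, for illustration */
        for (size_t i = 0; i < len; i++)
            h = (h ^ key[i]) * 16777619u;
        return h;
    }

    static uint16_t pick_src_port(uint32_t hash, uint16_t min, uint16_t max)
    {
        if (!min && !max) {                  /* no range configured */
            min = 32768;                     /* assumed ephemeral range */
            max = 60999;
        }
        return min + (uint16_t)(hash % (uint32_t)(max - min + 1));
    }

    int main(void)
    {
        /* A fake 5-tuple representing one inner flow. */
        uint8_t tuple[] = { 10, 0, 0, 1,  10, 0, 0, 2,  0x13, 0x88,  0x00, 0x50,  6 };
        uint32_t h = flow_hash(tuple, sizeof(tuple));

        printf("src port = %u\n", pick_src_port(h, 0, 0));
        return 0;
    }
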
2882 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit() local
2894 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { in vxlan_xmit()
2907 if (vxlan->cfg.flags & VXLAN_F_PROXY) { in vxlan_xmit()
2926 f = vxlan_find_mac(vxlan, eth->h_dest, vni); in vxlan_xmit()
2929 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) && in vxlan_xmit()
2934 f = vxlan_find_mac(vxlan, eth->h_dest, vni); in vxlan_xmit()
2938 f = vxlan_find_mac(vxlan, all_zeros_mac, vni); in vxlan_xmit()
2940 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) && in vxlan_xmit()
2942 vxlan_fdb_miss(vxlan, eth->h_dest); in vxlan_xmit()
2952 (vni ? : vxlan->default_dst.remote_vni), did_rsc); in vxlan_xmit()
2977 struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer); in vxlan_cleanup() local
2981 if (!netif_running(vxlan->dev)) in vxlan_cleanup()
2987 spin_lock(&vxlan->hash_lock[h]); in vxlan_cleanup()
2988 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { in vxlan_cleanup()
2999 timeout = f->used + vxlan->cfg.age_interval * HZ; in vxlan_cleanup()
3001 netdev_dbg(vxlan->dev, in vxlan_cleanup()
3005 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_cleanup()
3009 spin_unlock(&vxlan->hash_lock[h]); in vxlan_cleanup()
3012 mod_timer(&vxlan->age_timer, next_timer); in vxlan_cleanup()
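
vxlan_cleanup() above runs from the deferrable age_timer: it walks every bucket, expires learned entries whose last use is older than cfg.age_interval, tracks the earliest remaining deadline, and re-arms the timer with mod_timer(). A simplified single-threaded sketch of that pass; the entry layout, the 300-second interval, and the absence of locking and freeing are all simplifications for illustration:

    #include <stdbool.h>
    #include <stddef.h>
    #include <time.h>

    #define FDB_HASH_SIZE 256

    struct fdb_entry {
        time_t last_used;            /* plays the role of f->used     */
        bool   permanent;            /* static entries are never aged */
        struct fdb_entry *next;
    };

    static struct fdb_entry *fdb_head[FDB_HASH_SIZE];

    /* Returns the next wakeup time, at most age_interval from now. */
    static time_t fdb_cleanup(time_t now, unsigned int age_interval)
    {
        time_t next_timer = now + age_interval;

        for (int h = 0; h < FDB_HASH_SIZE; h++) {
            for (struct fdb_entry **pp = &fdb_head[h]; *pp; ) {
                struct fdb_entry *f = *pp;
                time_t timeout = f->last_used + age_interval;

                if (f->permanent) {
                    pp = &f->next;
                } else if (timeout <= now) {
                    *pp = f->next;           /* expire the entry */
                } else {
                    if (timeout < next_timer)
                        next_timer = timeout;
                    pp = &f->next;
                }
            }
        }
        return next_timer;                   /* caller re-arms its timer here */
    }

    int main(void)
    {
        static struct fdb_entry stale = { .last_used = 0 };

        fdb_head[0] = &stale;
        fdb_cleanup(time(NULL), 300);        /* 300 s interval, arbitrary here */
        return fdb_head[0] != NULL;          /* expect the stale entry gone */
    }
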
3015 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) in vxlan_vs_del_dev() argument
3017 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_vs_del_dev()
3020 hlist_del_init_rcu(&vxlan->hlist4.hlist); in vxlan_vs_del_dev()
3022 hlist_del_init_rcu(&vxlan->hlist6.hlist); in vxlan_vs_del_dev()
3027 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, in vxlan_vs_add_dev() argument
3030 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_vs_add_dev()
3031 __be32 vni = vxlan->default_dst.remote_vni; in vxlan_vs_add_dev()
3033 node->vxlan = vxlan; in vxlan_vs_add_dev()
3042 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_init() local
3049 err = gro_cells_init(&vxlan->gro_cells, dev); in vxlan_init()
3058 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) in vxlan_fdb_delete_default() argument
3061 u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni); in vxlan_fdb_delete_default()
3063 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete_default()
3064 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); in vxlan_fdb_delete_default()
3066 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_fdb_delete_default()
3067 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete_default()
3072 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_uninit() local
3074 gro_cells_destroy(&vxlan->gro_cells); in vxlan_uninit()
3076 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); in vxlan_uninit()
3084 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_open() local
3087 ret = vxlan_sock_add(vxlan); in vxlan_open()
3091 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { in vxlan_open()
3092 ret = vxlan_igmp_join(vxlan); in vxlan_open()
3096 vxlan_sock_release(vxlan); in vxlan_open()
3101 if (vxlan->cfg.age_interval) in vxlan_open()
3102 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); in vxlan_open()
3108 static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) in vxlan_flush() argument
3115 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_flush()
3116 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { in vxlan_flush()
3123 f->vni == vxlan->cfg.vni) in vxlan_flush()
3125 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_flush()
3127 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_flush()
3134 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_stop() local
3135 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_stop()
3138 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && in vxlan_stop()
3139 !vxlan_group_used(vn, vxlan)) in vxlan_stop()
3140 ret = vxlan_igmp_leave(vxlan); in vxlan_stop()
3142 del_timer_sync(&vxlan->age_timer); in vxlan_stop()
3144 vxlan_flush(vxlan, false); in vxlan_stop()
3145 vxlan_sock_release(vxlan); in vxlan_stop()
3157 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_change_mtu() local
3158 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_change_mtu()
3159 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_change_mtu()
3161 bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6); in vxlan_change_mtu()
3179 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_metadata_dst() local
3183 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_fill_metadata_dst()
3184 vxlan->cfg.port_max, true); in vxlan_fill_metadata_dst()
3185 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_fill_metadata_dst()
3188 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_fill_metadata_dst()
3191 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, in vxlan_fill_metadata_dst()
3200 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_fill_metadata_dst()
3203 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, in vxlan_fill_metadata_dst()
3287 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_setup() local
3311 INIT_LIST_HEAD(&vxlan->next); in vxlan_setup()
3313 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE); in vxlan_setup()
3315 vxlan->dev = dev; in vxlan_setup()
3318 spin_lock_init(&vxlan->hash_lock[h]); in vxlan_setup()
3319 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); in vxlan_setup()
3449 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_ksettings() local
3450 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_get_link_ksettings()
3451 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_get_link_ksettings()
3551 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) in __vxlan_sock_add() argument
3553 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in __vxlan_sock_add()
3558 if (vxlan->cfg.remote_ifindex) in __vxlan_sock_add()
3560 vxlan->net, vxlan->cfg.remote_ifindex); in __vxlan_sock_add()
3562 if (!vxlan->cfg.no_share) { in __vxlan_sock_add()
3564 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, in __vxlan_sock_add()
3565 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3574 vs = vxlan_socket_create(vxlan->net, ipv6, in __vxlan_sock_add()
3575 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3581 rcu_assign_pointer(vxlan->vn6_sock, vs); in __vxlan_sock_add()
3582 node = &vxlan->hlist6; in __vxlan_sock_add()
3586 rcu_assign_pointer(vxlan->vn4_sock, vs); in __vxlan_sock_add()
3587 node = &vxlan->hlist4; in __vxlan_sock_add()
3589 vxlan_vs_add_dev(vs, vxlan, node); in __vxlan_sock_add()
3593 static int vxlan_sock_add(struct vxlan_dev *vxlan) in vxlan_sock_add() argument
3595 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; in vxlan_sock_add()
3596 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata; in vxlan_sock_add()
3600 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_add()
3602 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_add()
3604 ret = __vxlan_sock_add(vxlan, true); in vxlan_sock_add()
3610 ret = __vxlan_sock_add(vxlan, false); in vxlan_sock_add()
3612 vxlan_sock_release(vxlan); in vxlan_sock_add()
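
vxlan_sock_add() above clears both socket pointers, opens an IPv6 underlay socket when VXLAN_F_IPV6 or collect-metadata is set, opens an IPv4 one unless the device is IPv6-only, and releases everything on failure. A loose user-space sketch of that dual-socket setup/teardown order on the IANA VXLAN port 4789; the driver actually shares or creates kernel UDP tunnel sockets keyed on cfg.dst_port, the flags, and the lower device, so this is only an illustration:

    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int open_udp(int family, uint16_t port)
    {
        int fd = socket(family, SOCK_DGRAM, 0);
        int v6only = 1;

        if (fd < 0)
            return -1;

        if (family == AF_INET6) {
            /* Keep the v6 socket v6-only so a separate v4 socket can bind
             * the same port, mirroring the vn6_sock/vn4_sock pair above. */
            setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &v6only, sizeof(v6only));
            struct sockaddr_in6 a6 = { .sin6_family = AF_INET6,
                                       .sin6_port   = htons(port) };
            if (bind(fd, (struct sockaddr *)&a6, sizeof(a6)) < 0)
                goto err;
        } else {
            struct sockaddr_in a4 = { .sin_family = AF_INET,
                                      .sin_port   = htons(port) };
            if (bind(fd, (struct sockaddr *)&a4, sizeof(a4)) < 0)
                goto err;
        }
        return fd;
    err:
        close(fd);
        return -1;
    }

    int main(void)
    {
        int fd6 = open_udp(AF_INET6, 4789);
        int fd4 = open_udp(AF_INET, 4789);

        if (fd6 < 0 || fd4 < 0) {
            /* Mirror vxlan_sock_release(): undo both on any failure. */
            if (fd6 >= 0)
                close(fd6);
            if (fd4 >= 0)
                close(fd4);
            perror("bind 4789");
            return 1;
        }
        puts("both underlay sockets ready");
        close(fd6);
        close(fd4);
        return 0;
    }
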
3784 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_config_apply() local
3785 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_config_apply()
3799 vxlan->net = src_net; in vxlan_config_apply()
3835 memcpy(&vxlan->cfg, conf, sizeof(*conf)); in vxlan_config_apply()
3842 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dev_configure() local
3846 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack); in vxlan_dev_configure()
3860 struct vxlan_dev *vxlan = netdev_priv(dev); in __vxlan_dev_create() local
3867 dst = &vxlan->default_dst; in __vxlan_dev_create()
3876 err = vxlan_fdb_create(vxlan, all_zeros_mac, in __vxlan_dev_create()
3879 vxlan->cfg.dst_port, in __vxlan_dev_create()
3910 vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f); in __vxlan_dev_create()
3913 err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), in __vxlan_dev_create()
3916 vxlan_fdb_destroy(vxlan, f, false, false); in __vxlan_dev_create()
3923 list_add(&vxlan->next, &vn->vxlan_list); in __vxlan_dev_create()
3975 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_nl2conf() local
3982 memcpy(conf, &vxlan->cfg, sizeof(*conf)); in vxlan_nl2conf()
4246 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_changelink() local
4252 dst = &vxlan->default_dst; in vxlan_changelink()
4257 err = vxlan_config_validate(vxlan->net, &conf, &lowerdev, in vxlan_changelink()
4258 vxlan, extack); in vxlan_changelink()
4272 u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni); in vxlan_changelink()
4274 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4276 err = vxlan_fdb_update(vxlan, all_zeros_mac, in vxlan_changelink()
4280 vxlan->cfg.dst_port, in vxlan_changelink()
4285 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4292 __vxlan_fdb_delete(vxlan, all_zeros_mac, in vxlan_changelink()
4294 vxlan->cfg.dst_port, in vxlan_changelink()
4299 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4302 if (conf.age_interval != vxlan->cfg.age_interval) in vxlan_changelink()
4303 mod_timer(&vxlan->age_timer, jiffies); in vxlan_changelink()
4308 vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); in vxlan_changelink()
4314 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dellink() local
4316 vxlan_flush(vxlan, true); in vxlan_dellink()
4318 list_del(&vxlan->next); in vxlan_dellink()
4320 if (vxlan->default_dst.remote_dev) in vxlan_dellink()
4321 netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev); in vxlan_dellink()
4356 const struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_info() local
4357 const struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_fill_info()
4359 .low = htons(vxlan->cfg.port_min), in vxlan_fill_info()
4360 .high = htons(vxlan->cfg.port_max), in vxlan_fill_info()
4383 if (!vxlan_addr_any(&vxlan->cfg.saddr)) { in vxlan_fill_info()
4384 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { in vxlan_fill_info()
4386 vxlan->cfg.saddr.sin.sin_addr.s_addr)) in vxlan_fill_info()
4391 &vxlan->cfg.saddr.sin6.sin6_addr)) in vxlan_fill_info()
4397 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || in vxlan_fill_info()
4399 !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) || in vxlan_fill_info()
4400 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || in vxlan_fill_info()
4401 nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) || in vxlan_fill_info()
4402 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || in vxlan_fill_info()
4404 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) || in vxlan_fill_info()
4406 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) || in vxlan_fill_info()
4408 !!(vxlan->cfg.flags & VXLAN_F_RSC)) || in vxlan_fill_info()
4410 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) || in vxlan_fill_info()
4412 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) || in vxlan_fill_info()
4414 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) || in vxlan_fill_info()
4415 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || in vxlan_fill_info()
4416 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || in vxlan_fill_info()
4417 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || in vxlan_fill_info()
4419 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) || in vxlan_fill_info()
4421 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || in vxlan_fill_info()
4423 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) || in vxlan_fill_info()
4425 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) || in vxlan_fill_info()
4427 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX))) in vxlan_fill_info()
4433 if (vxlan->cfg.flags & VXLAN_F_GBP && in vxlan_fill_info()
4437 if (vxlan->cfg.flags & VXLAN_F_GPE && in vxlan_fill_info()
4441 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL && in vxlan_fill_info()
4453 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_net() local
4455 return vxlan->net; in vxlan_get_link_net()
4510 struct vxlan_dev *vxlan, *next; in vxlan_handle_lowerdev_unregister() local
4513 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { in vxlan_handle_lowerdev_unregister()
4514 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_handle_lowerdev_unregister()
4523 vxlan_dellink(vxlan->dev, &list_kill); in vxlan_handle_lowerdev_unregister()
4558 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_offloaded_set() local
4563 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_offloaded_set()
4565 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_offloaded_set()
4567 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_offloaded_set()
4581 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_offloaded_set()
4588 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_add() local
4593 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_add()
4596 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_add()
4597 err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip, in vxlan_fdb_external_learn_add()
4606 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_add()
4615 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_del() local
4620 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_del()
4621 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_del()
4623 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_del()
4627 err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr, in vxlan_fdb_external_learn_del()
4635 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_del()
4683 struct vxlan_dev *vxlan; in vxlan_fdb_nh_flush() local
4688 vxlan = rcu_dereference(fdb->vdev); in vxlan_fdb_nh_flush()
4689 WARN_ON(!vxlan); in vxlan_fdb_nh_flush()
4690 hash_index = fdb_head_index(vxlan, fdb->eth_addr, in vxlan_fdb_nh_flush()
4691 vxlan->default_dst.remote_vni); in vxlan_fdb_nh_flush()
4692 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_nh_flush()
4694 vxlan_fdb_destroy(vxlan, fdb, false, false); in vxlan_fdb_nh_flush()
4695 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_nh_flush()
4734 struct vxlan_dev *vxlan, *next; in vxlan_destroy_tunnels() local
4741 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { in vxlan_destroy_tunnels()
4745 if (!net_eq(dev_net(vxlan->dev), net)) in vxlan_destroy_tunnels()
4746 unregister_netdevice_queue(vxlan->dev, head); in vxlan_destroy_tunnels()