Lines Matching refs:vxlan

60 static int vxlan_sock_add(struct vxlan_dev *vxlan);
62 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
225 if (node->vxlan->default_dst.remote_vni != vni) in vxlan_vs_find_vni()
229 const struct vxlan_config *cfg = &node->vxlan->cfg; in vxlan_vs_find_vni()
236 return node->vxlan; in vxlan_vs_find_vni()
257 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, in vxlan_fdb_info() argument
299 ndm->ndm_ifindex = vxlan->dev->ifindex; in vxlan_fdb_info()
305 if (!net_eq(dev_net(vxlan->dev), vxlan->net) && in vxlan_fdb_info()
307 peernet2id(dev_net(vxlan->dev), vxlan->net))) in vxlan_fdb_info()
321 rdst->remote_port != vxlan->cfg.dst_port && in vxlan_fdb_info()
324 if (rdst->remote_vni != vxlan->default_dst.remote_vni && in vxlan_fdb_info()
332 if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni && in vxlan_fdb_info()
365 static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in __vxlan_fdb_notify() argument
368 struct net *net = dev_net(vxlan->dev); in __vxlan_fdb_notify()
376 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); in __vxlan_fdb_notify()
391 static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_notifier_info() argument
397 fdb_info->info.dev = vxlan->dev; in vxlan_fdb_switchdev_notifier_info()
409 static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, in vxlan_fdb_switchdev_call_notifiers() argument
424 vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info); in vxlan_fdb_switchdev_call_notifiers()
425 ret = call_switchdev_notifiers(notifier_type, vxlan->dev, in vxlan_fdb_switchdev_call_notifiers()
430 static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_notify() argument
439 err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
445 vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, in vxlan_fdb_notify()
451 __vxlan_fdb_notify(vxlan, fdb, rd, type); in vxlan_fdb_notify()
457 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_ip_miss() local
466 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_ip_miss()
469 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) in vxlan_fdb_miss() argument
478 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); in vxlan_fdb_miss()
503 static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni) in fdb_head_index() argument
505 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) in fdb_head_index()
512 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, in vxlan_fdb_head() argument
515 return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)]; in vxlan_fdb_head()
519 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, in __vxlan_find_mac() argument
522 struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni); in __vxlan_find_mac()
527 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { in __vxlan_find_mac()
539 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, in vxlan_find_mac() argument
544 f = __vxlan_find_mac(vxlan, mac, vni); in vxlan_find_mac()
572 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_find_uc() local
586 f = __vxlan_find_mac(vxlan, eth_addr, vni); in vxlan_fdb_find_uc()
593 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info); in vxlan_fdb_find_uc()
602 const struct vxlan_dev *vxlan, in vxlan_fdb_notify_one() argument
610 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info); in vxlan_fdb_notify_one()
620 struct vxlan_dev *vxlan; in vxlan_fdb_replay() local
628 vxlan = netdev_priv(dev); in vxlan_fdb_replay()
631 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
632 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) { in vxlan_fdb_replay()
635 rc = vxlan_fdb_notify_one(nb, vxlan, in vxlan_fdb_replay()
643 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
648 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_replay()
655 struct vxlan_dev *vxlan; in vxlan_fdb_clear_offload() local
662 vxlan = netdev_priv(dev); in vxlan_fdb_clear_offload()
665 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_clear_offload()
666 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) in vxlan_fdb_clear_offload()
670 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_fdb_clear_offload()
854 static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac, in vxlan_fdb_alloc() argument
868 RCU_INIT_POINTER(f->vdev, vxlan); in vxlan_fdb_alloc()
876 static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac, in vxlan_fdb_insert() argument
879 ++vxlan->addrcnt; in vxlan_fdb_insert()
881 vxlan_fdb_head(vxlan, mac, src_vni)); in vxlan_fdb_insert()
884 static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, in vxlan_fdb_nh_update() argument
894 nh = nexthop_find_by_id(vxlan->net, nhid); in vxlan_fdb_nh_update()
917 switch (vxlan->default_dst.remote_ip.sa.sa_family) { in vxlan_fdb_nh_update()
948 static int vxlan_fdb_create(struct vxlan_dev *vxlan, in vxlan_fdb_create() argument
959 if (vxlan->cfg.addrmax && in vxlan_fdb_create()
960 vxlan->addrcnt >= vxlan->cfg.addrmax) in vxlan_fdb_create()
963 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_create()
964 f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); in vxlan_fdb_create()
969 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_create()
1010 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_destroy() argument
1015 netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); in vxlan_fdb_destroy()
1017 --vxlan->addrcnt; in vxlan_fdb_destroy()
1020 vxlan_fdb_notify(vxlan, f, NULL, RTM_DELNEIGH, in vxlan_fdb_destroy()
1024 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, in vxlan_fdb_destroy()
1041 static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, in vxlan_fdb_update_existing() argument
1091 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); in vxlan_fdb_update_existing()
1121 err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, in vxlan_fdb_update_existing()
1141 static int vxlan_fdb_update_create(struct vxlan_dev *vxlan, in vxlan_fdb_update_create() argument
1158 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); in vxlan_fdb_update_create()
1159 rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, in vxlan_fdb_update_create()
1164 vxlan_fdb_insert(vxlan, mac, src_vni, f); in vxlan_fdb_update_create()
1165 rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, in vxlan_fdb_update_create()
1173 vxlan_fdb_destroy(vxlan, f, false, false); in vxlan_fdb_update_create()
1178 static int vxlan_fdb_update(struct vxlan_dev *vxlan, in vxlan_fdb_update() argument
1188 f = __vxlan_find_mac(vxlan, mac, src_vni); in vxlan_fdb_update()
1191 netdev_dbg(vxlan->dev, in vxlan_fdb_update()
1196 return vxlan_fdb_update_existing(vxlan, ip, state, flags, port, in vxlan_fdb_update()
1203 return vxlan_fdb_update_create(vxlan, mac, ip, state, flags, in vxlan_fdb_update()
1210 static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, in vxlan_fdb_dst_destroy() argument
1214 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL); in vxlan_fdb_dst_destroy()
1218 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, in vxlan_fdb_parse() argument
1222 struct net *net = dev_net(vxlan->dev); in vxlan_fdb_parse()
1234 union vxlan_addr *remote = &vxlan->default_dst.remote_ip; in vxlan_fdb_parse()
1252 *port = vxlan->cfg.dst_port; in vxlan_fdb_parse()
1260 *vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1268 *src_vni = vxlan->default_dst.remote_vni; in vxlan_fdb_parse()
1298 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_add() local
1316 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_add()
1321 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) in vxlan_fdb_add()
1324 hash_index = fdb_head_index(vxlan, addr, src_vni); in vxlan_fdb_add()
1325 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_add()
1326 err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, in vxlan_fdb_add()
1330 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_add()
1335 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, in __vxlan_fdb_delete() argument
1344 f = vxlan_find_mac(vxlan, addr, src_vni); in __vxlan_fdb_delete()
1358 vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify); in __vxlan_fdb_delete()
1362 vxlan_fdb_destroy(vxlan, f, true, swdev_notify); in __vxlan_fdb_delete()
1373 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_delete() local
1381 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, in vxlan_fdb_delete()
1386 hash_index = fdb_head_index(vxlan, addr, src_vni); in vxlan_fdb_delete()
1387 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete()
1388 err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, in vxlan_fdb_delete()
1390 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete()
1400 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_dump() local
1408 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { in vxlan_fdb_dump()
1414 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1432 err = vxlan_fdb_info(skb, vxlan, f, in vxlan_fdb_dump()
1458 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_get() local
1466 vni = vxlan->default_dst.remote_vni; in vxlan_fdb_get()
1470 f = __vxlan_find_mac(vxlan, addr, vni); in vxlan_fdb_get()
1477 err = vxlan_fdb_info(skb, vxlan, f, portid, seq, in vxlan_fdb_get()
1492 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_snoop() local
1502 f = vxlan_find_mac(vxlan, src_mac, vni); in vxlan_snoop()
1525 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); in vxlan_snoop()
1527 u32 hash_index = fdb_head_index(vxlan, src_mac, vni); in vxlan_snoop()
1530 spin_lock(&vxlan->hash_lock[hash_index]); in vxlan_snoop()
1534 vxlan_fdb_update(vxlan, src_mac, src_ip, in vxlan_snoop()
1537 vxlan->cfg.dst_port, in vxlan_snoop()
1539 vxlan->default_dst.remote_vni, in vxlan_snoop()
1541 spin_unlock(&vxlan->hash_lock[hash_index]); in vxlan_snoop()
1550 struct vxlan_dev *vxlan; in vxlan_group_used() local
1570 list_for_each_entry(vxlan, &vn->vxlan_list, next) { in vxlan_group_used()
1571 if (!netif_running(vxlan->dev) || vxlan == dev) in vxlan_group_used()
1575 rtnl_dereference(vxlan->vn4_sock) != sock4) in vxlan_group_used()
1579 rtnl_dereference(vxlan->vn6_sock) != sock6) in vxlan_group_used()
1583 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip, in vxlan_group_used()
1587 if (vxlan->default_dst.remote_ifindex != in vxlan_group_used()
1618 static void vxlan_sock_release(struct vxlan_dev *vxlan) in vxlan_sock_release() argument
1620 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_sock_release()
1622 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_sock_release()
1624 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_release()
1627 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_release()
1630 vxlan_vs_del_dev(vxlan); in vxlan_sock_release()
1648 static int vxlan_igmp_join(struct vxlan_dev *vxlan) in vxlan_igmp_join() argument
1651 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; in vxlan_igmp_join()
1652 int ifindex = vxlan->default_dst.remote_ifindex; in vxlan_igmp_join()
1656 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_igmp_join()
1668 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_igmp_join()
1682 static int vxlan_igmp_leave(struct vxlan_dev *vxlan) in vxlan_igmp_leave() argument
1685 union vxlan_addr *ip = &vxlan->default_dst.remote_ip; in vxlan_igmp_leave()
1686 int ifindex = vxlan->default_dst.remote_ifindex; in vxlan_igmp_leave()
1690 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); in vxlan_igmp_leave()
1702 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); in vxlan_igmp_leave()
1767 static bool vxlan_set_mac(struct vxlan_dev *vxlan, in vxlan_set_mac() argument
1775 skb->protocol = eth_type_trans(skb, vxlan->dev); in vxlan_set_mac()
1779 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) in vxlan_set_mac()
1793 if ((vxlan->cfg.flags & VXLAN_F_LEARN) && in vxlan_set_mac()
1827 struct vxlan_dev *vxlan; in vxlan_rcv() local
1859 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); in vxlan_rcv()
1860 if (!vxlan) in vxlan_rcv()
1874 !net_eq(vxlan->net, dev_net(vxlan->dev)))) in vxlan_rcv()
1916 if (!vxlan_set_mac(vxlan, vs, skb, vni)) in vxlan_rcv()
1920 skb->dev = vxlan->dev; in vxlan_rcv()
1928 ++vxlan->dev->stats.rx_frame_errors; in vxlan_rcv()
1929 ++vxlan->dev->stats.rx_errors; in vxlan_rcv()
1935 if (unlikely(!(vxlan->dev->flags & IFF_UP))) { in vxlan_rcv()
1937 atomic_long_inc(&vxlan->dev->rx_dropped); in vxlan_rcv()
1941 dev_sw_netstats_rx_add(vxlan->dev, skb->len); in vxlan_rcv()
1942 gro_cells_receive(&vxlan->gro_cells, skb); in vxlan_rcv()
1957 struct vxlan_dev *vxlan; in vxlan_err_lookup() local
1975 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); in vxlan_err_lookup()
1976 if (!vxlan) in vxlan_err_lookup()
1984 struct vxlan_dev *vxlan = netdev_priv(dev); in arp_reduce() local
2029 f = vxlan_find_mac(vxlan, n->ha, vni); in arp_reduce()
2051 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in arp_reduce()
2158 struct vxlan_dev *vxlan = netdev_priv(dev); in neigh_reduce() local
2189 f = vxlan_find_mac(vxlan, n->ha, vni); in neigh_reduce()
2207 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { in neigh_reduce()
2225 struct vxlan_dev *vxlan = netdev_priv(dev); in route_shortcircuit() local
2241 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2262 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { in route_shortcircuit()
2391 static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, in vxlan_get_route() argument
2423 rt = ip_route_output_key(vxlan->net, &fl4); in vxlan_get_route()
2442 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, in vxlan6_get_route() argument
2478 ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, in vxlan6_get_route()
2554 struct vxlan_dev *vxlan, in encap_bypass_if_local() argument
2573 dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni, in encap_bypass_if_local()
2575 vxlan->cfg.flags); in encap_bypass_if_local()
2582 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni, true); in encap_bypass_if_local()
2595 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit_one() local
2607 u32 flags = vxlan->cfg.flags; in vxlan_xmit_one()
2609 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); in vxlan_xmit_one()
2618 vxlan_encap_bypass(skb, vxlan, vxlan, in vxlan_xmit_one()
2625 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; in vxlan_xmit_one()
2628 local_ip = vxlan->cfg.saddr; in vxlan_xmit_one()
2634 ttl = vxlan->cfg.ttl; in vxlan_xmit_one()
2639 tos = vxlan->cfg.tos; in vxlan_xmit_one()
2647 label = vxlan->cfg.label; in vxlan_xmit_one()
2663 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_xmit_one()
2677 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_xmit_one()
2678 vxlan->cfg.port_max, true); in vxlan_xmit_one()
2682 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_xmit_one()
2689 rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos, in vxlan_xmit_one()
2701 err = encap_bypass_if_local(skb, dev, vxlan, dst, in vxlan_xmit_one()
2707 if (vxlan->cfg.df == VXLAN_DF_SET) { in vxlan_xmit_one()
2709 } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) { in vxlan_xmit_one()
2740 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
2757 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_xmit_one()
2762 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos, in vxlan_xmit_one()
2776 err = encap_bypass_if_local(skb, dev, vxlan, dst, in vxlan_xmit_one()
2803 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); in vxlan_xmit_one()
2882 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_xmit() local
2894 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { in vxlan_xmit()
2907 if (vxlan->cfg.flags & VXLAN_F_PROXY) { in vxlan_xmit()
2926 f = vxlan_find_mac(vxlan, eth->h_dest, vni); in vxlan_xmit()
2929 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) && in vxlan_xmit()
2934 f = vxlan_find_mac(vxlan, eth->h_dest, vni); in vxlan_xmit()
2938 f = vxlan_find_mac(vxlan, all_zeros_mac, vni); in vxlan_xmit()
2940 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) && in vxlan_xmit()
2942 vxlan_fdb_miss(vxlan, eth->h_dest); in vxlan_xmit()
2952 (vni ? : vxlan->default_dst.remote_vni), did_rsc); in vxlan_xmit()
2977 struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer); in vxlan_cleanup() local
2981 if (!netif_running(vxlan->dev)) in vxlan_cleanup()
2987 spin_lock(&vxlan->hash_lock[h]); in vxlan_cleanup()
2988 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { in vxlan_cleanup()
2999 timeout = f->used + vxlan->cfg.age_interval * HZ; in vxlan_cleanup()
3001 netdev_dbg(vxlan->dev, in vxlan_cleanup()
3005 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_cleanup()
3009 spin_unlock(&vxlan->hash_lock[h]); in vxlan_cleanup()
3012 mod_timer(&vxlan->age_timer, next_timer); in vxlan_cleanup()
3015 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) in vxlan_vs_del_dev() argument
3017 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_vs_del_dev()
3020 hlist_del_init_rcu(&vxlan->hlist4.hlist); in vxlan_vs_del_dev()
3022 hlist_del_init_rcu(&vxlan->hlist6.hlist); in vxlan_vs_del_dev()
3027 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, in vxlan_vs_add_dev() argument
3030 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_vs_add_dev()
3031 __be32 vni = vxlan->default_dst.remote_vni; in vxlan_vs_add_dev()
3033 node->vxlan = vxlan; in vxlan_vs_add_dev()
3042 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_init() local
3049 err = gro_cells_init(&vxlan->gro_cells, dev); in vxlan_init()
3058 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) in vxlan_fdb_delete_default() argument
3061 u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni); in vxlan_fdb_delete_default()
3063 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete_default()
3064 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); in vxlan_fdb_delete_default()
3066 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_fdb_delete_default()
3067 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_delete_default()
3072 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_uninit() local
3074 gro_cells_destroy(&vxlan->gro_cells); in vxlan_uninit()
3076 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); in vxlan_uninit()
3084 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_open() local
3087 ret = vxlan_sock_add(vxlan); in vxlan_open()
3091 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { in vxlan_open()
3092 ret = vxlan_igmp_join(vxlan); in vxlan_open()
3096 vxlan_sock_release(vxlan); in vxlan_open()
3101 if (vxlan->cfg.age_interval) in vxlan_open()
3102 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); in vxlan_open()
3108 static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) in vxlan_flush() argument
3115 spin_lock_bh(&vxlan->hash_lock[h]); in vxlan_flush()
3116 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { in vxlan_flush()
3123 f->vni == vxlan->cfg.vni) in vxlan_flush()
3125 vxlan_fdb_destroy(vxlan, f, true, true); in vxlan_flush()
3127 spin_unlock_bh(&vxlan->hash_lock[h]); in vxlan_flush()
3134 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_stop() local
3135 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in vxlan_stop()
3138 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && in vxlan_stop()
3139 !vxlan_group_used(vn, vxlan)) in vxlan_stop()
3140 ret = vxlan_igmp_leave(vxlan); in vxlan_stop()
3142 del_timer_sync(&vxlan->age_timer); in vxlan_stop()
3144 vxlan_flush(vxlan, false); in vxlan_stop()
3145 vxlan_sock_release(vxlan); in vxlan_stop()
3157 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_change_mtu() local
3158 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_change_mtu()
3159 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_change_mtu()
3166 int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags); in vxlan_change_mtu()
3177 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_metadata_dst() local
3181 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, in vxlan_fill_metadata_dst()
3182 vxlan->cfg.port_max, true); in vxlan_fill_metadata_dst()
3183 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; in vxlan_fill_metadata_dst()
3186 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); in vxlan_fill_metadata_dst()
3189 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, in vxlan_fill_metadata_dst()
3198 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); in vxlan_fill_metadata_dst()
3201 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, in vxlan_fill_metadata_dst()
3285 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_setup() local
3310 INIT_LIST_HEAD(&vxlan->next); in vxlan_setup()
3312 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE); in vxlan_setup()
3314 vxlan->dev = dev; in vxlan_setup()
3317 spin_lock_init(&vxlan->hash_lock[h]); in vxlan_setup()
3318 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); in vxlan_setup()
3448 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_ksettings() local
3449 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_get_link_ksettings()
3450 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, in vxlan_get_link_ksettings()
3551 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) in __vxlan_sock_add() argument
3553 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); in __vxlan_sock_add()
3558 if (vxlan->cfg.remote_ifindex) in __vxlan_sock_add()
3560 vxlan->net, vxlan->cfg.remote_ifindex); in __vxlan_sock_add()
3562 if (!vxlan->cfg.no_share) { in __vxlan_sock_add()
3564 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, in __vxlan_sock_add()
3565 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3574 vs = vxlan_socket_create(vxlan->net, ipv6, in __vxlan_sock_add()
3575 vxlan->cfg.dst_port, vxlan->cfg.flags, in __vxlan_sock_add()
3581 rcu_assign_pointer(vxlan->vn6_sock, vs); in __vxlan_sock_add()
3582 node = &vxlan->hlist6; in __vxlan_sock_add()
3586 rcu_assign_pointer(vxlan->vn4_sock, vs); in __vxlan_sock_add()
3587 node = &vxlan->hlist4; in __vxlan_sock_add()
3589 vxlan_vs_add_dev(vs, vxlan, node); in __vxlan_sock_add()
3593 static int vxlan_sock_add(struct vxlan_dev *vxlan) in vxlan_sock_add() argument
3595 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; in vxlan_sock_add()
3596 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata; in vxlan_sock_add()
3600 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); in vxlan_sock_add()
3602 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); in vxlan_sock_add()
3604 ret = __vxlan_sock_add(vxlan, true); in vxlan_sock_add()
3610 ret = __vxlan_sock_add(vxlan, false); in vxlan_sock_add()
3612 vxlan_sock_release(vxlan); in vxlan_sock_add()
3785 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_config_apply() local
3786 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_config_apply()
3800 vxlan->net = src_net; in vxlan_config_apply()
3834 memcpy(&vxlan->cfg, conf, sizeof(*conf)); in vxlan_config_apply()
3841 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dev_configure() local
3845 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack); in vxlan_dev_configure()
3859 struct vxlan_dev *vxlan = netdev_priv(dev); in __vxlan_dev_create() local
3866 dst = &vxlan->default_dst; in __vxlan_dev_create()
3875 err = vxlan_fdb_create(vxlan, all_zeros_mac, in __vxlan_dev_create()
3878 vxlan->cfg.dst_port, in __vxlan_dev_create()
3909 vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f); in __vxlan_dev_create()
3912 err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), in __vxlan_dev_create()
3915 vxlan_fdb_destroy(vxlan, f, false, false); in __vxlan_dev_create()
3922 list_add(&vxlan->next, &vn->vxlan_list); in __vxlan_dev_create()
3974 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_nl2conf() local
3981 memcpy(conf, &vxlan->cfg, sizeof(*conf)); in vxlan_nl2conf()
4245 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_changelink() local
4251 dst = &vxlan->default_dst; in vxlan_changelink()
4256 err = vxlan_config_validate(vxlan->net, &conf, &lowerdev, in vxlan_changelink()
4257 vxlan, extack); in vxlan_changelink()
4271 u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni); in vxlan_changelink()
4273 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4275 err = vxlan_fdb_update(vxlan, all_zeros_mac, in vxlan_changelink()
4279 vxlan->cfg.dst_port, in vxlan_changelink()
4284 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4291 __vxlan_fdb_delete(vxlan, all_zeros_mac, in vxlan_changelink()
4293 vxlan->cfg.dst_port, in vxlan_changelink()
4298 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_changelink()
4301 if (conf.age_interval != vxlan->cfg.age_interval) in vxlan_changelink()
4302 mod_timer(&vxlan->age_timer, jiffies); in vxlan_changelink()
4307 vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); in vxlan_changelink()
4313 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_dellink() local
4315 vxlan_flush(vxlan, true); in vxlan_dellink()
4317 list_del(&vxlan->next); in vxlan_dellink()
4319 if (vxlan->default_dst.remote_dev) in vxlan_dellink()
4320 netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev); in vxlan_dellink()
4355 const struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fill_info() local
4356 const struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_fill_info()
4358 .low = htons(vxlan->cfg.port_min), in vxlan_fill_info()
4359 .high = htons(vxlan->cfg.port_max), in vxlan_fill_info()
4382 if (!vxlan_addr_any(&vxlan->cfg.saddr)) { in vxlan_fill_info()
4383 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { in vxlan_fill_info()
4385 vxlan->cfg.saddr.sin.sin_addr.s_addr)) in vxlan_fill_info()
4390 &vxlan->cfg.saddr.sin6.sin6_addr)) in vxlan_fill_info()
4396 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || in vxlan_fill_info()
4398 !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) || in vxlan_fill_info()
4399 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || in vxlan_fill_info()
4400 nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) || in vxlan_fill_info()
4401 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || in vxlan_fill_info()
4403 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) || in vxlan_fill_info()
4405 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) || in vxlan_fill_info()
4407 !!(vxlan->cfg.flags & VXLAN_F_RSC)) || in vxlan_fill_info()
4409 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) || in vxlan_fill_info()
4411 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) || in vxlan_fill_info()
4413 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) || in vxlan_fill_info()
4414 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || in vxlan_fill_info()
4415 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || in vxlan_fill_info()
4416 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || in vxlan_fill_info()
4418 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) || in vxlan_fill_info()
4420 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || in vxlan_fill_info()
4422 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) || in vxlan_fill_info()
4424 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) || in vxlan_fill_info()
4426 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX))) in vxlan_fill_info()
4432 if (vxlan->cfg.flags & VXLAN_F_GBP && in vxlan_fill_info()
4436 if (vxlan->cfg.flags & VXLAN_F_GPE && in vxlan_fill_info()
4440 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL && in vxlan_fill_info()
4452 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_get_link_net() local
4454 return vxlan->net; in vxlan_get_link_net()
4509 struct vxlan_dev *vxlan, *next; in vxlan_handle_lowerdev_unregister() local
4512 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { in vxlan_handle_lowerdev_unregister()
4513 struct vxlan_rdst *dst = &vxlan->default_dst; in vxlan_handle_lowerdev_unregister()
4522 vxlan_dellink(vxlan->dev, &list_kill); in vxlan_handle_lowerdev_unregister()
4552 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_offloaded_set() local
4557 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_offloaded_set()
4559 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_offloaded_set()
4561 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_offloaded_set()
4575 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_offloaded_set()
4582 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_add() local
4587 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_add()
4590 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_add()
4591 err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip, in vxlan_fdb_external_learn_add()
4600 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_add()
4609 struct vxlan_dev *vxlan = netdev_priv(dev); in vxlan_fdb_external_learn_del() local
4614 hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_del()
4615 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_del()
4617 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); in vxlan_fdb_external_learn_del()
4621 err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr, in vxlan_fdb_external_learn_del()
4629 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_external_learn_del()
4677 struct vxlan_dev *vxlan; in vxlan_fdb_nh_flush() local
4682 vxlan = rcu_dereference(fdb->vdev); in vxlan_fdb_nh_flush()
4683 WARN_ON(!vxlan); in vxlan_fdb_nh_flush()
4684 hash_index = fdb_head_index(vxlan, fdb->eth_addr, in vxlan_fdb_nh_flush()
4685 vxlan->default_dst.remote_vni); in vxlan_fdb_nh_flush()
4686 spin_lock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_nh_flush()
4688 vxlan_fdb_destroy(vxlan, fdb, false, false); in vxlan_fdb_nh_flush()
4689 spin_unlock_bh(&vxlan->hash_lock[hash_index]); in vxlan_fdb_nh_flush()
4731 struct vxlan_dev *vxlan, *next; in vxlan_destroy_tunnels() local
4738 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { in vxlan_destroy_tunnels()
4742 if (!net_eq(dev_net(vxlan->dev), net)) in vxlan_destroy_tunnels()
4743 unregister_netdevice_queue(vxlan->dev, head); in vxlan_destroy_tunnels()
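
The pattern that recurs throughout the listing is per-bucket FDB locking: fdb_head_index() (line 503) selects a hash bucket for a (MAC, VNI) pair, vxlan_fdb_head() (line 512) returns that bucket, and writers take the matching vxlan->hash_lock[] spinlock before touching vxlan->fdb_head[] (e.g. lines 1324-1330, 1386-1390, 4557-4575). A minimal sketch of that pattern follows; it is illustrative only, not copied from the source, and FDB_HASH_SIZE, eth_hash(), eth_vni_hash() and the caller vxlan_fdb_change() are assumed names introduced here for the example.

    /* Illustrative sketch of the bucket-select-then-lock pattern seen above.
     * FDB_HASH_SIZE, eth_hash(), eth_vni_hash() and vxlan_fdb_change() are
     * assumptions for this sketch, not taken from the listing.
     */
    #define FDB_HASH_SIZE 256

    static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
    {
            /* With VXLAN_F_COLLECT_METADATA the VNI takes part in the hash
             * (lines 503-509); otherwise only the MAC address does.
             */
            if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
                    return eth_vni_hash(mac, vni) % FDB_HASH_SIZE;
            return eth_hash(mac) % FDB_HASH_SIZE;
    }

    static struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
                                             const u8 *mac, __be32 vni)
    {
            return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)];
    }

    /* Hypothetical writer: modify one bucket under its spinlock,
     * as at lines 1324-1330 and 1386-1390 in the listing.
     */
    static void vxlan_fdb_change(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
    {
            u32 h = fdb_head_index(vxlan, mac, vni);

            spin_lock_bh(&vxlan->hash_lock[h]);
            /* ... walk or update entries on vxlan_fdb_head(vxlan, mac, vni) ... */
            spin_unlock_bh(&vxlan->hash_lock[h]);
    }

Read-only paths in the listing, such as the dump at line 1408, walk the same buckets with hlist_for_each_entry_rcu() rather than taking the per-bucket spinlock.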