// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Isovalent */

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/netfilter_netdev.h>
#include <linux/bpf_mprog.h>
#include <linux/indirect_call_wrapper.h>

#include <net/netkit.h>
#include <net/dst.h>
#include <net/tcx.h>

#define DRV_NAME "netkit"

struct netkit {
	/* Needed in fast-path */
	struct net_device __rcu *peer;
	struct bpf_mprog_entry __rcu *active;
	enum netkit_action policy;
	enum netkit_scrub scrub;
	struct bpf_mprog_bundle bundle;

	/* Needed in slow-path */
	enum netkit_mode mode;
	bool primary;
	u32 headroom;
};

struct netkit_link {
	struct bpf_link link;
	struct net_device *dev;
	u32 location;
};
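
/* Run the BPF programs attached to this device in their mprog order. Each
 * program sees the skb with freshly computed data pointers; the first verdict
 * other than NETKIT_NEXT short-circuits the chain, otherwise the caller's
 * default policy (ret) is returned.
 */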
static __always_inline int
netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
	   enum netkit_action ret)
{
	const struct bpf_mprog_fp *fp;
	const struct bpf_prog *prog;

	bpf_mprog_foreach_prog(entry, fp, prog) {
		bpf_compute_data_pointers(skb);
		ret = bpf_prog_run(prog, skb);
		if (ret != NETKIT_NEXT)
			break;
	}
	return ret;
}
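
/* Preparing an skb for the peer always scrubs packet state (dst, conntrack,
 * etc.) and resets the mac header; when the peer lives in a different network
 * namespace, timestamps are cleared as well, and with the default scrub
 * policy also skb->mark and skb->priority, so they do not leak across the
 * namespace boundary.
 */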
static void netkit_xnet(struct sk_buff *skb)
{
	skb->priority = 0;
	skb->mark = 0;
}

static void netkit_prep_forward(struct sk_buff *skb,
				bool xnet, bool xnet_scrub)
{
	skb_scrub_packet(skb, false);
	nf_skip_egress(skb, true);
	skb_reset_mac_header(skb);
	if (!xnet)
		return;
	ipvs_reset(skb);
	skb_clear_tstamp(skb);
	if (xnet_scrub)
		netkit_xnet(skb);
}

static struct netkit *netkit_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}
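
/* Transmit path: a packet sent on one side of the pair is prepared, run
 * through the BPF programs attached to the transmitting device and then,
 * depending on the verdict, injected into the peer's receive path
 * (NETKIT_PASS/NETKIT_NEXT), redirected elsewhere (NETKIT_REDIRECT) or
 * dropped. Stats are accounted as tx on this device and rx on the peer.
 */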
static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	struct netkit *nk = netkit_priv(dev);
	enum netkit_action ret = READ_ONCE(nk->policy);
	netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
	const struct bpf_mprog_entry *entry;
	struct net_device *peer;
	int len = skb->len;

	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	rcu_read_lock();
	peer = rcu_dereference(nk->peer);
	if (unlikely(!peer || !(peer->flags & IFF_UP) ||
		     !pskb_may_pull(skb, ETH_HLEN) ||
		     skb_orphan_frags(skb, GFP_ATOMIC)))
		goto drop;
	netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)),
			    nk->scrub);
	eth_skb_pkt_type(skb, peer);
	skb->dev = peer;
	entry = rcu_dereference(nk->active);
	if (entry)
		ret = netkit_run(entry, skb, ret);
	switch (ret) {
	case NETKIT_NEXT:
	case NETKIT_PASS:
		eth_skb_pull_mac(skb);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
			dev_sw_netstats_tx_add(dev, 1, len);
			dev_sw_netstats_rx_add(peer, len);
		} else {
			goto drop_stats;
		}
		break;
	case NETKIT_REDIRECT:
		dev_sw_netstats_tx_add(dev, 1, len);
		skb_do_redirect(skb);
		break;
	case NETKIT_DROP:
	default:
drop:
		kfree_skb(skb);
drop_stats:
		dev_core_stats_tx_dropped_inc(dev);
		ret_dev = NET_XMIT_DROP;
		break;
	}
	rcu_read_unlock();
	bpf_net_ctx_clear(bpf_net_ctx);
	return ret_dev;
}

static int netkit_open(struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	if (!peer)
		return -ENOTCONN;
	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int netkit_close(struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);
	return 0;
}

static int netkit_get_iflink(const struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer;
	int iflink = 0;

	rcu_read_lock();
	peer = rcu_dereference(nk->peer);
	if (peer)
		iflink = READ_ONCE(peer->ifindex);
	rcu_read_unlock();
	return iflink;
}

static void netkit_set_multicast(struct net_device *dev)
{
	/* Nothing to do, we receive whatever gets pushed to us! */
}

static int netkit_set_macaddr(struct net_device *dev, void *sa)
{
	struct netkit *nk = netkit_priv(dev);

	if (nk->mode != NETKIT_L2)
		return -EOPNOTSUPP;

	return eth_mac_addr(dev, sa);
}

static void netkit_set_headroom(struct net_device *dev, int headroom)
{
	struct netkit *nk = netkit_priv(dev), *nk2;
	struct net_device *peer;

	if (headroom < 0)
		headroom = NET_SKB_PAD;

	rcu_read_lock();
	peer = rcu_dereference(nk->peer);
	if (unlikely(!peer))
		goto out;

	nk2 = netkit_priv(peer);
	nk->headroom = headroom;
	headroom = max(nk->headroom, nk2->headroom);

	peer->needed_headroom = headroom;
	dev->needed_headroom = headroom;
out:
	rcu_read_unlock();
}

INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev)
{
	return rcu_dereference(netkit_priv(dev)->peer);
}

static void netkit_get_stats(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	dev_fetch_sw_netstats(stats, dev->tstats);
	stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
}

static void netkit_uninit(struct net_device *dev);

static const struct net_device_ops netkit_netdev_ops = {
	.ndo_open		= netkit_open,
	.ndo_stop		= netkit_close,
	.ndo_start_xmit		= netkit_xmit,
	.ndo_set_rx_mode	= netkit_set_multicast,
	.ndo_set_rx_headroom	= netkit_set_headroom,
	.ndo_set_mac_address	= netkit_set_macaddr,
	.ndo_get_iflink		= netkit_get_iflink,
	.ndo_get_peer_dev	= netkit_peer_dev,
	.ndo_get_stats64	= netkit_get_stats,
	.ndo_uninit		= netkit_uninit,
	.ndo_features_check	= passthru_features_check,
};

static void netkit_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}

static const struct ethtool_ops netkit_ethtool_ops = {
	.get_drvinfo		= netkit_get_drvinfo,
};
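
/* Device setup: netkit is a purely virtual ethernet-like device pair with no
 * qdisc by default (IFF_NO_QUEUE), lockless tx and software-only features.
 * IFF_NOARP is set here and only cleared again at link creation time when the
 * pair is configured for NETKIT_L2 mode.
 */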
static void netkit_setup(struct net_device *dev)
{
	static const netdev_features_t netkit_features_hw_vlan =
		NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_STAG_TX |
		NETIF_F_HW_VLAN_STAG_RX;
	static const netdev_features_t netkit_features =
		netkit_features_hw_vlan |
		NETIF_F_SG |
		NETIF_F_FRAGLIST |
		NETIF_F_HW_CSUM |
		NETIF_F_RXCSUM |
		NETIF_F_SCTP_CRC |
		NETIF_F_HIGHDMA |
		NETIF_F_GSO_SOFTWARE |
		NETIF_F_GSO_ENCAP_ALL;

	ether_setup(dev);
	dev->max_mtu = ETH_MAX_MTU;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	dev->flags |= IFF_NOARP;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_DISABLE_NETPOLL;
	dev->lltx = true;

	dev->ethtool_ops = &netkit_ethtool_ops;
	dev->netdev_ops = &netkit_netdev_ops;

	dev->features |= netkit_features;
	dev->hw_features = netkit_features;
	dev->hw_enc_features = netkit_features;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
	dev->vlan_features = dev->features & ~netkit_features_hw_vlan;

	dev->needs_free_netdev = true;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}

static struct net *netkit_get_link_net(const struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static int netkit_check_policy(int policy, struct nlattr *tb,
			       struct netlink_ext_ack *extack)
{
	switch (policy) {
	case NETKIT_PASS:
	case NETKIT_DROP:
		return 0;
	default:
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "Provided default xmit policy not supported");
		return -EINVAL;
	}
}

static int netkit_check_mode(int mode, struct nlattr *tb,
			     struct netlink_ext_ack *extack)
{
	switch (mode) {
	case NETKIT_L2:
	case NETKIT_L3:
		return 0;
	default:
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "Provided device mode can only be L2 or L3");
		return -EINVAL;
	}
}

static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct nlattr *attr = tb[IFLA_ADDRESS];

	if (!attr)
		return 0;
	if (nla_len(attr) != ETH_ALEN)
		return -EINVAL;
	if (!is_valid_ether_addr(nla_data(attr)))
		return -EADDRNOTAVAIL;
	return 0;
}

static struct rtnl_link_ops netkit_link_ops;
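
/* Link creation: both devices of the pair are created from a single
 * RTM_NEWLINK request. Attributes for the peer can be nested in
 * IFLA_NETKIT_PEER_INFO; the peer is registered first, then the primary
 * device, and finally the two are wired up via their RCU peer pointers.
 * Setting a MAC address is only allowed in NETKIT_L2 mode.
 */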
static int netkit_new_link(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr;
	enum netkit_action policy_prim = NETKIT_PASS;
	enum netkit_action policy_peer = NETKIT_PASS;
	enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT;
	enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
	enum netkit_mode mode = NETKIT_L3;
	unsigned char ifname_assign_type;
	struct ifinfomsg *ifmp = NULL;
	struct net_device *peer;
	char ifname[IFNAMSIZ];
	struct netkit *nk;
	struct net *net;
	int err;

	if (data) {
		if (data[IFLA_NETKIT_MODE]) {
			attr = data[IFLA_NETKIT_MODE];
			mode = nla_get_u32(attr);
			err = netkit_check_mode(mode, attr, extack);
			if (err < 0)
				return err;
		}
		if (data[IFLA_NETKIT_PEER_INFO]) {
			attr = data[IFLA_NETKIT_PEER_INFO];
			ifmp = nla_data(attr);
			err = rtnl_nla_parse_ifinfomsg(peer_tb, attr, extack);
			if (err < 0)
				return err;
			err = netkit_validate(peer_tb, NULL, extack);
			if (err < 0)
				return err;
			tbp = peer_tb;
		}
		if (data[IFLA_NETKIT_SCRUB])
			scrub_prim = nla_get_u32(data[IFLA_NETKIT_SCRUB]);
		if (data[IFLA_NETKIT_PEER_SCRUB])
			scrub_peer = nla_get_u32(data[IFLA_NETKIT_PEER_SCRUB]);
		if (data[IFLA_NETKIT_POLICY]) {
			attr = data[IFLA_NETKIT_POLICY];
			policy_prim = nla_get_u32(attr);
			err = netkit_check_policy(policy_prim, attr, extack);
			if (err < 0)
				return err;
		}
		if (data[IFLA_NETKIT_PEER_POLICY]) {
			attr = data[IFLA_NETKIT_PEER_POLICY];
			policy_peer = nla_get_u32(attr);
			err = netkit_check_policy(policy_peer, attr, extack);
			if (err < 0)
				return err;
		}
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		ifname_assign_type = NET_NAME_USER;
	} else {
		strscpy(ifname, "nk%d", IFNAMSIZ);
		ifname_assign_type = NET_NAME_ENUM;
	}
	if (mode != NETKIT_L2 &&
	    (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
		return -EOPNOTSUPP;

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, ifname_assign_type,
				&netkit_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	netif_inherit_tso_max(peer, dev);

	if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
		eth_hw_addr_random(peer);
	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	nk = netkit_priv(peer);
	nk->primary = false;
	nk->policy = policy_peer;
	nk->scrub = scrub_peer;
	nk->mode = mode;
	bpf_mprog_bundle_init(&nk->bundle);

	err = register_netdevice(peer);
	put_net(net);
	if (err < 0)
		goto err_register_peer;
	netif_carrier_off(peer);
	if (mode == NETKIT_L2)
		dev_change_flags(peer, peer->flags & ~IFF_NOARP, NULL);

	err = rtnl_configure_link(peer, NULL, 0, NULL);
	if (err < 0)
		goto err_configure_peer;

	if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		strscpy(dev->name, "nk%d", IFNAMSIZ);

	nk = netkit_priv(dev);
	nk->primary = true;
	nk->policy = policy_prim;
	nk->scrub = scrub_prim;
	nk->mode = mode;
	bpf_mprog_bundle_init(&nk->bundle);

	err = register_netdevice(dev);
	if (err < 0)
		goto err_configure_peer;
	netif_carrier_off(dev);
	if (mode == NETKIT_L2)
		dev_change_flags(dev, dev->flags & ~IFF_NOARP, NULL);

	rcu_assign_pointer(netkit_priv(dev)->peer, peer);
	rcu_assign_pointer(netkit_priv(peer)->peer, dev);
	return 0;
err_configure_peer:
	unregister_netdevice(peer);
	return err;
err_register_peer:
	free_netdev(peer);
	return err;
}
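
/* BPF program management: the attached programs live in a bpf_mprog bundle
 * embedded in struct netkit. nk->active points at the currently published
 * entry of that bundle (or NULL when nothing is attached) and is what the
 * transmit fast path dereferences under RCU. All updates below happen under
 * the rtnl lock and are made visible with rcu_assign_pointer() followed by
 * synchronize_rcu().
 */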
static struct bpf_mprog_entry *netkit_entry_fetch(struct net_device *dev,
						  bool bundle_fallback)
{
	struct netkit *nk = netkit_priv(dev);
	struct bpf_mprog_entry *entry;

	ASSERT_RTNL();
	entry = rcu_dereference_rtnl(nk->active);
	if (entry)
		return entry;
	if (bundle_fallback)
		return &nk->bundle.a;
	return NULL;
}

static void netkit_entry_update(struct net_device *dev,
				struct bpf_mprog_entry *entry)
{
	struct netkit *nk = netkit_priv(dev);

	ASSERT_RTNL();
	rcu_assign_pointer(nk->active, entry);
}

static void netkit_entry_sync(void)
{
	synchronize_rcu();
}
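
/* Resolve the BPF attach target: programs and links are always managed
 * through the primary device, even when the attach type selects the peer.
 * BPF_NETKIT_PEER resolves to the peer device behind the primary one.
 */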
static struct net_device *netkit_dev_fetch(struct net *net, u32 ifindex, u32 which)
{
	struct net_device *dev;
	struct netkit *nk;

	ASSERT_RTNL();

	switch (which) {
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);
	if (dev->netdev_ops != &netkit_netdev_ops)
		return ERR_PTR(-ENXIO);

	nk = netkit_priv(dev);
	if (!nk->primary)
		return ERR_PTR(-EACCES);
	if (which == BPF_NETKIT_PEER) {
		dev = rcu_dereference_rtnl(nk->peer);
		if (!dev)
			return ERR_PTR(-ENODEV);
	}
	return dev;
}
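
/* BPF_PROG_ATTACH/DETACH/QUERY entry points used by the bpf() syscall for
 * plain (non-link) program attachment. A new mprog entry is published only
 * when the attach actually produced a different one; detaching the last
 * program clears nk->active again so the fast path skips netkit_run().
 */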
int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_mprog_entry *entry, *entry_new;
	struct bpf_prog *replace_prog = NULL;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex,
			       attr->attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	entry = netkit_entry_fetch(dev, true);
	if (attr->attach_flags & BPF_F_REPLACE) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd,
						 prog->type);
		if (IS_ERR(replace_prog)) {
			ret = PTR_ERR(replace_prog);
			replace_prog = NULL;
			goto out;
		}
	}
	ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog,
			       attr->attach_flags, attr->relative_fd,
			       attr->expected_revision);
	if (!ret) {
		if (entry != entry_new) {
			netkit_entry_update(dev, entry_new);
			netkit_entry_sync();
		}
		bpf_mprog_commit(entry);
	}
out:
	if (replace_prog)
		bpf_prog_put(replace_prog);
	rtnl_unlock();
	return ret;
}

int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex,
			       attr->attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	entry = netkit_entry_fetch(dev, false);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, attr->attach_flags,
			       attr->relative_fd, attr->expected_revision);
	if (!ret) {
		if (!bpf_mprog_total(entry_new))
			entry_new = NULL;
		netkit_entry_update(dev, entry_new);
		netkit_entry_sync();
		bpf_mprog_commit(entry);
	}
out:
	rtnl_unlock();
	return ret;
}

int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns,
			       attr->query.target_ifindex,
			       attr->query.attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	ret = bpf_mprog_query(attr, uattr, netkit_entry_fetch(dev, false));
out:
	rtnl_unlock();
	return ret;
}
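
/* bpf_link based attachment: a netkit_link pins the target device while the
 * link is alive. On link release the program is detached from the mprog
 * entry and nkl->dev is cleared; netkit_release_all() does the same clearing
 * when the device goes away first, so a stale link merely becomes a no-op.
 */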
static struct netkit_link *netkit_link(const struct bpf_link *link)
{
	return container_of(link, struct netkit_link, link);
}

static int netkit_link_prog_attach(struct bpf_link *link, u32 flags,
				   u32 id_or_fd, u64 revision)
{
	struct netkit_link *nkl = netkit_link(link);
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev = nkl->dev;
	int ret;

	ASSERT_RTNL();
	entry = netkit_entry_fetch(dev, true);
	ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags,
			       id_or_fd, revision);
	if (!ret) {
		if (entry != entry_new) {
			netkit_entry_update(dev, entry_new);
			netkit_entry_sync();
		}
		bpf_mprog_commit(entry);
	}
	return ret;
}

static void netkit_link_release(struct bpf_link *link)
{
	struct netkit_link *nkl = netkit_link(link);
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = nkl->dev;
	if (!dev)
		goto out;
	entry = netkit_entry_fetch(dev, false);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0);
	if (!ret) {
		if (!bpf_mprog_total(entry_new))
			entry_new = NULL;
		netkit_entry_update(dev, entry_new);
		netkit_entry_sync();
		bpf_mprog_commit(entry);
		nkl->dev = NULL;
	}
out:
	WARN_ON_ONCE(ret);
	rtnl_unlock();
}

static int netkit_link_update(struct bpf_link *link, struct bpf_prog *nprog,
			      struct bpf_prog *oprog)
{
	struct netkit_link *nkl = netkit_link(link);
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = nkl->dev;
	if (!dev) {
		ret = -ENOLINK;
		goto out;
	}
	if (oprog && link->prog != oprog) {
		ret = -EPERM;
		goto out;
	}
	oprog = link->prog;
	if (oprog == nprog) {
		bpf_prog_put(nprog);
		goto out;
	}
	entry = netkit_entry_fetch(dev, false);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_attach(entry, &entry_new, nprog, link, oprog,
			       BPF_F_REPLACE | BPF_F_ID,
			       link->prog->aux->id, 0);
	if (!ret) {
		WARN_ON_ONCE(entry != entry_new);
		oprog = xchg(&link->prog, nprog);
		bpf_prog_put(oprog);
		bpf_mprog_commit(entry);
	}
out:
	rtnl_unlock();
	return ret;
}

static void netkit_link_dealloc(struct bpf_link *link)
{
	kfree(netkit_link(link));
}

static void netkit_link_fdinfo(const struct bpf_link *link, struct seq_file *seq)
{
	const struct netkit_link *nkl = netkit_link(link);
	u32 ifindex = 0;

	rtnl_lock();
	if (nkl->dev)
		ifindex = nkl->dev->ifindex;
	rtnl_unlock();

	seq_printf(seq, "ifindex:\t%u\n", ifindex);
	seq_printf(seq, "attach_type:\t%u (%s)\n",
		   nkl->location,
		   nkl->location == BPF_NETKIT_PRIMARY ? "primary" : "peer");
}

static int netkit_link_fill_info(const struct bpf_link *link,
				 struct bpf_link_info *info)
{
	const struct netkit_link *nkl = netkit_link(link);
	u32 ifindex = 0;

	rtnl_lock();
	if (nkl->dev)
		ifindex = nkl->dev->ifindex;
	rtnl_unlock();

	info->netkit.ifindex = ifindex;
	info->netkit.attach_type = nkl->location;
	return 0;
}

static int netkit_link_detach(struct bpf_link *link)
{
	netkit_link_release(link);
	return 0;
}

static const struct bpf_link_ops netkit_link_lops = {
	.release		= netkit_link_release,
	.detach			= netkit_link_detach,
	.dealloc		= netkit_link_dealloc,
	.update_prog		= netkit_link_update,
	.show_fdinfo		= netkit_link_fdinfo,
	.fill_link_info		= netkit_link_fill_info,
};

static int netkit_link_init(struct netkit_link *nkl,
			    struct bpf_link_primer *link_primer,
			    const union bpf_attr *attr,
			    struct net_device *dev,
			    struct bpf_prog *prog)
{
	bpf_link_init(&nkl->link, BPF_LINK_TYPE_NETKIT,
		      &netkit_link_lops, prog);
	nkl->location = attr->link_create.attach_type;
	nkl->dev = dev;
	return bpf_link_prime(&nkl->link, link_primer);
}

int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct netkit_link *nkl;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns,
			       attr->link_create.target_ifindex,
			       attr->link_create.attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	nkl = kzalloc(sizeof(*nkl), GFP_KERNEL_ACCOUNT);
	if (!nkl) {
		ret = -ENOMEM;
		goto out;
	}
	ret = netkit_link_init(nkl, &link_primer, attr, dev, prog);
	if (ret) {
		kfree(nkl);
		goto out;
	}
	ret = netkit_link_prog_attach(&nkl->link,
				      attr->link_create.flags,
				      attr->link_create.netkit.relative_fd,
				      attr->link_create.netkit.expected_revision);
	if (ret) {
		nkl->dev = NULL;
		bpf_link_cleanup(&link_primer);
		goto out;
	}
	ret = bpf_link_settle(&link_primer);
out:
	rtnl_unlock();
	return ret;
}
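
/* Device teardown: unpublish the active mprog entry, wait for concurrent
 * senders to leave the RCU section, then drop the references to directly
 * attached programs and detach any still-live bpf_links from the device.
 */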
static void netkit_release_all(struct net_device *dev)
{
	struct bpf_mprog_entry *entry;
	struct bpf_tuple tuple = {};
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;

	entry = netkit_entry_fetch(dev, false);
	if (!entry)
		return;
	netkit_entry_update(dev, NULL);
	netkit_entry_sync();
	bpf_mprog_foreach_tuple(entry, fp, cp, tuple) {
		if (tuple.link)
			netkit_link(tuple.link)->dev = NULL;
		else
			bpf_prog_put(tuple.prog);
	}
}

static void netkit_uninit(struct net_device *dev)
{
	netkit_release_all(dev);
}

static void netkit_del_link(struct net_device *dev, struct list_head *head)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	RCU_INIT_POINTER(nk->peer, NULL);
	unregister_netdevice_queue(dev, head);
	if (peer) {
		nk = netkit_priv(peer);
		RCU_INIT_POINTER(nk->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}
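
/* Changelink: only the default xmit policies may be updated after creation,
 * and only via the primary device. Mode, scrubbing and peer info are fixed
 * once the pair exists.
 */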
static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);
	enum netkit_action policy;
	struct nlattr *attr;
	int err;

	if (!nk->primary) {
		NL_SET_ERR_MSG(extack,
			       "netkit link settings can be changed only through the primary device");
		return -EACCES;
	}

	if (data[IFLA_NETKIT_MODE]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_MODE],
				    "netkit link operating mode cannot be changed after device creation");
		return -EACCES;
	}

	if (data[IFLA_NETKIT_SCRUB]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_SCRUB],
				    "netkit scrubbing cannot be changed after device creation");
		return -EACCES;
	}

	if (data[IFLA_NETKIT_PEER_SCRUB]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_SCRUB],
				    "netkit scrubbing cannot be changed after device creation");
		return -EACCES;
	}

	if (data[IFLA_NETKIT_PEER_INFO]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
				    "netkit peer info cannot be changed after device creation");
		return -EINVAL;
	}

	if (data[IFLA_NETKIT_POLICY]) {
		attr = data[IFLA_NETKIT_POLICY];
		policy = nla_get_u32(attr);
		err = netkit_check_policy(policy, attr, extack);
		if (err)
			return err;
		WRITE_ONCE(nk->policy, policy);
	}

	if (data[IFLA_NETKIT_PEER_POLICY]) {
		err = -EOPNOTSUPP;
		attr = data[IFLA_NETKIT_PEER_POLICY];
		policy = nla_get_u32(attr);
		if (peer)
			err = netkit_check_policy(policy, attr, extack);
		if (err)
			return err;
		nk = netkit_priv(peer);
		WRITE_ONCE(nk->policy, policy);
	}

	return 0;
}

static size_t netkit_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_POLICY */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_SCRUB */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */
	       nla_total_size(sizeof(u8)) +  /* IFLA_NETKIT_PRIMARY */
	       0;
}

static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	if (nla_put_u8(skb, IFLA_NETKIT_PRIMARY, nk->primary))
		return -EMSGSIZE;
	if (nla_put_u32(skb, IFLA_NETKIT_POLICY, nk->policy))
		return -EMSGSIZE;
	if (nla_put_u32(skb, IFLA_NETKIT_MODE, nk->mode))
		return -EMSGSIZE;
	if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub))
		return -EMSGSIZE;

	if (peer) {
		nk = netkit_priv(peer);
		if (nla_put_u32(skb, IFLA_NETKIT_PEER_POLICY, nk->policy))
			return -EMSGSIZE;
		if (nla_put_u32(skb, IFLA_NETKIT_PEER_SCRUB, nk->scrub))
			return -EMSGSIZE;
	}

	return 0;
}

static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
	[IFLA_NETKIT_PEER_INFO]		= { .len = sizeof(struct ifinfomsg) },
	[IFLA_NETKIT_MODE]		= { .type = NLA_U32 },
	[IFLA_NETKIT_POLICY]		= { .type = NLA_U32 },
	[IFLA_NETKIT_PEER_POLICY]	= { .type = NLA_U32 },
	[IFLA_NETKIT_SCRUB]		= NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
	[IFLA_NETKIT_PEER_SCRUB]	= NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
	[IFLA_NETKIT_PRIMARY]		= { .type = NLA_REJECT,
					    .reject_message = "Primary attribute is read-only" },
};

static struct rtnl_link_ops netkit_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct netkit),
	.setup		= netkit_setup,
	.newlink	= netkit_new_link,
	.dellink	= netkit_del_link,
	.changelink	= netkit_change_link,
	.get_link_net	= netkit_get_link_net,
	.get_size	= netkit_get_size,
	.fill_info	= netkit_fill_info,
	.policy		= netkit_policy,
	.validate	= netkit_validate,
	.maxtype	= IFLA_NETKIT_MAX,
};
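
/* Module init/exit: the netkit verdicts are required to stay numerically
 * identical to the tcx ones, since netkit programs follow the same
 * sched_cls-style return code convention.
 */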
static __init int netkit_init(void)
{
	BUILD_BUG_ON((int)NETKIT_NEXT != (int)TCX_NEXT ||
		     (int)NETKIT_PASS != (int)TCX_PASS ||
		     (int)NETKIT_DROP != (int)TCX_DROP ||
		     (int)NETKIT_REDIRECT != (int)TCX_REDIRECT);

	return rtnl_link_register(&netkit_link_ops);
}

static __exit void netkit_exit(void)
{
	rtnl_link_unregister(&netkit_link_ops);
}

module_init(netkit_init);
module_exit(netkit_exit);

MODULE_DESCRIPTION("BPF-programmable network device");
MODULE_AUTHOR("Daniel Borkmann <daniel@iogearbox.net>");
MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);