Lines matching refs:dev, i.e. references to the identifier dev in the BPF devmap implementation (kernel/bpf/devmap.c). The leading number on each hit is the line in that file; the trailing note gives the enclosing function and whether the hit is a struct member, a local, or an argument.
59 struct net_device *dev; member
66 struct net_device *dev; /* must be first member, due to tracepoint */ member
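The two "member" hits at source lines 59 and 66 are the dev fields of the per-CPU bulk queue and of the map entry. A rough sketch of both containers, reconstructed from the other hits in this listing; the surrounding fields and their order are assumptions, not verbatim kernel source:

struct xdp_dev_bulk_queue {
        struct xdp_frame *q[DEV_MAP_BULK_SIZE];
        struct list_head flush_node;
        struct net_device *dev;         /* line 59: device the queued frames go out on */
        struct net_device *dev_rx;
        struct bpf_prog *xdp_prog;
        unsigned int count;
};

struct bpf_dtab_netdev {
        struct net_device *dev;         /* line 66: must stay the first member; the devmap
                                         * tracepoints cast entries to a mirror struct,
                                         * see the BUILD_BUG_ON at line 1163 */
        struct hlist_node index_hlist;
        struct bpf_dtab *dtab;
        struct bpf_prog *xdp_prog;
        struct rcu_head rcu;
        unsigned int idx;
        struct bpf_devmap_val val;
};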
218 struct bpf_dtab_netdev *dev; in dev_map_free() local
224 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_free()
225 hlist_del_rcu(&dev->index_hlist); in dev_map_free()
226 if (dev->xdp_prog) in dev_map_free()
227 bpf_prog_put(dev->xdp_prog); in dev_map_free()
228 dev_put(dev->dev); in dev_map_free()
229 kfree(dev); in dev_map_free()
236 struct bpf_dtab_netdev *dev; in dev_map_free() local
238 dev = rcu_dereference_raw(dtab->netdev_map[i]); in dev_map_free()
239 if (!dev) in dev_map_free()
242 if (dev->xdp_prog) in dev_map_free()
243 bpf_prog_put(dev->xdp_prog); in dev_map_free()
244 dev_put(dev->dev); in dev_map_free()
245 kfree(dev); in dev_map_free()
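Hedged sketch of the teardown behind the dev_map_free() hits (218-245): hash-type maps walk every bucket, array-type maps walk every slot, and each entry drops its program reference and its netdev reference before being freed. Field and helper names beyond the hits above are assumptions.

static void dev_map_free_sketch(struct bpf_dtab *dtab)
{
        int i;

        if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                for (i = 0; i < dtab->n_buckets; i++) {
                        struct hlist_head *head = &dtab->dev_index_head[i];
                        struct bpf_dtab_netdev *dev;
                        struct hlist_node *next;

                        hlist_for_each_entry_safe(dev, next, head, index_hlist) {
                                hlist_del_rcu(&dev->index_hlist);
                                if (dev->xdp_prog)
                                        bpf_prog_put(dev->xdp_prog);   /* drop prog ref */
                                dev_put(dev->dev);                     /* drop netdev ref */
                                kfree(dev);
                        }
                }
        } else {
                for (i = 0; i < dtab->map.max_entries; i++) {
                        struct bpf_dtab_netdev *dev;

                        dev = rcu_dereference_raw(dtab->netdev_map[i]);
                        if (!dev)
                                continue;
                        if (dev->xdp_prog)
                                bpf_prog_put(dev->xdp_prog);
                        dev_put(dev->dev);
                        kfree(dev);
                }
        }
}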
279 struct bpf_dtab_netdev *dev; in __dev_map_hash_lookup_elem() local
281 hlist_for_each_entry_rcu(dev, head, index_hlist, in __dev_map_hash_lookup_elem()
283 if (dev->idx == key) in __dev_map_hash_lookup_elem()
284 return dev; in __dev_map_hash_lookup_elem()
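The hits at 279-284 are the RCU hash lookup. A sketch built around them; the bucket-selection helper and the lockdep condition are assumptions:

static struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
        struct hlist_head *head = dev_map_index_hash(dtab, key);      /* assumed helper */
        struct bpf_dtab_netdev *dev;

        hlist_for_each_entry_rcu(dev, head, index_hlist,
                                 lockdep_is_held(&dtab->index_lock))
                if (dev->idx == key)
                        return dev;

        return NULL;
}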
294 struct bpf_dtab_netdev *dev, *next_dev; in dev_map_hash_get_next_key() local
303 dev = __dev_map_hash_lookup_elem(map, idx); in dev_map_hash_get_next_key()
304 if (!dev) in dev_map_hash_get_next_key()
307 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)), in dev_map_hash_get_next_key()
339 struct xdp_txq_info txq = { .dev = tx_dev }; in dev_map_bpf_prog_run()
340 struct xdp_rxq_info rxq = { .dev = rx_dev }; in dev_map_bpf_prog_run()
378 struct net_device *dev = bq->dev; in bq_xmit_all() local
394 to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx); in bq_xmit_all()
399 sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags); in bq_xmit_all()
416 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err); in bq_xmit_all()
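A condensed sketch of what the bq_xmit_all() hits (378-416) do: run the optional per-entry devmap program over the queued frames, hand the survivors to the driver's ndo_xdp_xmit, and record the outcome in the devmap_xmit tracepoint. Error handling and the freeing of unsent frames are omitted here.

static void bq_xmit_all_sketch(struct xdp_dev_bulk_queue *bq, u32 flags)
{
        struct net_device *dev = bq->dev;
        unsigned int cnt = bq->count;
        int to_send = cnt, sent = 0, err = 0;

        if (unlikely(!cnt))
                return;

        if (bq->xdp_prog)       /* program attached to the devmap entry, if any */
                to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);

        if (to_send)
                sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
        if (sent < 0) {
                err = sent;
                sent = 0;
        }

        bq->count = 0;
        trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}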
456 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf, in bq_enqueue() argument
459 struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq); in bq_enqueue()
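bq_enqueue() (456-459) batches frames on the per-CPU bulk queue hanging off the target net_device. A minimal sketch, assuming the queue is pushed to the driver once it reaches DEV_MAP_BULK_SIZE (the real function also links the queue onto a flush list drained at the end of NAPI polling, which is omitted here):

static void bq_enqueue_sketch(struct net_device *dev, struct xdp_frame *xdpf,
                              struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
        struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

        if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
                bq_xmit_all(bq, 0);     /* queue full: push frames to the driver */

        /* remember where the frames came from and which program to run on flush */
        bq->dev_rx = dev_rx;
        bq->xdp_prog = xdp_prog;
        bq->q[bq->count++] = xdpf;
}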
482 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, in __xdp_enqueue() argument
488 if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT)) in __xdp_enqueue()
491 if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) && in __xdp_enqueue()
495 err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf)); in __xdp_enqueue()
499 bq_enqueue(dev, xdpf, dev_rx, xdp_prog); in __xdp_enqueue()
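The __xdp_enqueue() hits (482-499) are the capability checks done before a frame is queued: the target must advertise ndo_xdp_xmit support, and multi-buffer frames additionally require SG support. A sketch with assumed return values:

static inline int __xdp_enqueue_sketch(struct net_device *dev, struct xdp_frame *xdpf,
                                       struct net_device *dev_rx,
                                       struct bpf_prog *xdp_prog)
{
        int err;

        if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
                return -EOPNOTSUPP;

        if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
                     xdp_frame_has_frags(xdpf)))
                return -EOPNOTSUPP;

        err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
        if (unlikely(err))
                return err;

        bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
        return 0;
}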
505 struct xdp_txq_info txq = { .dev = dst->dev }; in dev_map_bpf_prog_run_skb()
524 trace_xdp_exception(dst->dev, dst->xdp_prog, act); in dev_map_bpf_prog_run_skb()
534 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, in dev_xdp_enqueue() argument
537 return __xdp_enqueue(dev, xdpf, dev_rx, NULL); in dev_xdp_enqueue()
543 struct net_device *dev = dst->dev; in dev_map_enqueue() local
545 return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog); in dev_map_enqueue()
553 if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT)) in is_valid_dst()
556 if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) && in is_valid_dst()
560 if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf))) in is_valid_dst()
576 bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog); in dev_map_enqueue_clone()
594 static int get_upper_ifindexes(struct net_device *dev, int *indexes) in get_upper_ifindexes() argument
600 netdev_for_each_upper_dev_rcu(dev, upper, iter) { in get_upper_ifindexes()
629 if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex)) in dev_map_enqueue_multi()
653 dst->dev->ifindex)) in dev_map_enqueue_multi()
673 bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog); in dev_map_enqueue_multi()
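The hits from 594 through 673 belong to the BPF_F_EXCLUDE_INGRESS broadcast path: the ifindexes of the ingress device and its upper devices are collected, map entries matching one of them are skipped, and every remaining destination gets a clone of the frame (the last one gets the original via bq_enqueue at 673). Sketches of the two small helpers involved; duplicate filtering and array bounds are assumptions:

static int get_upper_ifindexes_sketch(struct net_device *dev, int *indexes)
{
        struct net_device *upper;
        struct list_head *iter;
        int n = 0;

        netdev_for_each_upper_dev_rcu(dev, upper, iter)
                indexes[n++] = upper->ifindex;

        return n;
}

static bool is_ifindex_excluded_sketch(int *excluded, int num_excluded, int ifindex)
{
        int i;

        for (i = 0; i < num_excluded; i++)
                if (excluded[i] == ifindex)
                        return true;

        return false;
}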
685 err = xdp_ok_fwd_dev(dst->dev, skb->len); in dev_map_generic_redirect()
696 skb->dev = dst->dev; in dev_map_generic_redirect()
722 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, in dev_map_redirect_multi() argument
736 num_excluded = get_upper_ifindexes(dev, excluded_devices); in dev_map_redirect_multi()
737 excluded_devices[num_excluded++] = dev->ifindex; in dev_map_redirect_multi()
747 if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex)) in dev_map_redirect_multi()
768 dst->dev->ifindex)) in dev_map_redirect_multi()
811 struct bpf_dtab_netdev *dev; in __dev_map_entry_free() local
813 dev = container_of(rcu, struct bpf_dtab_netdev, rcu); in __dev_map_entry_free()
814 if (dev->xdp_prog) in __dev_map_entry_free()
815 bpf_prog_put(dev->xdp_prog); in __dev_map_entry_free()
816 dev_put(dev->dev); in __dev_map_entry_free()
817 kfree(dev); in __dev_map_entry_free()
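The hits at 811-817 are the RCU callback used whenever an entry is removed or replaced: teardown happens only after a grace period, so XDP redirects still running under rcu_read_lock() can keep using the entry safely. Reconstructed almost entirely from the hits above:

static void __dev_map_entry_free(struct rcu_head *rcu)
{
        struct bpf_dtab_netdev *dev;

        dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
        if (dev->xdp_prog)
                bpf_prog_put(dev->xdp_prog);
        dev_put(dev->dev);
        kfree(dev);
}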
865 struct bpf_dtab_netdev *dev; in __dev_map_alloc_node() local
867 dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev), in __dev_map_alloc_node()
870 if (!dev) in __dev_map_alloc_node()
873 dev->dev = dev_get_by_index(net, val->ifindex); in __dev_map_alloc_node()
874 if (!dev->dev) in __dev_map_alloc_node()
887 dev->idx = idx; in __dev_map_alloc_node()
889 dev->xdp_prog = prog; in __dev_map_alloc_node()
890 dev->val.bpf_prog.id = prog->aux->id; in __dev_map_alloc_node()
892 dev->xdp_prog = NULL; in __dev_map_alloc_node()
893 dev->val.bpf_prog.id = 0; in __dev_map_alloc_node()
895 dev->val.ifindex = val->ifindex; in __dev_map_alloc_node()
897 return dev; in __dev_map_alloc_node()
901 dev_put(dev->dev); in __dev_map_alloc_node()
903 kfree(dev); in __dev_map_alloc_node()
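__dev_map_alloc_node() (865-903) builds a new entry: allocate it on the map's NUMA node, take a reference on the target net_device, optionally take a reference on a per-entry XDP program, and fill in the user-visible bpf_devmap_val. A sketch; the GFP flags, the program lookup, and the error codes are assumptions:

static struct bpf_dtab_netdev *__dev_map_alloc_node_sketch(struct net *net,
                                                           struct bpf_dtab *dtab,
                                                           struct bpf_devmap_val *val,
                                                           unsigned int idx)
{
        struct bpf_prog *prog = NULL;
        struct bpf_dtab_netdev *dev;

        dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
                                   GFP_NOWAIT | __GFP_NOWARN,
                                   dtab->map.numa_node);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        dev->dev = dev_get_by_index(net, val->ifindex); /* takes a netdev reference */
        if (!dev->dev)
                goto err_free;

        if (val->bpf_prog.fd > 0) {
                prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
                                             BPF_PROG_TYPE_XDP, false);
                if (IS_ERR(prog))
                        goto err_put_dev;
        }

        dev->idx = idx;
        if (prog) {
                dev->xdp_prog = prog;
                dev->val.bpf_prog.id = prog->aux->id;
        } else {
                dev->xdp_prog = NULL;
                dev->val.bpf_prog.id = 0;
        }
        dev->val.ifindex = val->ifindex;

        return dev;

err_put_dev:
        dev_put(dev->dev);
err_free:
        kfree(dev);
        return ERR_PTR(-EINVAL);
}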
911 struct bpf_dtab_netdev *dev, *old_dev; in __dev_map_update_elem() local
926 dev = NULL; in __dev_map_update_elem()
931 dev = __dev_map_alloc_node(net, dtab, &val, i); in __dev_map_update_elem()
932 if (IS_ERR(dev)) in __dev_map_update_elem()
933 return PTR_ERR(dev); in __dev_map_update_elem()
940 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev))); in __dev_map_update_elem()
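The array-map update (911-940) publishes the new entry by atomically exchanging the slot pointer; the displaced entry, if any, is handed to __dev_map_entry_free() via call_rcu(). A condensed sketch with a simplified signature; key/flags validation is omitted and the old-entry handling is an assumption based on the free callback above:

static int __dev_map_update_elem_sketch(struct net *net, struct bpf_dtab *dtab,
                                        struct bpf_devmap_val *val, u32 i)
{
        struct bpf_dtab_netdev *dev, *old_dev;

        dev = __dev_map_alloc_node(net, dtab, val, i);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        /* Publish the new entry; concurrent readers see either the old or the new pointer. */
        old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
        if (old_dev)
                call_rcu(&old_dev->rcu, __dev_map_entry_free);

        return 0;
}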
960 struct bpf_dtab_netdev *dev, *old_dev; in __dev_map_hash_update_elem() local
978 dev = __dev_map_alloc_node(net, dtab, &val, idx); in __dev_map_hash_update_elem()
979 if (IS_ERR(dev)) { in __dev_map_hash_update_elem()
980 err = PTR_ERR(dev); in __dev_map_hash_update_elem()
989 call_rcu(&dev->rcu, __dev_map_entry_free); in __dev_map_hash_update_elem()
995 hlist_add_head_rcu(&dev->index_hlist, in __dev_map_hash_update_elem()
1083 struct bpf_dtab_netdev *dev; in dev_map_hash_remove_netdev() local
1089 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_hash_remove_netdev()
1090 if (netdev != dev->dev) in dev_map_hash_remove_netdev()
1094 hlist_del_rcu(&dev->index_hlist); in dev_map_hash_remove_netdev()
1095 call_rcu(&dev->rcu, __dev_map_entry_free); in dev_map_hash_remove_netdev()
1119 per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev; in dev_map_notification()
1135 struct bpf_dtab_netdev *dev, *odev; in dev_map_notification() local
1137 dev = rcu_dereference(dtab->netdev_map[i]); in dev_map_notification()
1138 if (!dev || netdev != dev->dev) in dev_map_notification()
1140 odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL)); in dev_map_notification()
1141 if (dev == odev) { in dev_map_notification()
1142 call_rcu(&dev->rcu, in dev_map_notification()
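The dev_map_notification() hits at 1135-1142 are the NETDEV_UNREGISTER path for array-type maps: every slot pointing at the vanishing netdev is cleared with cmpxchg() so a racing map update is not clobbered, then freed after a grace period (the hit at 1119 is the NETDEV_REGISTER side, pointing each per-CPU bulk queue at its netdev). Sketch of that loop, with the surrounding iteration over maps assumed:

        for (i = 0; i < dtab->map.max_entries; i++) {
                struct bpf_dtab_netdev *dev, *odev;

                dev = rcu_dereference(dtab->netdev_map[i]);
                if (!dev || netdev != dev->dev)
                        continue;

                /* Only clear the slot if it still holds the entry we looked at. */
                odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i],
                                             RCU_INITIALIZER(dev), NULL));
                if (dev == odev)
                        call_rcu(&dev->rcu, __dev_map_entry_free);
        }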
1163 BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) != in dev_map_init()
1164 offsetof(struct _bpf_dtab_netdev, dev)); in dev_map_init()
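The BUILD_BUG_ON at 1163-1164 ties back to the "must be first member, due to tracepoint" comment at line 66: the devmap tracepoints only see a stripped-down mirror of the entry, so the dev member has to sit at the same offset in both definitions. An illustration; the mirror struct lives in the tracepoint headers and its exact contents here are an assumption:

/* mirror used by the xdp tracepoints (assumed minimal definition) */
struct _bpf_dtab_netdev {
        struct net_device *dev;
};

static int __init dev_map_init_sketch(void)
{
        /* catch at build time any reshuffling that would break the tracepoints */
        BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
                     offsetof(struct _bpf_dtab_netdev, dev));
        return 0;
}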