
Lines Matching refs:dev in kernel/bpf/devmap.c

58 struct net_device *dev; member
64 struct net_device *dev; /* must be first member, due to tracepoint */ member
222 struct bpf_dtab_netdev *dev; in dev_map_free() local
228 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_free()
229 hlist_del_rcu(&dev->index_hlist); in dev_map_free()
230 if (dev->xdp_prog) in dev_map_free()
231 bpf_prog_put(dev->xdp_prog); in dev_map_free()
232 dev_put(dev->dev); in dev_map_free()
233 kfree(dev); in dev_map_free()
240 struct bpf_dtab_netdev *dev; in dev_map_free() local
242 dev = dtab->netdev_map[i]; in dev_map_free()
243 if (!dev) in dev_map_free()
246 if (dev->xdp_prog) in dev_map_free()
247 bpf_prog_put(dev->xdp_prog); in dev_map_free()
248 dev_put(dev->dev); in dev_map_free()
249 kfree(dev); in dev_map_free()
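The dev_map_free() lines above show the teardown pattern for a devmap entry: drop the optional XDP program reference, drop the mandatory net_device reference, then free the entry itself. Below is a minimal userspace sketch of that put-then-free ordering; struct ref_obj, struct entry, and the helpers are hypothetical stand-ins, not kernel API.

/* Put-then-free teardown, modeled on dev_map_free(): release every
 * reference an entry holds before freeing the entry itself.
 * All types here are hypothetical stand-ins for kernel objects. */
#include <stdatomic.h>
#include <stdlib.h>

struct ref_obj {
	atomic_int refcnt;          /* stand-in for a netdev/prog refcount */
};

static void obj_put(struct ref_obj *obj)
{
	/* free on last reference, like dev_put()/bpf_prog_put() */
	if (atomic_fetch_sub(&obj->refcnt, 1) == 1)
		free(obj);
}

struct entry {
	struct ref_obj *prog;       /* optional, like dev->xdp_prog */
	struct ref_obj *dev;        /* always held, like dev->dev */
};

static void entry_free(struct entry *e)
{
	if (e->prog)                /* prog reference is optional */
		obj_put(e->prog);
	obj_put(e->dev);            /* device reference is mandatory */
	free(e);
}

int main(void)
{
	struct ref_obj *d = calloc(1, sizeof(*d));
	struct entry *e = calloc(1, sizeof(*e));

	atomic_store(&d->refcnt, 1);
	e->dev = d;                 /* no prog attached in this example */
	entry_free(e);
	return 0;
}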
279 struct bpf_dtab_netdev *dev; in __dev_map_hash_lookup_elem() local
281 hlist_for_each_entry_rcu(dev, head, index_hlist, in __dev_map_hash_lookup_elem()
283 if (dev->idx == key) in __dev_map_hash_lookup_elem()
284 return dev; in __dev_map_hash_lookup_elem()
294 struct bpf_dtab_netdev *dev, *next_dev; in dev_map_hash_get_next_key() local
303 dev = __dev_map_hash_lookup_elem(map, idx); in dev_map_hash_get_next_key()
304 if (!dev) in dev_map_hash_get_next_key()
307 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)), in dev_map_hash_get_next_key()
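__dev_map_hash_lookup_elem() hashes the key to a bucket and walks the chain until dev->idx matches; dev_map_hash_get_next_key() continues the same walk from an existing entry. A single-threaded userspace sketch of that bucket walk follows (the kernel version runs under RCU); the node type and bucket count are illustrative only.

/* Bucket walk, modeled on __dev_map_hash_lookup_elem(): hash the key
 * to a bucket, then scan the chain comparing each node's idx. */
#include <stddef.h>
#include <stdio.h>

#define NBUCKETS 8

struct node {
	unsigned int idx;
	struct node *next;
};

static struct node *buckets[NBUCKETS];

static struct node *hash_lookup(unsigned int key)
{
	struct node *n;

	for (n = buckets[key % NBUCKETS]; n; n = n->next)
		if (n->idx == key)  /* same test as dev->idx == key */
			return n;
	return NULL;
}

int main(void)
{
	static struct node a = { .idx = 42 };

	buckets[42 % NBUCKETS] = &a;
	printf("found: %p\n", (void *)hash_lookup(42));
	return 0;
}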
346 struct net_device *dev = bq->dev; in bq_xmit_all() local
359 sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags); in bq_xmit_all()
369 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err); in bq_xmit_all()
424 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf, in bq_enqueue() argument
428 struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq); in bq_enqueue()
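bq_enqueue() batches frames in a per-CPU, per-device bulk queue, and bq_xmit_all() flushes the whole batch through the driver's ndo_xdp_xmit hook, amortizing the per-packet cost of the driver call. A sketch of that batching shape, with a fake transmit hook standing in for the driver op:

/* Bulk-queue pattern, modeled on bq_enqueue()/bq_xmit_all(): frames are
 * batched and flushed in one driver call. Sizes and names are
 * illustrative, not the kernel's. */
#include <stdio.h>

#define BULK_MAX 16

struct bulk_queue {
	void *q[BULK_MAX];
	unsigned int count;
};

/* stand-in for dev->netdev_ops->ndo_xdp_xmit(); returns frames accepted */
static int fake_xmit(void **frames, unsigned int n)
{
	printf("xmit batch of %u\n", n);
	return (int)n;
}

static void bq_flush(struct bulk_queue *bq)
{
	if (!bq->count)
		return;
	fake_xmit(bq->q, bq->count);    /* one call for the whole batch */
	bq->count = 0;
}

static void bq_add(struct bulk_queue *bq, void *frame)
{
	if (bq->count == BULK_MAX)      /* full: flush before enqueueing */
		bq_flush(bq);
	bq->q[bq->count++] = frame;
}

int main(void)
{
	struct bulk_queue bq = { 0 };
	int frame;

	for (int i = 0; i < 40; i++)
		bq_add(&bq, &frame);
	bq_flush(&bq);                  /* final flush at end of the cycle */
	return 0;
}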
446 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, in __xdp_enqueue() argument
452 if (!dev->netdev_ops->ndo_xdp_xmit) in __xdp_enqueue()
455 err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); in __xdp_enqueue()
463 bq_enqueue(dev, xdpf, dev_rx); in __xdp_enqueue()
467 static struct xdp_buff *dev_map_run_prog(struct net_device *dev, in dev_map_run_prog() argument
471 struct xdp_txq_info txq = { .dev = dev }; in dev_map_run_prog()
487 trace_xdp_exception(dev, xdp_prog, act); in dev_map_run_prog()
495 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, in dev_xdp_enqueue() argument
498 return __xdp_enqueue(dev, xdp, dev_rx); in dev_xdp_enqueue()
504 struct net_device *dev = dst->dev; in dev_map_enqueue() local
507 xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog); in dev_map_enqueue()
511 return __xdp_enqueue(dev, xdp, dev_rx); in dev_map_enqueue()
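__xdp_enqueue() rejects a target device up front if it does not implement ndo_xdp_xmit, then size-checks the frame via xdp_ok_fwd_dev() before queueing; dev_map_enqueue() may first run a per-entry XDP program through dev_map_run_prog(). A sketch of that capability check, with stand-in ops and device types:

/* Capability check, modeled on __xdp_enqueue(): a device without the
 * XDP transmit hook is rejected before any queueing happens. */
#include <errno.h>
#include <stdio.h>

struct fake_ops {
	int (*xdp_xmit)(void *dev, void *frame);
};

struct fake_dev {
	const struct fake_ops *ops;
};

static int enqueue(struct fake_dev *dev, void *frame)
{
	if (!dev->ops->xdp_xmit)        /* like !netdev_ops->ndo_xdp_xmit */
		return -EOPNOTSUPP;
	/* a length/MTU check (xdp_ok_fwd_dev) would sit here */
	return dev->ops->xdp_xmit(dev, frame);
}

int main(void)
{
	const struct fake_ops no_ops = { .xdp_xmit = NULL };
	struct fake_dev d = { &no_ops };

	printf("enqueue -> %d\n", enqueue(&d, NULL));
	return 0;
}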
519 err = xdp_ok_fwd_dev(dst->dev, skb->len); in dev_map_generic_redirect()
522 skb->dev = dst->dev; in dev_map_generic_redirect()
544 struct bpf_dtab_netdev *dev; in __dev_map_entry_free() local
546 dev = container_of(rcu, struct bpf_dtab_netdev, rcu); in __dev_map_entry_free()
547 if (dev->xdp_prog) in __dev_map_entry_free()
548 bpf_prog_put(dev->xdp_prog); in __dev_map_entry_free()
549 dev_put(dev->dev); in __dev_map_entry_free()
550 kfree(dev); in __dev_map_entry_free()
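__dev_map_entry_free() is an RCU callback: it receives a pointer to the rcu_head embedded in the entry and recovers the enclosing bpf_dtab_netdev with container_of() before dropping references. The deferred-scheduling half (call_rcu) is kernel-only; the sketch below shows only the container_of() recovery, with stand-in struct names.

/* container_of() recovery, as in __dev_map_entry_free(): subtract the
 * member offset to get from the embedded node back to the outer struct. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_rcu_head { void *next; };

struct fake_entry {
	int idx;
	struct fake_rcu_head rcu;   /* embedded node, as in bpf_dtab_netdev */
};

static void entry_free_cb(struct fake_rcu_head *rcu)
{
	struct fake_entry *e = container_of(rcu, struct fake_entry, rcu);

	printf("freeing entry idx=%d\n", e->idx);
	/* the kernel would bpf_prog_put(), dev_put(), then kfree() here */
}

int main(void)
{
	struct fake_entry e = { .idx = 7 };

	entry_free_cb(&e.rcu);
	return 0;
}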
603 struct bpf_dtab_netdev *dev; in __dev_map_alloc_node() local
605 dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN, in __dev_map_alloc_node()
607 if (!dev) in __dev_map_alloc_node()
610 dev->dev = dev_get_by_index(net, val->ifindex); in __dev_map_alloc_node()
611 if (!dev->dev) in __dev_map_alloc_node()
623 dev->idx = idx; in __dev_map_alloc_node()
624 dev->dtab = dtab; in __dev_map_alloc_node()
626 dev->xdp_prog = prog; in __dev_map_alloc_node()
627 dev->val.bpf_prog.id = prog->aux->id; in __dev_map_alloc_node()
629 dev->xdp_prog = NULL; in __dev_map_alloc_node()
630 dev->val.bpf_prog.id = 0; in __dev_map_alloc_node()
632 dev->val.ifindex = val->ifindex; in __dev_map_alloc_node()
634 return dev; in __dev_map_alloc_node()
638 dev_put(dev->dev); in __dev_map_alloc_node()
640 kfree(dev); in __dev_map_alloc_node()
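__dev_map_alloc_node() acquires resources in order (allocate the entry, take a reference on the net_device, optionally fetch the XDP program) and unwinds them in reverse on failure, which is why the error path at lines 638-640 does dev_put() before kfree(). A sketch of that goto-unwind shape with hypothetical resource helpers:

/* Goto-unwind error handling, modeled on __dev_map_alloc_node():
 * acquire in order, release in reverse order on failure. */
#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

struct entry { struct res *dev; };

static struct entry *alloc_node(int later_step_fails)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return NULL;
	e->dev = acquire();             /* like dev_get_by_index() */
	if (!e->dev)
		goto err_free;
	if (later_step_fails)           /* e.g. the prog fetch failing */
		goto err_put_dev;
	return e;

err_put_dev:
	release(e->dev);                /* like dev_put(dev->dev) */
err_free:
	free(e);                        /* like kfree(dev) */
	return NULL;
}

int main(void)
{
	struct entry *e = alloc_node(0);

	if (e) {
		release(e->dev);
		free(e);
	}
	return 0;
}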
648 struct bpf_dtab_netdev *dev, *old_dev; in __dev_map_update_elem() local
663 dev = NULL; in __dev_map_update_elem()
668 dev = __dev_map_alloc_node(net, dtab, &val, i); in __dev_map_update_elem()
669 if (IS_ERR(dev)) in __dev_map_update_elem()
670 return PTR_ERR(dev); in __dev_map_update_elem()
677 old_dev = xchg(&dtab->netdev_map[i], dev); in __dev_map_update_elem()
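__dev_map_update_elem() publishes the new entry with xchg(), so concurrent readers observe either the old or the new pointer, never a torn value; the displaced old entry is then freed via RCU (not among the matched lines above). A C11 sketch of that publish step, with the deferred free elided:

/* Atomic slot replacement, modeled on
 * old_dev = xchg(&dtab->netdev_map[i], dev). */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) slot;

static int *publish(int *new_entry)
{
	return atomic_exchange(&slot, new_entry);
}

int main(void)
{
	static int a = 1, b = 2;
	int *old;

	publish(&a);
	old = publish(&b);
	printf("displaced entry: %d\n", *old);  /* kernel would call_rcu() it */
	return 0;
}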
695 struct bpf_dtab_netdev *dev, *old_dev; in __dev_map_hash_update_elem() local
713 dev = __dev_map_alloc_node(net, dtab, &val, idx); in __dev_map_hash_update_elem()
714 if (IS_ERR(dev)) { in __dev_map_hash_update_elem()
715 err = PTR_ERR(dev); in __dev_map_hash_update_elem()
724 call_rcu(&dev->rcu, __dev_map_entry_free); in __dev_map_hash_update_elem()
730 hlist_add_head_rcu(&dev->index_hlist, in __dev_map_hash_update_elem()
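__dev_map_hash_update_elem() retires any existing entry for the index via call_rcu() and links the replacement at the head of its bucket with hlist_add_head_rcu(). A single-threaded stand-in for that head insertion follows; the RCU publication ordering is what the kernel primitive adds on top.

/* Head insertion, modeled on hlist_add_head_rcu(): new entries go at
 * the front of the bucket chain, so walkers meet the newest first. */
#include <stdio.h>

struct node {
	unsigned int idx;
	struct node *next;
};

static void bucket_add_head(struct node **head, struct node *n)
{
	n->next = *head;    /* under RCU, publication order matters here */
	*head = n;
}

int main(void)
{
	static struct node a = { .idx = 1 }, b = { .idx = 2 };
	struct node *head = NULL;

	bucket_add_head(&head, &a);
	bucket_add_head(&head, &b);
	printf("head idx=%u\n", head->idx); /* 2: newest first */
	return 0;
}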
787 struct bpf_dtab_netdev *dev; in dev_map_hash_remove_netdev() local
793 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_hash_remove_netdev()
794 if (netdev != dev->dev) in dev_map_hash_remove_netdev()
798 hlist_del_rcu(&dev->index_hlist); in dev_map_hash_remove_netdev()
799 call_rcu(&dev->rcu, __dev_map_entry_free); in dev_map_hash_remove_netdev()
823 per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev; in dev_map_notification()
839 struct bpf_dtab_netdev *dev, *odev; in dev_map_notification() local
841 dev = READ_ONCE(dtab->netdev_map[i]); in dev_map_notification()
842 if (!dev || netdev != dev->dev) in dev_map_notification()
844 odev = cmpxchg(&dtab->netdev_map[i], dev, NULL); in dev_map_notification()
845 if (dev == odev) in dev_map_notification()
846 call_rcu(&dev->rcu, in dev_map_notification()
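On NETDEV_UNREGISTER, dev_map_notification() reads each slot with READ_ONCE() and clears it with cmpxchg() against the value it just read, so a concurrent update that already replaced the entry is not clobbered with NULL. A C11 sketch of that read-then-compare-and-swap removal:

/* Conditional removal, modeled on
 * cmpxchg(&dtab->netdev_map[i], dev, NULL). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic(int *) slot;

static bool remove_if_unchanged(int *expected)
{
	return atomic_compare_exchange_strong(&slot, &expected, NULL);
}

int main(void)
{
	static int entry = 7;
	int *seen;

	atomic_store(&slot, &entry);
	seen = atomic_load(&slot);          /* like READ_ONCE(...) */
	printf("removed: %d\n", remove_if_unchanged(seen));
	return 0;
}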
867 BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) != in dev_map_init()
868 offsetof(struct _bpf_dtab_netdev, dev)); in dev_map_init()
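This BUILD_BUG_ON enforces the comment on line 64 above: dev must stay the first member of struct bpf_dtab_netdev so the tracepoint's mirror struct (struct _bpf_dtab_netdev) remains layout-compatible. A userspace analogue using C11 static_assert, with stand-in struct names:

/* Compile-time layout pin, modeled on the BUILD_BUG_ON in
 * dev_map_init(): keep a member at the same offset in two views. */
#include <assert.h>
#include <stddef.h>

struct real_entry   { void *dev; int idx; };
struct mirror_entry { void *dev; };     /* what the tracepoint reads */

static_assert(offsetof(struct real_entry, dev) ==
	      offsetof(struct mirror_entry, dev),
	      "dev must sit at the same offset in both views");

int main(void)
{
	return 0;
}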