
Lines Matching refs:dev

67 struct net_device *dev; /* must be first member, due to tracepoint */ member
238 struct bpf_dtab_netdev *dev; in dev_map_free() local
244 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_free()
245 hlist_del_rcu(&dev->index_hlist); in dev_map_free()
246 free_percpu(dev->bulkq); in dev_map_free()
247 dev_put(dev->dev); in dev_map_free()
248 kfree(dev); in dev_map_free()
255 struct bpf_dtab_netdev *dev; in dev_map_free() local
257 dev = dtab->netdev_map[i]; in dev_map_free()
258 if (!dev) in dev_map_free()
261 free_percpu(dev->bulkq); in dev_map_free()
262 dev_put(dev->dev); in dev_map_free()
263 kfree(dev); in dev_map_free()
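
The matches at lines 238-263 appear to come from the kernel's BPF device map (kernel/bpf/devmap.c): dev_map_free() walks both the hash-table buckets and the flat netdev_map[] array, and for every entry it releases the per-CPU bulk queue, drops the reference held on the net_device, and frees the wrapper. Below is a minimal userspace sketch of that teardown order; every name in it (fake_netdev, dtab_entry, NR_FAKE_CPUS, ...) is a hypothetical stand-in, not kernel API.

/*
 * Sketch of the teardown order seen in dev_map_free(): release the
 * per-entry auxiliary storage first, then drop the reference on the
 * wrapped device, then free the wrapper itself.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_FAKE_CPUS 4

struct fake_netdev {
	int ifindex;
	int refcnt;
};

struct bulk_queue {
	int count;
};

struct dtab_entry {
	struct fake_netdev *dev;   /* referenced device */
	struct bulk_queue *bulkq;  /* stands in for the per-CPU bulk queue */
};

static void fake_dev_put(struct fake_netdev *dev)
{
	if (--dev->refcnt == 0)
		free(dev);
}

static void entry_free(struct dtab_entry *e)
{
	free(e->bulkq);        /* free_percpu(dev->bulkq) */
	fake_dev_put(e->dev);  /* dev_put(dev->dev) */
	free(e);               /* kfree(dev) */
}

int main(void)
{
	struct fake_netdev *nd = calloc(1, sizeof(*nd));
	struct dtab_entry *e = calloc(1, sizeof(*e));

	if (!nd || !e)
		return 1;

	nd->ifindex = 3;
	nd->refcnt = 1;
	e->dev = nd;
	e->bulkq = calloc(NR_FAKE_CPUS, sizeof(*e->bulkq));

	entry_free(e);
	printf("entry released\n");
	return 0;
}
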
294 struct bpf_dtab_netdev *dev; in __dev_map_hash_lookup_elem() local
296 hlist_for_each_entry_rcu(dev, head, index_hlist) in __dev_map_hash_lookup_elem()
297 if (dev->idx == key) in __dev_map_hash_lookup_elem()
298 return dev; in __dev_map_hash_lookup_elem()
308 struct bpf_dtab_netdev *dev, *next_dev; in dev_map_hash_get_next_key() local
317 dev = __dev_map_hash_lookup_elem(map, idx); in dev_map_hash_get_next_key()
318 if (!dev) in dev_map_hash_get_next_key()
321 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)), in dev_map_hash_get_next_key()
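
Lines 294-321 are the read side of the hash-indexed flavour: __dev_map_hash_lookup_elem() walks one bucket under RCU and returns the entry whose idx equals the key, and dev_map_hash_get_next_key() continues the iteration from that entry's position in its bucket. A plain, non-RCU sketch of the same bucket walk, with hypothetical names:

/*
 * Sketch of the bucket walk in __dev_map_hash_lookup_elem(): hash the
 * key, walk that bucket's chain, return the node whose index matches.
 * The RCU protection of the real code is omitted here.
 */
#include <stdio.h>
#include <stddef.h>

#define NR_BUCKETS 8

struct entry {
	unsigned int idx;
	struct entry *next;   /* bucket chaining, like index_hlist */
};

static struct entry *buckets[NR_BUCKETS];

static struct entry *hash_lookup(unsigned int key)
{
	struct entry *e;

	for (e = buckets[key % NR_BUCKETS]; e; e = e->next)
		if (e->idx == key)
			return e;
	return NULL;
}

int main(void)
{
	static struct entry a = { .idx = 10 }, b = { .idx = 2 };

	a.next = buckets[10 % NR_BUCKETS];
	buckets[10 % NR_BUCKETS] = &a;
	b.next = buckets[2 % NR_BUCKETS];
	buckets[2 % NR_BUCKETS] = &b;

	printf("lookup(10) -> %s\n", hash_lookup(10) ? "found" : "miss");
	printf("lookup(7)  -> %s\n", hash_lookup(7) ? "found" : "miss");
	return 0;
}
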
352 struct net_device *dev = obj->dev; in bq_xmit_all() local
365 sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags); in bq_xmit_all()
376 sent, drops, bq->dev_rx, dev, err); in bq_xmit_all()
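
Lines 352-376 are bq_xmit_all(), which drains one bulk queue by handing the accumulated frames to the driver's ndo_xdp_xmit() and then reports sent and dropped counts (line 376 is the tail of the devmap xmit tracepoint call). A hedged userspace analogue of that flush step, where a fixed-size queue is drained through a callback that may accept only part of the batch; queue size and names are hypothetical.

/*
 * Userspace analogue of the bulk flush in bq_xmit_all(): frames are
 * accumulated in a small queue and drained through a single "xmit"
 * callback that returns how many it accepted; the rest count as drops.
 */
#include <stdio.h>

#define BULK_SIZE 16

struct bulk_queue {
	void *q[BULK_SIZE];
	int count;
};

/* stands in for dev->netdev_ops->ndo_xdp_xmit() */
static int fake_xdp_xmit(void **frames, int n)
{
	(void)frames;
	return n > 8 ? 8 : n;   /* pretend the driver ring only takes 8 */
}

static void bq_flush(struct bulk_queue *bq)
{
	int sent = fake_xdp_xmit(bq->q, bq->count);
	int drops = bq->count - sent;

	printf("flushed: sent=%d drops=%d\n", sent, drops);
	bq->count = 0;
}

int main(void)
{
	struct bulk_queue bq = { .count = 0 };
	int dummy;

	for (int i = 0; i < 12; i++) {
		if (bq.count == BULK_SIZE)
			bq_flush(&bq);
		bq.q[bq.count++] = &dummy;
	}
	bq_flush(&bq);
	return 0;
}
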
463 struct net_device *dev = dst->dev; in dev_map_enqueue() local
467 if (!dev->netdev_ops->ndo_xdp_xmit) in dev_map_enqueue()
470 err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); in dev_map_enqueue()
486 err = xdp_ok_fwd_dev(dst->dev, skb->len); in dev_map_generic_redirect()
489 skb->dev = dst->dev; in dev_map_generic_redirect()
498 struct net_device *dev = obj ? obj->dev : NULL; in dev_map_lookup_elem() local
500 return dev ? &dev->ifindex : NULL; in dev_map_lookup_elem()
507 struct net_device *dev = obj ? obj->dev : NULL; in dev_map_hash_lookup_elem() local
509 return dev ? &dev->ifindex : NULL; in dev_map_hash_lookup_elem()
512 static void dev_map_flush_old(struct bpf_dtab_netdev *dev) in dev_map_flush_old() argument
514 if (dev->dev->netdev_ops->ndo_xdp_xmit) { in dev_map_flush_old()
520 bq = per_cpu_ptr(dev->bulkq, cpu); in dev_map_flush_old()
529 struct bpf_dtab_netdev *dev; in __dev_map_entry_free() local
531 dev = container_of(rcu, struct bpf_dtab_netdev, rcu); in __dev_map_entry_free()
532 dev_map_flush_old(dev); in __dev_map_entry_free()
533 free_percpu(dev->bulkq); in __dev_map_entry_free()
534 dev_put(dev->dev); in __dev_map_entry_free()
535 kfree(dev); in __dev_map_entry_free()
589 struct bpf_dtab_netdev *dev; in __dev_map_alloc_node() local
593 dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node); in __dev_map_alloc_node()
594 if (!dev) in __dev_map_alloc_node()
597 dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq), in __dev_map_alloc_node()
599 if (!dev->bulkq) { in __dev_map_alloc_node()
600 kfree(dev); in __dev_map_alloc_node()
605 bq = per_cpu_ptr(dev->bulkq, cpu); in __dev_map_alloc_node()
606 bq->obj = dev; in __dev_map_alloc_node()
609 dev->dev = dev_get_by_index(net, ifindex); in __dev_map_alloc_node()
610 if (!dev->dev) { in __dev_map_alloc_node()
611 free_percpu(dev->bulkq); in __dev_map_alloc_node()
612 kfree(dev); in __dev_map_alloc_node()
616 dev->idx = idx; in __dev_map_alloc_node()
617 dev->dtab = dtab; in __dev_map_alloc_node()
619 return dev; in __dev_map_alloc_node()
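
Lines 589-619 show __dev_map_alloc_node() building a new entry: allocate the wrapper, allocate its per-CPU bulk queues, point every queue back at the entry (bq->obj = dev), then take a reference on the target interface with dev_get_by_index(); each failure path frees exactly what the earlier steps set up. The sketch below mirrors that allocate-then-unwind shape; all names (entry_alloc, lookup_ifindex, NR_QUEUES, ...) are hypothetical userspace stand-ins.

/*
 * Sketch of the allocate-and-unwind pattern in __dev_map_alloc_node().
 */
#include <stdlib.h>

#define NR_QUEUES 4

struct bulk_queue { void *owner; int count; };
struct target     { int ifindex; };

struct entry {
	struct target *dev;
	struct bulk_queue *bulkq;
	unsigned int idx;
};

static struct target *lookup_ifindex(int ifindex)
{
	struct target *t = malloc(sizeof(*t));

	if (t)
		t->ifindex = ifindex;
	return t;
}

static struct entry *entry_alloc(int ifindex, unsigned int idx)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return NULL;

	e->bulkq = calloc(NR_QUEUES, sizeof(*e->bulkq));
	if (!e->bulkq) {
		free(e);
		return NULL;
	}

	for (int i = 0; i < NR_QUEUES; i++)
		e->bulkq[i].owner = e;       /* bq->obj = dev */

	e->dev = lookup_ifindex(ifindex);    /* dev_get_by_index() */
	if (!e->dev) {
		free(e->bulkq);
		free(e);
		return NULL;
	}

	e->idx = idx;
	return e;
}

int main(void)
{
	struct entry *e = entry_alloc(3, 0);

	if (e) {
		free(e->dev);
		free(e->bulkq);
		free(e);
	}
	return 0;
}
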
626 struct bpf_dtab_netdev *dev, *old_dev; in __dev_map_update_elem() local
638 dev = NULL; in __dev_map_update_elem()
640 dev = __dev_map_alloc_node(net, dtab, ifindex, i); in __dev_map_update_elem()
641 if (IS_ERR(dev)) in __dev_map_update_elem()
642 return PTR_ERR(dev); in __dev_map_update_elem()
649 old_dev = xchg(&dtab->netdev_map[i], dev); in __dev_map_update_elem()
667 struct bpf_dtab_netdev *dev, *old_dev; in __dev_map_hash_update_elem() local
682 dev = __dev_map_alloc_node(net, dtab, ifindex, idx); in __dev_map_hash_update_elem()
683 if (IS_ERR(dev)) { in __dev_map_hash_update_elem()
684 err = PTR_ERR(dev); in __dev_map_hash_update_elem()
693 call_rcu(&dev->rcu, __dev_map_entry_free); in __dev_map_hash_update_elem()
699 hlist_add_head_rcu(&dev->index_hlist, in __dev_map_hash_update_elem()
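
Both update paths (lines 626-699) construct the replacement entry first and only then make it visible: the array flavour swaps it into netdev_map[i] with xchg(), the hash flavour links it into a bucket with hlist_add_head_rcu(), and an entry that is displaced (or, as on line 693, a fresh entry that cannot be inserted) is handed to call_rcu() with __dev_map_entry_free so RCU readers can finish with it first. Below is a single-threaded sketch of that publish-then-retire pattern using C11 atomics; the deferred free is collapsed into an immediate free because this demo has no concurrent readers, and all names are hypothetical.

/*
 * Sketch of the publish-then-retire pattern in __dev_map_update_elem():
 * build the replacement fully, switch the slot atomically, retire the
 * previous occupant afterwards.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int ifindex; };

static _Atomic(struct entry *) slot;

static void update_slot(int ifindex)
{
	struct entry *new = malloc(sizeof(*new));
	struct entry *old;

	if (!new)
		return;
	new->ifindex = ifindex;

	/* xchg(&dtab->netdev_map[i], dev) */
	old = atomic_exchange(&slot, new);

	/* call_rcu(&old_dev->rcu, __dev_map_entry_free) in the kernel */
	free(old);
}

int main(void)
{
	update_slot(2);
	update_slot(5);
	printf("slot now holds ifindex %d\n",
	       atomic_load(&slot)->ifindex);
	free(atomic_load(&slot));
	return 0;
}
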
748 struct bpf_dtab_netdev *dev; in dev_map_hash_remove_netdev() local
754 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_hash_remove_netdev()
755 if (netdev != dev->dev) in dev_map_hash_remove_netdev()
759 hlist_del_rcu(&dev->index_hlist); in dev_map_hash_remove_netdev()
760 call_rcu(&dev->rcu, __dev_map_entry_free); in dev_map_hash_remove_netdev()
788 struct bpf_dtab_netdev *dev, *odev; in dev_map_notification() local
790 dev = READ_ONCE(dtab->netdev_map[i]); in dev_map_notification()
791 if (!dev || netdev != dev->dev) in dev_map_notification()
793 odev = cmpxchg(&dtab->netdev_map[i], dev, NULL); in dev_map_notification()
794 if (dev == odev) in dev_map_notification()
795 call_rcu(&dev->rcu, in dev_map_notification()
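
Lines 748-795 handle a device going away: dev_map_hash_remove_netdev() unlinks matching hash entries, and dev_map_notification() scans every array slot and clears it only if it still holds the departing device, using cmpxchg() so a concurrent update that already installed a different entry is left alone. A sketch of that conditional clear with C11 compare-and-exchange standing in for the kernel's cmpxchg(); names are hypothetical.

/*
 * Sketch of the conditional clear in dev_map_notification(): the slot is
 * set to NULL only if it still contains the entry we sampled, so a racing
 * update that installed a different entry wins.
 */
#include <stdatomic.h>
#include <stdio.h>

struct entry { int ifindex; };

static _Atomic(struct entry *) slot;

static void remove_if_matches(struct entry *expected)
{
	struct entry *seen = expected;

	/* cmpxchg(&dtab->netdev_map[i], dev, NULL) */
	if (atomic_compare_exchange_strong(&slot, &seen, NULL))
		printf("entry for ifindex %d removed\n", expected->ifindex);
	else
		printf("slot changed underneath us; left alone\n");
}

int main(void)
{
	static struct entry gone = { .ifindex = 7 };

	atomic_store(&slot, &gone);
	remove_if_matches(&gone);   /* clears the slot */
	remove_if_matches(&gone);   /* slot no longer holds it: nothing happens */
	return 0;
}
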
814 BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) != in dev_map_init()
815 offsetof(struct _bpf_dtab_netdev, dev)); in dev_map_init()
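
The comment on line 67 ("must be first member, due to tracepoint") and the BUILD_BUG_ON() at lines 814-815 belong together: the devmap tracepoints only know about a reduced structure whose first field is the net_device pointer, so dev_map_init() fails the build if dev ever moves inside struct bpf_dtab_netdev. The same compile-time pin can be written in plain C with offsetof() and _Static_assert(); the struct names below are hypothetical.

/*
 * Userspace equivalent of the BUILD_BUG_ON() in dev_map_init(): assert at
 * compile time that two structures keep the shared member at the same
 * offset, so code that only knows the "public" layout can safely read
 * through a pointer to the private one.
 */
#include <stddef.h>

struct public_view {
	void *dev;          /* the only field the tracepoint reads */
};

struct private_entry {
	void *dev;          /* must stay the first member */
	unsigned int idx;
	void *bulkq;
};

_Static_assert(offsetof(struct private_entry, dev) ==
	       offsetof(struct public_view, dev),
	       "dev must remain the first member of struct private_entry");

int main(void)
{
	return 0;
}
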