
Lines Matching refs:dev

68 void netdev_set_default_ethtool_ops(struct net_device *dev,
227 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) argument
228 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) argument
229 #define netdev_for_each_uc_addr(ha, dev) \ argument
230 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
232 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) argument
233 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) argument
234 #define netdev_for_each_mc_addr(ha, dev) \ argument
235 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
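
The unicast/multicast list helpers above are normally used from a driver's .ndo_set_rx_mode callback to reprogram the hardware receive filter. A minimal sketch, assuming hypothetical mydev_hw_* filter helpers and a MYDEV_MAX_MC hardware limit:

    static void mydev_set_rx_mode(struct net_device *dev)
    {
        struct netdev_hw_addr *ha;

        if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
            netdev_mc_count(dev) > MYDEV_MAX_MC) {
            /* too many entries for the filter: accept all multicast */
            mydev_hw_set_allmulti(dev);
            return;
        }

        mydev_hw_clear_mc_filter(dev);
        netdev_for_each_mc_addr(ha, dev)
            mydev_hw_add_mc_filter(dev, ha->addr);  /* ha->addr is the MAC address */
    }
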
258 #define LL_RESERVED_SPACE(dev) \ argument
259 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
260 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ argument
261 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
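
LL_RESERVED_SPACE() is the (HH_DATA_MOD-aligned) headroom to reserve when building a packet from scratch for a given device, so the link-layer header can be prepended without reallocation. A rough sketch of the usual pattern (payload_len is a placeholder):

    struct sk_buff *skb;

    skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
    if (!skb)
        return -ENOMEM;
    skb_reserve(skb, LL_RESERVED_SPACE(dev));  /* room for the hard header */
    skb_put(skb, payload_len);                 /* then append the payload */
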
264 int (*create) (struct sk_buff *skb, struct net_device *dev,
270 const struct net_device *dev,
332 struct net_device *dev; member
593 struct net_device *dev; member
730 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
742 struct net_device *dev; member
832 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1248 int (*ndo_init)(struct net_device *dev);
1249 void (*ndo_uninit)(struct net_device *dev);
1250 int (*ndo_open)(struct net_device *dev);
1251 int (*ndo_stop)(struct net_device *dev);
1253 struct net_device *dev);
1255 struct net_device *dev,
1257 u16 (*ndo_select_queue)(struct net_device *dev,
1260 void (*ndo_change_rx_flags)(struct net_device *dev,
1262 void (*ndo_set_rx_mode)(struct net_device *dev);
1263 int (*ndo_set_mac_address)(struct net_device *dev,
1265 int (*ndo_validate_addr)(struct net_device *dev);
1266 int (*ndo_do_ioctl)(struct net_device *dev,
1268 int (*ndo_set_config)(struct net_device *dev,
1270 int (*ndo_change_mtu)(struct net_device *dev,
1272 int (*ndo_neigh_setup)(struct net_device *dev,
1274 void (*ndo_tx_timeout) (struct net_device *dev);
1276 void (*ndo_get_stats64)(struct net_device *dev,
1278 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1280 const struct net_device *dev,
1282 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1284 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1286 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1289 void (*ndo_poll_controller)(struct net_device *dev);
1290 int (*ndo_netpoll_setup)(struct net_device *dev,
1292 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1294 int (*ndo_set_vf_mac)(struct net_device *dev,
1296 int (*ndo_set_vf_vlan)(struct net_device *dev,
1299 int (*ndo_set_vf_rate)(struct net_device *dev,
1302 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1304 int (*ndo_set_vf_trust)(struct net_device *dev,
1306 int (*ndo_get_vf_config)(struct net_device *dev,
1309 int (*ndo_set_vf_link_state)(struct net_device *dev,
1311 int (*ndo_get_vf_stats)(struct net_device *dev,
1315 int (*ndo_set_vf_port)(struct net_device *dev,
1318 int (*ndo_get_vf_port)(struct net_device *dev,
1320 int (*ndo_set_vf_guid)(struct net_device *dev,
1324 struct net_device *dev,
1326 int (*ndo_setup_tc)(struct net_device *dev,
1330 int (*ndo_fcoe_enable)(struct net_device *dev);
1331 int (*ndo_fcoe_disable)(struct net_device *dev);
1332 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1336 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1338 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1342 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1349 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1354 int (*ndo_rx_flow_steer)(struct net_device *dev,
1359 int (*ndo_add_slave)(struct net_device *dev,
1362 int (*ndo_del_slave)(struct net_device *dev,
1364 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1366 int (*ndo_set_features)(struct net_device *dev,
1368 int (*ndo_neigh_construct)(struct net_device *dev,
1370 void (*ndo_neigh_destroy)(struct net_device *dev,
1375 struct net_device *dev,
1382 struct net_device *dev,
1387 struct net_device *dev,
1392 struct net_device *dev,
1396 int (*ndo_bridge_setlink)(struct net_device *dev,
1402 struct net_device *dev,
1405 int (*ndo_bridge_dellink)(struct net_device *dev,
1408 int (*ndo_change_carrier)(struct net_device *dev,
1410 int (*ndo_get_phys_port_id)(struct net_device *dev,
1412 int (*ndo_get_port_parent_id)(struct net_device *dev,
1414 int (*ndo_get_phys_port_name)(struct net_device *dev,
1416 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1418 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1421 struct net_device *dev);
1425 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1428 int (*ndo_get_iflink)(const struct net_device *dev);
1429 int (*ndo_change_proto_down)(struct net_device *dev,
1431 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1433 void (*ndo_set_rx_headroom)(struct net_device *dev,
1435 int (*ndo_bpf)(struct net_device *dev,
1437 int (*ndo_xdp_xmit)(struct net_device *dev, int n,
1440 int (*ndo_xsk_wakeup)(struct net_device *dev,
1442 struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
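
A driver fills in only the struct net_device_ops callbacks listed above that it actually supports; unset entries stay NULL and the core falls back to its defaults. A minimal sketch, where the mydev_* handlers are hypothetical and eth_mac_addr()/eth_validate_addr() are the generic Ethernet helpers:

    static const struct net_device_ops mydev_netdev_ops = {
        .ndo_open            = mydev_open,
        .ndo_stop            = mydev_stop,
        .ndo_start_xmit      = mydev_start_xmit,
        .ndo_set_rx_mode     = mydev_set_rx_mode,
        .ndo_set_mac_address = eth_mac_addr,       /* from <linux/etherdevice.h> */
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_get_stats64     = mydev_get_stats64,
    };

    /* in the probe path, before register_netdev(): */
    dev->netdev_ops = &mydev_netdev_ops;
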
2011 void (*priv_destructor)(struct net_device *dev);
2034 struct device dev; member
2068 #define to_net_dev(d) container_of(d, struct net_device, dev)
2070 static inline bool netif_elide_gro(const struct net_device *dev) in netif_elide_gro() argument
2072 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) in netif_elide_gro()
2080 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) in netdev_get_prio_tc_map() argument
2082 return dev->prio_tc_map[prio & TC_BITMASK]; in netdev_get_prio_tc_map()
2086 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) in netdev_set_prio_tc_map() argument
2088 if (tc >= dev->num_tc) in netdev_set_prio_tc_map()
2091 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; in netdev_set_prio_tc_map()
2095 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2096 void netdev_reset_tc(struct net_device *dev);
2097 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2098 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2101 int netdev_get_num_tc(struct net_device *dev) in netdev_get_num_tc() argument
2103 return dev->num_tc; in netdev_get_num_tc()
2106 void netdev_unbind_sb_channel(struct net_device *dev,
2108 int netdev_bind_sb_channel_queue(struct net_device *dev,
2111 int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2112 static inline int netdev_get_sb_channel(struct net_device *dev) in netdev_get_sb_channel() argument
2114 return max_t(int, -dev->num_tc, 0); in netdev_get_sb_channel()
2118 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, in netdev_get_tx_queue() argument
2121 return &dev->_tx[index]; in netdev_get_tx_queue()
2124 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, in skb_get_tx_queue() argument
2127 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); in skb_get_tx_queue()
2130 static inline void netdev_for_each_tx_queue(struct net_device *dev, in netdev_for_each_tx_queue() argument
2138 for (i = 0; i < dev->num_tx_queues; i++) in netdev_for_each_tx_queue()
2139 f(dev, &dev->_tx[i], arg); in netdev_for_each_tx_queue()
2142 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2144 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2151 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) in netdev_get_fwd_headroom() argument
2153 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; in netdev_get_fwd_headroom()
2156 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) in netdev_set_rx_headroom() argument
2158 if (dev->netdev_ops->ndo_set_rx_headroom) in netdev_set_rx_headroom()
2159 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); in netdev_set_rx_headroom()
2163 static inline void netdev_reset_rx_headroom(struct net_device *dev) in netdev_reset_rx_headroom() argument
2165 netdev_set_rx_headroom(dev, -1); in netdev_reset_rx_headroom()
2172 struct net *dev_net(const struct net_device *dev) in dev_net() argument
2174 return read_pnet(&dev->nd_net); in dev_net()
2178 void dev_net_set(struct net_device *dev, struct net *net) in dev_net_set() argument
2180 write_pnet(&dev->nd_net, net); in dev_net_set()
2189 static inline void *netdev_priv(const struct net_device *dev) in netdev_priv() argument
2191 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); in netdev_priv()
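
netdev_priv() returns the driver-private area that is allocated together with the struct net_device itself. The usual pairing with alloc_etherdev() looks roughly like this (struct mydev_priv is a hypothetical layout):

    struct mydev_priv {
        void __iomem *regs;
        struct napi_struct napi;
    };

    /* in the probe path: */
    struct net_device *dev = alloc_etherdev(sizeof(struct mydev_priv));
    struct mydev_priv *priv;

    if (!dev)
        return -ENOMEM;
    priv = netdev_priv(dev);   /* private area lives in the same allocation as *dev */
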
2197 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2203 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2220 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2234 static inline void netif_tx_napi_add(struct net_device *dev, in netif_tx_napi_add() argument
2240 netif_napi_add(dev, napi, poll, weight); in netif_tx_napi_add()
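
netif_napi_add() registers a poll handler that the core runs from softirq context once the driver schedules the NAPI instance (typically from its interrupt handler). A sketch, assuming the priv layout from the netdev_priv example above and a hypothetical mydev_rx_clean() RX routine:

    static int mydev_poll(struct napi_struct *napi, int budget)
    {
        struct mydev_priv *priv = container_of(napi, struct mydev_priv, napi);
        int work_done = mydev_rx_clean(priv, budget);   /* process up to budget packets */

        if (work_done < budget)
            napi_complete_done(napi, work_done);        /* then re-enable device interrupts */
        return work_done;
    }

    /* at probe time: */
    netif_napi_add(dev, &priv->napi, mydev_poll, NAPI_POLL_WEIGHT);
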
2356 struct net_device *dev; /* NULL is wildcarded here */ member
2497 struct net_device *dev; member
2532 struct net_device *dev) in netdev_notifier_info_init() argument
2534 info->dev = dev; in netdev_notifier_info_init()
2541 return info->dev; in netdev_notifier_info_to_dev()
2550 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
2572 static inline struct net_device *next_net_device(struct net_device *dev) in next_net_device() argument
2577 net = dev_net(dev); in next_net_device()
2578 lh = dev->dev_list.next; in next_net_device()
2582 static inline struct net_device *next_net_device_rcu(struct net_device *dev) in next_net_device_rcu() argument
2587 net = dev_net(dev); in next_net_device_rcu()
2588 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); in next_net_device_rcu()
2605 int netdev_boot_setup_check(struct net_device *dev);
2617 int dev_get_iflink(const struct net_device *dev);
2618 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2624 int dev_alloc_name(struct net_device *dev, const char *name);
2625 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
2626 void dev_close(struct net_device *dev);
2628 void dev_disable_lro(struct net_device *dev);
2630 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2632 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2637 int register_netdevice(struct net_device *dev);
2638 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2640 static inline void unregister_netdevice(struct net_device *dev) in unregister_netdevice() argument
2642 unregister_netdevice_queue(dev, NULL); in unregister_netdevice()
2645 int netdev_refcnt_read(const struct net_device *dev);
2646 void free_netdev(struct net_device *dev);
2647 void netdev_freemem(struct net_device *dev);
2649 int init_dummy_netdev(struct net_device *dev);
2656 int dev_restart(struct net_device *dev);
2905 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, in dev_hard_header() argument
2910 if (!dev->header_ops || !dev->header_ops->create) in dev_hard_header()
2913 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); in dev_hard_header()
2919 const struct net_device *dev = skb->dev; in dev_parse_header() local
2921 if (!dev->header_ops || !dev->header_ops->parse) in dev_parse_header()
2923 return dev->header_ops->parse(skb, haddr); in dev_parse_header()
2928 const struct net_device *dev = skb->dev; in dev_parse_header_protocol() local
2930 if (!dev->header_ops || !dev->header_ops->parse_protocol) in dev_parse_header_protocol()
2932 return dev->header_ops->parse_protocol(skb); in dev_parse_header_protocol()
2936 static inline bool dev_validate_header(const struct net_device *dev, in dev_validate_header() argument
2939 if (likely(len >= dev->hard_header_len)) in dev_validate_header()
2941 if (len < dev->min_header_len) in dev_validate_header()
2945 memset(ll_header + len, 0, dev->hard_header_len - len); in dev_validate_header()
2949 if (dev->header_ops && dev->header_ops->validate) in dev_validate_header()
2950 return dev->header_ops->validate(ll_header, len); in dev_validate_header()
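
dev_hard_header() and the other header_ops wrappers above are how protocol code asks the device to build or parse its link-layer header (for Ethernet, header_ops->create ends up in eth_header()). A hedged sketch of typical use, with dest_hw as a placeholder destination MAC and the skb already given LL_RESERVED_SPACE(dev) headroom:

    if (dev_hard_header(skb, dev, ETH_P_IP, dest_hw, dev->dev_addr, skb->len) < 0) {
        kfree_skb(skb);
        return -EINVAL;
    }
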
2955 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
3064 static inline void netif_tx_schedule_all(struct net_device *dev) in netif_tx_schedule_all() argument
3068 for (i = 0; i < dev->num_tx_queues; i++) in netif_tx_schedule_all()
3069 netif_schedule_queue(netdev_get_tx_queue(dev, i)); in netif_tx_schedule_all()
3083 static inline void netif_start_queue(struct net_device *dev) in netif_start_queue() argument
3085 netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); in netif_start_queue()
3088 static inline void netif_tx_start_all_queues(struct net_device *dev) in netif_tx_start_all_queues() argument
3092 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_start_all_queues()
3093 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_start_all_queues()
3107 static inline void netif_wake_queue(struct net_device *dev) in netif_wake_queue() argument
3109 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); in netif_wake_queue()
3112 static inline void netif_tx_wake_all_queues(struct net_device *dev) in netif_tx_wake_all_queues() argument
3116 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_wake_all_queues()
3117 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_wake_all_queues()
3134 static inline void netif_stop_queue(struct net_device *dev) in netif_stop_queue() argument
3136 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); in netif_stop_queue()
3139 void netif_tx_stop_all_queues(struct net_device *dev);
3140 void netdev_update_lockdep_key(struct net_device *dev);
3153 static inline bool netif_queue_stopped(const struct net_device *dev) in netif_queue_stopped() argument
3155 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); in netif_queue_stopped()
3256 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) in netdev_sent_queue() argument
3258 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); in netdev_sent_queue()
3261 static inline bool __netdev_sent_queue(struct net_device *dev, in __netdev_sent_queue() argument
3265 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, in __netdev_sent_queue()
3303 static inline void netdev_completed_queue(struct net_device *dev, in netdev_completed_queue() argument
3306 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); in netdev_completed_queue()
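
netdev_sent_queue()/netdev_completed_queue() are the single-queue byte queue limit (BQL) hooks: bytes are reported once when a packet is posted to the hardware and again when the TX completion path reclaims it. A sketch for a driver with one TX ring (pkts_completed/bytes_completed are hypothetical counters from the cleanup loop):

    /* in .ndo_start_xmit, after the descriptor has been posted */
    netdev_sent_queue(dev, skb->len);

    /* in the TX completion path, after reclaiming descriptors */
    netdev_completed_queue(dev, pkts_completed, bytes_completed);
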
3337 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) in netdev_cap_txqueue() argument
3339 if (unlikely(queue_index >= dev->real_num_tx_queues)) { in netdev_cap_txqueue()
3341 dev->name, queue_index, in netdev_cap_txqueue()
3342 dev->real_num_tx_queues); in netdev_cap_txqueue()
3355 static inline bool netif_running(const struct net_device *dev) in netif_running() argument
3357 return test_bit(__LINK_STATE_START, &dev->state); in netif_running()
3374 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) in netif_start_subqueue() argument
3376 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_start_subqueue()
3388 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) in netif_stop_subqueue() argument
3390 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_stop_subqueue()
3401 static inline bool __netif_subqueue_stopped(const struct net_device *dev, in __netif_subqueue_stopped() argument
3404 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in __netif_subqueue_stopped()
3409 static inline bool netif_subqueue_stopped(const struct net_device *dev, in netif_subqueue_stopped() argument
3412 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); in netif_subqueue_stopped()
3422 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) in netif_wake_subqueue() argument
3424 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_wake_subqueue()
3430 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3432 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3519 static inline int netif_set_xps_queue(struct net_device *dev, in netif_set_xps_queue() argument
3526 static inline int __netif_set_xps_queue(struct net_device *dev, in __netif_set_xps_queue() argument
3540 static inline bool netif_is_multiqueue(const struct net_device *dev) in netif_is_multiqueue() argument
3542 return dev->num_tx_queues > 1; in netif_is_multiqueue()
3545 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3548 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3550 static inline int netif_set_real_num_rx_queues(struct net_device *dev, in netif_set_real_num_rx_queues() argument
3553 dev->real_num_rx_queues = rxqs; in netif_set_real_num_rx_queues()
3559 __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) in __netif_get_rx_queue() argument
3561 return dev->_rx + rxq; in __netif_get_rx_queue()
3568 struct net_device *dev = queue->dev; in get_netdev_rx_queue_index() local
3569 int index = queue - dev->_rx; in get_netdev_rx_queue_index()
3571 BUG_ON(index >= dev->num_rx_queues); in get_netdev_rx_queue_index()
3646 bool netdev_is_rx_handler_busy(struct net_device *dev);
3647 int netdev_rx_handler_register(struct net_device *dev,
3650 void netdev_rx_handler_unregister(struct net_device *dev);
3658 int __dev_change_flags(struct net_device *dev, unsigned int flags,
3660 int dev_change_flags(struct net_device *dev, unsigned int flags,
3669 int dev_validate_mtu(struct net_device *dev, int mtu,
3671 int dev_set_mtu_ext(struct net_device *dev, int mtu,
3676 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3678 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3681 int dev_get_phys_port_id(struct net_device *dev,
3683 int dev_get_phys_port_name(struct net_device *dev,
3685 int dev_get_port_parent_id(struct net_device *dev,
3688 int dev_change_proto_down(struct net_device *dev, bool proto_down);
3689 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
3690 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3691 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3694 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3695 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3697 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3699 int xdp_umem_query(struct net_device *dev, u16 queue_id);
3701 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3702 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3703 bool is_skb_forwardable(const struct net_device *dev,
3706 static __always_inline int ____dev_forward_skb(struct net_device *dev, in ____dev_forward_skb() argument
3710 unlikely(!is_skb_forwardable(dev, skb))) { in ____dev_forward_skb()
3711 atomic_long_inc(&dev->rx_dropped); in ____dev_forward_skb()
3721 bool dev_nit_active(struct net_device *dev);
3722 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3736 static inline void dev_put(struct net_device *dev) in dev_put() argument
3738 this_cpu_dec(*dev->pcpu_refcnt); in dev_put()
3747 static inline void dev_hold(struct net_device *dev) in dev_hold() argument
3749 this_cpu_inc(*dev->pcpu_refcnt); in dev_hold()
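
dev_hold()/dev_put() adjust the per-cpu reference count that keeps a struct net_device from being freed while another context still holds the pointer. The pattern is simply a hold before the pointer is stashed and a put when it is dropped (priv->peer is a hypothetical long-lived reference):

    dev_hold(dev);          /* pin the device before stashing the pointer */
    priv->peer = dev;
    /* ... later, when the reference is released ... */
    dev_put(priv->peer);
    priv->peer = NULL;
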
3761 void linkwatch_init_dev(struct net_device *dev);
3762 void linkwatch_fire_event(struct net_device *dev);
3763 void linkwatch_forget_dev(struct net_device *dev);
3771 static inline bool netif_carrier_ok(const struct net_device *dev) in netif_carrier_ok() argument
3773 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); in netif_carrier_ok()
3776 unsigned long dev_trans_start(struct net_device *dev);
3778 void __netdev_watchdog_up(struct net_device *dev);
3780 void netif_carrier_on(struct net_device *dev);
3782 void netif_carrier_off(struct net_device *dev);
3796 static inline void netif_dormant_on(struct net_device *dev) in netif_dormant_on() argument
3798 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_on()
3799 linkwatch_fire_event(dev); in netif_dormant_on()
3808 static inline void netif_dormant_off(struct net_device *dev) in netif_dormant_off() argument
3810 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_off()
3811 linkwatch_fire_event(dev); in netif_dormant_off()
3820 static inline bool netif_dormant(const struct net_device *dev) in netif_dormant() argument
3822 return test_bit(__LINK_STATE_DORMANT, &dev->state); in netif_dormant()
3832 static inline bool netif_oper_up(const struct net_device *dev) in netif_oper_up() argument
3834 return (dev->operstate == IF_OPER_UP || in netif_oper_up()
3835 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); in netif_oper_up()
3844 static inline bool netif_device_present(struct net_device *dev) in netif_device_present() argument
3846 return test_bit(__LINK_STATE_PRESENT, &dev->state); in netif_device_present()
3849 void netif_device_detach(struct net_device *dev);
3851 void netif_device_attach(struct net_device *dev);
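
netif_device_detach()/netif_device_attach() bracket the window in which the hardware is unreachable, most commonly suspend/resume: detach clears __LINK_STATE_PRESENT and stops the TX queues, attach restores them. A sketch of a PM callback pair, assuming the net_device is stored as drvdata and mydev_hw_down()/mydev_hw_up() are hypothetical hardware helpers:

    static int mydev_suspend(struct device *d)
    {
        struct net_device *dev = dev_get_drvdata(d);

        netif_device_detach(dev);
        if (netif_running(dev))
            mydev_hw_down(dev);
        return 0;
    }

    static int mydev_resume(struct device *d)
    {
        struct net_device *dev = dev_get_drvdata(d);

        if (netif_running(dev))
            mydev_hw_up(dev);
        netif_device_attach(dev);
        return 0;
    }
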
3952 static inline void netif_trans_update(struct net_device *dev) in netif_trans_update() argument
3954 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); in netif_trans_update()
3966 static inline void netif_tx_lock(struct net_device *dev) in netif_tx_lock() argument
3971 spin_lock(&dev->tx_global_lock); in netif_tx_lock()
3973 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_lock()
3974 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_lock()
3988 static inline void netif_tx_lock_bh(struct net_device *dev) in netif_tx_lock_bh() argument
3991 netif_tx_lock(dev); in netif_tx_lock_bh()
3994 static inline void netif_tx_unlock(struct net_device *dev) in netif_tx_unlock() argument
3998 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_unlock()
3999 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_unlock()
4008 spin_unlock(&dev->tx_global_lock); in netif_tx_unlock()
4011 static inline void netif_tx_unlock_bh(struct net_device *dev) in netif_tx_unlock_bh() argument
4013 netif_tx_unlock(dev); in netif_tx_unlock_bh()
4017 #define HARD_TX_LOCK(dev, txq, cpu) { \ argument
4018 if ((dev->features & NETIF_F_LLTX) == 0) { \
4025 #define HARD_TX_TRYLOCK(dev, txq) \ argument
4026 (((dev->features & NETIF_F_LLTX) == 0) ? \
4030 #define HARD_TX_UNLOCK(dev, txq) { \ argument
4031 if ((dev->features & NETIF_F_LLTX) == 0) { \
4038 static inline void netif_tx_disable(struct net_device *dev) in netif_tx_disable() argument
4045 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_disable()
4046 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_disable()
4055 static inline void netif_addr_lock(struct net_device *dev) in netif_addr_lock() argument
4057 spin_lock(&dev->addr_list_lock); in netif_addr_lock()
4060 static inline void netif_addr_lock_bh(struct net_device *dev) in netif_addr_lock_bh() argument
4062 spin_lock_bh(&dev->addr_list_lock); in netif_addr_lock_bh()
4065 static inline void netif_addr_unlock(struct net_device *dev) in netif_addr_unlock() argument
4067 spin_unlock(&dev->addr_list_lock); in netif_addr_unlock()
4070 static inline void netif_addr_unlock_bh(struct net_device *dev) in netif_addr_unlock_bh() argument
4072 spin_unlock_bh(&dev->addr_list_lock); in netif_addr_unlock_bh()
4079 #define for_each_dev_addr(dev, ha) \ argument
4080 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4084 void ether_setup(struct net_device *dev);
4091 int dev_get_valid_name(struct net *net, struct net_device *dev,
4101 int register_netdev(struct net_device *dev);
4102 void unregister_netdev(struct net_device *dev);
4110 struct net_device *dev,
4115 struct net_device *dev,
4121 struct net_device *dev,
4125 struct net_device *dev,
4131 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4133 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4135 void dev_addr_flush(struct net_device *dev);
4136 int dev_addr_init(struct net_device *dev);
4139 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4140 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4141 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4145 void dev_uc_flush(struct net_device *dev);
4146 void dev_uc_init(struct net_device *dev);
4157 static inline int __dev_uc_sync(struct net_device *dev, in __dev_uc_sync() argument
4163 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); in __dev_uc_sync()
4173 static inline void __dev_uc_unsync(struct net_device *dev, in __dev_uc_unsync() argument
4177 __hw_addr_unsync_dev(&dev->uc, dev, unsync); in __dev_uc_unsync()
4181 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4182 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4183 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4184 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4185 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4189 void dev_mc_flush(struct net_device *dev);
4190 void dev_mc_init(struct net_device *dev);
4201 static inline int __dev_mc_sync(struct net_device *dev, in __dev_mc_sync() argument
4207 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); in __dev_mc_sync()
4217 static inline void __dev_mc_unsync(struct net_device *dev, in __dev_mc_unsync() argument
4221 __hw_addr_unsync_dev(&dev->mc, dev, unsync); in __dev_mc_unsync()
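
As an alternative to walking the lists by hand, __dev_uc_sync()/__dev_mc_sync() keep a hardware filter in step with the kernel's address lists by calling per-address sync/unsync callbacks, usually from .ndo_set_rx_mode. A sketch where the mydev_hw_* filter helpers are hypothetical:

    static int mydev_mc_sync(struct net_device *dev, const unsigned char *addr)
    {
        return mydev_hw_add_mc_filter(dev, addr);   /* add one address to the HW filter */
    }

    static int mydev_mc_unsync(struct net_device *dev, const unsigned char *addr)
    {
        mydev_hw_del_mc_filter(dev, addr);
        return 0;
    }

    static void mydev_set_rx_mode(struct net_device *dev)
    {
        __dev_mc_sync(dev, mydev_mc_sync, mydev_mc_unsync);
    }
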
4225 void dev_set_rx_mode(struct net_device *dev);
4226 void __dev_set_rx_mode(struct net_device *dev);
4227 int dev_set_promiscuity(struct net_device *dev, int inc);
4228 int dev_set_allmulti(struct net_device *dev, int inc);
4229 void netdev_state_change(struct net_device *dev);
4230 void netdev_notify_peers(struct net_device *dev);
4231 void netdev_features_change(struct net_device *dev);
4234 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4248 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4249 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4251 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4255 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ argument
4256 for (iter = &(dev)->adj_list.upper, \
4257 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4259 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4261 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4266 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4269 bool netdev_has_any_upper_dev(struct net_device *dev);
4271 void *netdev_lower_get_next_private(struct net_device *dev,
4273 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4276 #define netdev_for_each_lower_private(dev, priv, iter) \ argument
4277 for (iter = (dev)->adj_list.lower.next, \
4278 priv = netdev_lower_get_next_private(dev, &(iter)); \
4280 priv = netdev_lower_get_next_private(dev, &(iter)))
4282 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ argument
4283 for (iter = &(dev)->adj_list.lower, \
4284 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4286 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4288 void *netdev_lower_get_next(struct net_device *dev,
4291 #define netdev_for_each_lower_dev(dev, ldev, iter) \ argument
4292 for (iter = (dev)->adj_list.lower.next, \
4293 ldev = netdev_lower_get_next(dev, &(iter)); \
4295 ldev = netdev_lower_get_next(dev, &(iter)))
4297 struct net_device *netdev_all_lower_get_next(struct net_device *dev,
4299 struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
4302 int netdev_walk_all_lower_dev(struct net_device *dev,
4306 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4312 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
4313 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4314 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
4315 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4317 int netdev_master_upper_dev_link(struct net_device *dev,
4321 void netdev_upper_dev_unlink(struct net_device *dev,
4325 struct net_device *dev,
4329 struct net_device *dev);
4332 struct net_device *dev);
4333 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
4334 void *netdev_lower_dev_get_private(struct net_device *dev,
4364 void netdev_bonding_info_change(struct net_device *dev,
4398 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
4400 static inline void netdev_rx_csum_fault(struct net_device *dev, in netdev_rx_csum_fault() argument
4416 struct sk_buff *skb, struct net_device *dev, in __netdev_start_xmit() argument
4420 return ops->ndo_start_xmit(skb, dev); in __netdev_start_xmit()
4428 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, in netdev_start_xmit() argument
4431 const struct net_device_ops *ops = dev->netdev_ops; in netdev_start_xmit()
4434 rc = __netdev_start_xmit(ops, skb, dev, more); in netdev_start_xmit()
4458 const char *netdev_drivername(const struct net_device *dev);
4476 struct net_device *dev) in netdev_get_wanted_features() argument
4478 return (dev->features & ~dev->hw_features) | dev->wanted_features; in netdev_get_wanted_features()
4493 int __netdev_update_features(struct net_device *dev);
4494 void netdev_update_features(struct net_device *dev);
4495 void netdev_change_features(struct net_device *dev);
4498 struct net_device *dev);
4501 struct net_device *dev,
4546 static inline void netif_set_gso_max_size(struct net_device *dev, in netif_set_gso_max_size() argument
4549 dev->gso_max_size = size; in netif_set_gso_max_size()
4565 static inline bool netif_is_macsec(const struct net_device *dev) in netif_is_macsec() argument
4567 return dev->priv_flags & IFF_MACSEC; in netif_is_macsec()
4570 static inline bool netif_is_macvlan(const struct net_device *dev) in netif_is_macvlan() argument
4572 return dev->priv_flags & IFF_MACVLAN; in netif_is_macvlan()
4575 static inline bool netif_is_macvlan_port(const struct net_device *dev) in netif_is_macvlan_port() argument
4577 return dev->priv_flags & IFF_MACVLAN_PORT; in netif_is_macvlan_port()
4580 static inline bool netif_is_bond_master(const struct net_device *dev) in netif_is_bond_master() argument
4582 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; in netif_is_bond_master()
4585 static inline bool netif_is_bond_slave(const struct net_device *dev) in netif_is_bond_slave() argument
4587 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; in netif_is_bond_slave()
4590 static inline bool netif_supports_nofcs(struct net_device *dev) in netif_supports_nofcs() argument
4592 return dev->priv_flags & IFF_SUPP_NOFCS; in netif_supports_nofcs()
4595 static inline bool netif_has_l3_rx_handler(const struct net_device *dev) in netif_has_l3_rx_handler() argument
4597 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; in netif_has_l3_rx_handler()
4600 static inline bool netif_is_l3_master(const struct net_device *dev) in netif_is_l3_master() argument
4602 return dev->priv_flags & IFF_L3MDEV_MASTER; in netif_is_l3_master()
4605 static inline bool netif_is_l3_slave(const struct net_device *dev) in netif_is_l3_slave() argument
4607 return dev->priv_flags & IFF_L3MDEV_SLAVE; in netif_is_l3_slave()
4610 static inline bool netif_is_bridge_master(const struct net_device *dev) in netif_is_bridge_master() argument
4612 return dev->priv_flags & IFF_EBRIDGE; in netif_is_bridge_master()
4615 static inline bool netif_is_bridge_port(const struct net_device *dev) in netif_is_bridge_port() argument
4617 return dev->priv_flags & IFF_BRIDGE_PORT; in netif_is_bridge_port()
4620 static inline bool netif_is_ovs_master(const struct net_device *dev) in netif_is_ovs_master() argument
4622 return dev->priv_flags & IFF_OPENVSWITCH; in netif_is_ovs_master()
4625 static inline bool netif_is_ovs_port(const struct net_device *dev) in netif_is_ovs_port() argument
4627 return dev->priv_flags & IFF_OVS_DATAPATH; in netif_is_ovs_port()
4630 static inline bool netif_is_team_master(const struct net_device *dev) in netif_is_team_master() argument
4632 return dev->priv_flags & IFF_TEAM; in netif_is_team_master()
4635 static inline bool netif_is_team_port(const struct net_device *dev) in netif_is_team_port() argument
4637 return dev->priv_flags & IFF_TEAM_PORT; in netif_is_team_port()
4640 static inline bool netif_is_lag_master(const struct net_device *dev) in netif_is_lag_master() argument
4642 return netif_is_bond_master(dev) || netif_is_team_master(dev); in netif_is_lag_master()
4645 static inline bool netif_is_lag_port(const struct net_device *dev) in netif_is_lag_port() argument
4647 return netif_is_bond_slave(dev) || netif_is_team_port(dev); in netif_is_lag_port()
4650 static inline bool netif_is_rxfh_configured(const struct net_device *dev) in netif_is_rxfh_configured() argument
4652 return dev->priv_flags & IFF_RXFH_CONFIGURED; in netif_is_rxfh_configured()
4655 static inline bool netif_is_failover(const struct net_device *dev) in netif_is_failover() argument
4657 return dev->priv_flags & IFF_FAILOVER; in netif_is_failover()
4660 static inline bool netif_is_failover_slave(const struct net_device *dev) in netif_is_failover_slave() argument
4662 return dev->priv_flags & IFF_FAILOVER_SLAVE; in netif_is_failover_slave()
4666 static inline void netif_keep_dst(struct net_device *dev) in netif_keep_dst() argument
4668 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); in netif_keep_dst()
4672 static inline bool netif_reduces_vlan_mtu(struct net_device *dev) in netif_reduces_vlan_mtu() argument
4675 return dev->priv_flags & IFF_MACSEC; in netif_reduces_vlan_mtu()
4684 static inline const char *netdev_name(const struct net_device *dev) in netdev_name() argument
4686 if (!dev->name[0] || strchr(dev->name, '%')) in netdev_name()
4688 return dev->name; in netdev_name()
4691 static inline bool netdev_unregistering(const struct net_device *dev) in netdev_unregistering() argument
4693 return dev->reg_state == NETREG_UNREGISTERING; in netdev_unregistering()
4696 static inline const char *netdev_reg_state(const struct net_device *dev) in netdev_reg_state() argument
4698 switch (dev->reg_state) { in netdev_reg_state()
4707 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); in netdev_reg_state()
4712 void netdev_printk(const char *level, const struct net_device *dev,
4715 void netdev_emerg(const struct net_device *dev, const char *format, ...);
4717 void netdev_alert(const struct net_device *dev, const char *format, ...);
4719 void netdev_crit(const struct net_device *dev, const char *format, ...);
4721 void netdev_err(const struct net_device *dev, const char *format, ...);
4723 void netdev_warn(const struct net_device *dev, const char *format, ...);
4725 void netdev_notice(const struct net_device *dev, const char *format, ...);
4727 void netdev_info(const struct net_device *dev, const char *format, ...);
4729 #define netdev_level_once(level, dev, fmt, ...) \ argument
4735 netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
4739 #define netdev_emerg_once(dev, fmt, ...) \ argument
4740 netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
4741 #define netdev_alert_once(dev, fmt, ...) \ argument
4742 netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
4743 #define netdev_crit_once(dev, fmt, ...) \ argument
4744 netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
4745 #define netdev_err_once(dev, fmt, ...) \ argument
4746 netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
4747 #define netdev_warn_once(dev, fmt, ...) \ argument
4748 netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
4749 #define netdev_notice_once(dev, fmt, ...) \ argument
4750 netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
4751 #define netdev_info_once(dev, fmt, ...) \ argument
4752 netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
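
The netdev_*() printk wrappers prefix messages with the driver and device name, and the *_once variants emit a given message only the first time it is hit. Typical calls look like this (speed and duplex are placeholder variables):

    netdev_info(dev, "link up, %u Mbps, %s duplex\n",
                speed, duplex ? "full" : "half");
    netdev_err_once(dev, "DMA mapping failed, dropping packet\n");
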
4777 #define netdev_vdbg(dev, format, args...) \ argument
4780 netdev_printk(KERN_DEBUG, dev, format, ##args); \
4790 #define netdev_WARN(dev, format, args...) \ argument
4791 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
4792 netdev_reg_state(dev), ##args)
4794 #define netdev_WARN_ONCE(dev, format, args...) \ argument
4795 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
4796 netdev_reg_state(dev), ##args)
4800 #define netif_printk(priv, type, level, dev, fmt, args...) \ argument
4803 netdev_printk(level, (dev), fmt, ##args); \
4806 #define netif_level(level, priv, type, dev, fmt, args...) \ argument
4809 netdev_##level(dev, fmt, ##args); \
4812 #define netif_emerg(priv, type, dev, fmt, args...) \ argument
4813 netif_level(emerg, priv, type, dev, fmt, ##args)
4814 #define netif_alert(priv, type, dev, fmt, args...) \ argument
4815 netif_level(alert, priv, type, dev, fmt, ##args)
4816 #define netif_crit(priv, type, dev, fmt, args...) \ argument
4817 netif_level(crit, priv, type, dev, fmt, ##args)
4818 #define netif_err(priv, type, dev, fmt, args...) \ argument
4819 netif_level(err, priv, type, dev, fmt, ##args)
4820 #define netif_warn(priv, type, dev, fmt, args...) \ argument
4821 netif_level(warn, priv, type, dev, fmt, ##args)
4822 #define netif_notice(priv, type, dev, fmt, args...) \ argument
4823 netif_level(notice, priv, type, dev, fmt, ##args)
4824 #define netif_info(priv, type, dev, fmt, args...) \ argument
4825 netif_level(info, priv, type, dev, fmt, ##args)
4834 #define netif_dbg(priv, type, dev, format, args...) \ argument
4835 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
4837 #define netif_dbg(priv, type, dev, format, args...) \ argument
4840 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4857 #define netif_vdbg(priv, type, dev, format, args...) \ argument
4860 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \