Lines matching refs: dev_queue (TX queue state and BQL helpers in include/linux/netdevice.h)
static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
void netif_tx_wake_queue(struct netdev_queue *dev_queue);
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
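Together these helpers implement driver-side flow control on a TX queue: netif_tx_start_queue() is normally called once when the interface is brought up, after which the stop/wake pair throttles the stack against the hardware ring. A minimal sketch of the usual pattern follows; my_ring_space(), MY_STOP_THRESH and MY_WAKE_THRESH are hypothetical driver-private names, not kernel API:

/* Sketch: the xmit side stops the queue while the ring is (nearly) full. */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... map buffers and post skb to the hardware ring ... */

	if (my_ring_space(dev) < MY_STOP_THRESH)	/* hypothetical helper */
		netif_tx_stop_queue(txq);
	return NETDEV_TX_OK;
}

/* Sketch: the completion side wakes the queue once descriptors are reclaimed. */
static void my_tx_complete(struct net_device *dev, struct netdev_queue *txq)
{
	/* ... reclaim completed descriptors ... */

	if (netif_tx_queue_stopped(txq) && my_ring_space(dev) >= MY_WAKE_THRESH)
		netif_tx_wake_queue(txq);
}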
static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}
static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}
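The masks tested by the three predicates above combine the per-queue state bits: __QUEUE_STATE_DRV_XOFF (the driver stopped the queue), __QUEUE_STATE_STACK_XOFF (BQL stopped it) and __QUEUE_STATE_FROZEN (the qdisc layer temporarily froze it). netdevice.h builds the masks roughly as follows:

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF		(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN	(QUEUE_STATE_ANY_XOFF | QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_FROZEN)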
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}
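Both helpers are pure performance hints: they prefetch, for write, the dql fields that netdev_tx_sent_queue() and netdev_tx_completed_queue() will update, so the cache miss overlaps with the descriptor work done in between. A hedged sketch of the intended placement in a driver's xmit path:

	netdev_txq_bql_enqueue_prefetchw(txq);	/* early in ndo_start_xmit() */
	/* ... map buffers, fill and publish descriptors ... */
	netdev_tx_sent_queue(txq, skb->len);	/* touches dql.num_queued */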
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/* The XOFF bit must be visible before dql_avail() is re-read below;
	 * pairs with the smp_mb() in netdev_tx_completed_queue(). */
	smp_mb();

	/* check again in case another CPU has just made room available */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
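A BQL-enabled driver calls netdev_tx_sent_queue() from ndo_start_xmit() once the skb has been posted, either per packet or once per batch with the byte total. A minimal sketch, with my_post_to_ring() as a hypothetical stand-in for the driver's ring code:

static netdev_tx_t my_bql_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	if (my_post_to_ring(dev, skb))		/* hypothetical; fills descriptors */
		return NETDEV_TX_BUSY;

	/* Account the bytes; BQL may set __QUEUE_STATE_STACK_XOFF here. */
	netdev_tx_sent_queue(txq, skb->len);
	return NETDEV_TX_OK;
}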
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/* Without this barrier netdev_tx_sent_queue() could miss the update
	 * and leave the queue stopped forever. */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}
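The completion side totals reclaimed packets and bytes and reports them in one batched call; if BQL had stopped the queue, the test_and_clear above reschedules it. A sketch, with my_reclaim_one() as a hypothetical descriptor-cleanup helper:

static void my_bql_tx_clean(struct net_device *dev, struct netdev_queue *txq)
{
	unsigned int pkts = 0, bytes = 0;
	struct sk_buff *skb;

	netdev_txq_bql_complete_prefetchw(txq);	/* warm dql.limit for write */

	while ((skb = my_reclaim_one(dev)) != NULL) {	/* hypothetical */
		pkts++;
		bytes += skb->len;
		dev_consume_skb_any(skb);
	}

	if (pkts)
		netdev_tx_completed_queue(txq, pkts, bytes);
}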
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
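Note that this last match is a struct net_device * that merely happens to be named dev_queue, which is why it appears in the search. netdev_reset_queue() resets the BQL state of TX queue 0 only; a multiqueue driver would instead reset each queue during a device reset, along these lines:

/* Sketch: clear BQL accounting on every TX queue after a HW reset. */
static void my_reset_all_tx_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues; i++)
		netdev_tx_reset_queue(netdev_get_tx_queue(dev, i));
}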