Lines matching refs: dev_queue

3323 static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3325 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3349 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3373 static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3375 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3392 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3394 	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
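
These first hits are the driver-side flow-control helpers for a TX queue: netif_tx_stop_queue() sets __QUEUE_STATE_DRV_XOFF so the stack stops handing the driver packets, netif_tx_queue_stopped() tests that bit, and netif_tx_wake_queue() clears it and reschedules the queue. A minimal sketch of the usual stop/wake pattern follows; my_post_to_ring(), my_ring_space() and the two thresholds are hypothetical driver internals, not kernel APIs:

#include <linux/netdevice.h>

/* Hedged sketch: stop the queue before the ring can overflow, wake it
 * from the completion path once enough descriptors are reclaimed. */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	my_post_to_ring(dev, skb);                   /* hypothetical */

	if (my_ring_space(dev) < MY_TX_STOP_THRESH)  /* hypothetical */
		netif_tx_stop_queue(txq);            /* sets __QUEUE_STATE_DRV_XOFF */

	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev, struct netdev_queue *txq)
{
	/* ... reclaim finished descriptors ... */
	smp_mb();  /* order the reclaim against the stopped test below */

	if (netif_tx_queue_stopped(txq) && my_ring_space(dev) >= MY_TX_WAKE_THRESH)
		netif_tx_wake_queue(txq);            /* clears the bit, reschedules */
}
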
3408 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3410 	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3414 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3416 	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3420 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3422 	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
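
These predicates differ only in which state bits they test: netif_xmit_stopped() reports any XOFF bit (driver-stopped or BQL-stopped), while the frozen variants also catch __QUEUE_STATE_FROZEN, which the core sets while it temporarily holds the queue (netif_tx_lock()). A hedged sketch of a guard in an asynchronous queue-kick path; my_kick_hardware() is a hypothetical placeholder:

#include <linux/netdevice.h>

/* Hedged sketch: skip queues that are stopped or frozen before
 * touching the hardware from a restart path. */
static void my_maybe_kick(struct net_device *dev, unsigned int qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

	if (netif_xmit_frozen_or_stopped(txq))
		return;                  /* DRV_XOFF, STACK_XOFF or FROZEN set */

	my_kick_hardware(dev, qidx);     /* hypothetical doorbell helper */
}
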
3432 static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3435 	prefetchw(&dev_queue->dql.num_queued);
3446 static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3449 	prefetchw(&dev_queue->dql.limit);
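
The two prefetch helpers are pure optimizations: they issue a write-prefetch for exactly the DQL fields the BQL accounting will touch (dql.num_queued when enqueuing, dql.limit when completing), so the cache miss overlaps the descriptor work. A sketch of the enqueue side, assuming a hypothetical single-queue driver; the completion-side helper appears in the sketch after the netdev_tx_completed_queue hits below:

#include <linux/netdevice.h>

/* Hedged sketch: warm the BQL cacheline at the top of the hot path so
 * the netdev_tx_sent_queue() call at the bottom does not stall on it. */
static netdev_tx_t my_xmit_prefetch(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	netdev_txq_bql_enqueue_prefetchw(txq);  /* prefetch dql.num_queued for write */

	/* ... map buffers and write descriptors; the prefetch overlaps this ... */

	netdev_tx_sent_queue(txq, skb->len);    /* cacheline is already hot */
	return NETDEV_TX_OK;
}
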
3453 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3457 	dql_queued(&dev_queue->dql, bytes);
3459 	if (likely(dql_avail(&dev_queue->dql) >= 0))
3462 	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3472 	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3473 		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3483 static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3489 	dql_queued(&dev_queue->dql, bytes);
3491 		return netif_tx_queue_stopped(dev_queue);
3493 	netdev_tx_sent_queue(dev_queue, bytes);
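
netdev_tx_sent_queue() is the BQL enqueue hook: it credits bytes to the queue's DQL and sets __QUEUE_STATE_STACK_XOFF once the in-flight limit is exceeded, then rechecks after a barrier to avoid racing the completion side. __netdev_tx_sent_queue() wraps it for xmit_more batching and returns true when the doorbell must be rung now, either because the burst is ending or because BQL just stopped the queue. A hedged sketch of the doorbell-coalescing idiom; my_post_to_ring() and my_ring_doorbell() are hypothetical:

#include <linux/netdevice.h>

/* Hedged sketch: defer the (expensive) doorbell write while the stack
 * signals that more packets follow, but never defer past a BQL stop. */
static netdev_tx_t my_xmit_batched(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	my_post_to_ring(dev, skb);          /* hypothetical descriptor write */

	if (__netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more()))
		my_ring_doorbell(dev);      /* hypothetical MMIO kick */

	return NETDEV_TX_OK;
}
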
3519 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3526 	dql_completed(&dev_queue->dql, bytes);
3535 	if (unlikely(dql_avail(&dev_queue->dql) < 0))
3538 	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3539 		netif_schedule_queue(dev_queue);
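
netdev_tx_completed_queue() is the matching completion hook: it retires bytes from the DQL, lets the limit adapt, and, if __QUEUE_STATE_STACK_XOFF was set and dql_avail() is positive again, clears the bit and calls netif_schedule_queue() to restart transmission. A hedged completion-loop sketch; my_reclaim_skb() is a hypothetical per-descriptor helper:

#include <linux/netdevice.h>

/* Hedged sketch: batch per-skb counts and report them to BQL in one
 * netdev_tx_completed_queue() call at the end of the reclaim loop. */
static void my_clean_tx(struct net_device *dev, struct netdev_queue *txq)
{
	unsigned int pkts = 0, bytes = 0;
	struct sk_buff *skb;

	netdev_txq_bql_complete_prefetchw(txq);        /* prefetch dql.limit */

	while ((skb = my_reclaim_skb(dev)) != NULL) {  /* hypothetical */
		pkts++;
		bytes += skb->len;
		dev_consume_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, pkts, bytes);   /* may wake a BQL-stopped queue */
}
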
3574 static inline void netdev_reset_queue(struct net_device *dev_queue)
3576 	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
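
Despite its parameter name, netdev_reset_queue() takes a struct net_device and is a convenience wrapper for single-queue devices: it resets the DQL state of TX queue 0 via netdev_tx_reset_queue(). Drivers call it when a ring is torn down so stale in-flight byte counts cannot wedge the queue after it comes back up; multiqueue drivers call netdev_tx_reset_queue() per queue instead. A hedged sketch with a hypothetical my_free_tx_ring():

#include <linux/netdevice.h>

/* Hedged sketch: clear BQL state whenever the TX ring is torn down. */
static int my_close(struct net_device *dev)
{
	my_free_tx_ring(dev);     /* hypothetical ring teardown */
	netdev_reset_queue(dev);  /* resets DQL for netdev_get_tx_queue(dev, 0) */
	return 0;
}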