Lines Matching refs:txq
2388 void netif_schedule_queue(struct netdev_queue *txq);
2419 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_start_all_queues() local
2420 netif_tx_start_queue(txq); in netif_tx_start_all_queues()
2443 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_wake_all_queues() local
2444 netif_tx_wake_queue(txq); in netif_tx_wake_all_queues()
2474 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_stop_all_queues() local
2475 netif_tx_stop_queue(txq); in netif_tx_stop_all_queues()
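A minimal sketch of how a driver might use the all-queue helpers above around a device reset; the foo_* name and the reset flow are assumptions, not part of the listing (assumes <linux/netdevice.h>):

	static void foo_reset(struct net_device *dev)
	{
		netif_tx_stop_all_queues(dev);	/* mark every TX queue stopped */

		/* ... reinitialise the hardware TX rings here ... */

		netif_tx_wake_all_queues(dev);	/* restart and reschedule each queue */
	}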
2685 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_start_subqueue() local
2687 netif_tx_start_queue(txq); in netif_start_subqueue()
2699 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_stop_subqueue() local
2700 netif_tx_stop_queue(txq); in netif_stop_subqueue()
2713 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in __netif_subqueue_stopped() local
2715 return netif_tx_queue_stopped(txq); in __netif_subqueue_stopped()
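The per-subqueue variants are typically used from a multiqueue driver's transmit and completion paths; a hedged sketch, where foo_ring_full()/foo_ring_has_room() are hypothetical ring-space checks:

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		u16 q = skb_get_queue_mapping(skb);

		/* ... post skb to the hardware ring backing queue q ... */

		if (foo_ring_full(dev, q))
			netif_stop_subqueue(dev, q);	/* stop only this queue */
		return NETDEV_TX_OK;
	}

	/* from the TX completion handler for queue q */
	if (__netif_subqueue_stopped(dev, q) && foo_ring_has_room(dev, q))
		netif_wake_subqueue(dev, q);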
2759 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
2890 struct netdev_queue *txq, int *ret);
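netif_set_real_num_tx_queues() (line 2759 above) is how a driver tells the stack how many of its allocated TX queues are actually usable; a sketch, with foo_open and the queue count of 4 purely illustrative:

	static int foo_open(struct net_device *dev)
	{
		int err;

		err = netif_set_real_num_tx_queues(dev, 4);
		if (err)
			return err;

		/* ... allocate rings, enable interrupts ... */
		return 0;
	}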
3073 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) in __netif_tx_lock() argument
3075 spin_lock(&txq->_xmit_lock); in __netif_tx_lock()
3076 txq->xmit_lock_owner = cpu; in __netif_tx_lock()
3079 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) in __netif_tx_lock_bh() argument
3081 spin_lock_bh(&txq->_xmit_lock); in __netif_tx_lock_bh()
3082 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_lock_bh()
3085 static inline bool __netif_tx_trylock(struct netdev_queue *txq) in __netif_tx_trylock() argument
3087 bool ok = spin_trylock(&txq->_xmit_lock); in __netif_tx_trylock()
3089 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_trylock()
3093 static inline void __netif_tx_unlock(struct netdev_queue *txq) in __netif_tx_unlock() argument
3095 txq->xmit_lock_owner = -1; in __netif_tx_unlock()
3096 spin_unlock(&txq->_xmit_lock); in __netif_tx_unlock()
3099 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) in __netif_tx_unlock_bh() argument
3101 txq->xmit_lock_owner = -1; in __netif_tx_unlock_bh()
3102 spin_unlock_bh(&txq->_xmit_lock); in __netif_tx_unlock_bh()
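A sketch of the usual pattern for the __netif_tx_lock*() primitives: a TX completion handler takes the per-queue _xmit_lock to serialise against ndo_start_xmit. foo_clean_ring() is hypothetical:

	static void foo_tx_complete(struct net_device *dev, unsigned int idx)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, idx);

		__netif_tx_lock_bh(txq);	/* take _xmit_lock, record owner CPU */
		foo_clean_ring(dev, idx);	/* reclaim completed descriptors */
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock_bh(txq);	/* reset owner to -1, drop the lock */
	}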
3105 static inline void txq_trans_update(struct netdev_queue *txq) in txq_trans_update() argument
3107 if (txq->xmit_lock_owner != -1) in txq_trans_update()
3108 txq->trans_start = jiffies; in txq_trans_update()
3125 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_lock() local
3133 __netif_tx_lock(txq, cpu); in netif_tx_lock()
3134 set_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_lock()
3135 __netif_tx_unlock(txq); in netif_tx_lock()
3150 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_unlock() local
3156 clear_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_unlock()
3157 netif_schedule_queue(txq); in netif_tx_unlock()
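netif_tx_lock()/netif_tx_unlock() freeze every queue by setting __QUEUE_STATE_FROZEN, which netif_xmit_frozen_or_stopped() then reports to the qdisc transmit path; a hedged sketch of a short reconfiguration window using the BH variants:

	netif_tx_lock_bh(dev);		/* take each queue's lock, set FROZEN */
	/* ... update shared TX state that ndo_start_xmit must not observe ... */
	netif_tx_unlock_bh(dev);	/* clear FROZEN, reschedule the queues */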
3168 #define HARD_TX_LOCK(dev, txq, cpu) { \ argument
3170 __netif_tx_lock(txq, cpu); \
3174 #define HARD_TX_TRYLOCK(dev, txq) \ argument
3176 __netif_tx_trylock(txq) : \
3179 #define HARD_TX_UNLOCK(dev, txq) { \ argument
3181 __netif_tx_unlock(txq); \
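The HARD_TX_* macros wrap the lock primitives with a NETIF_F_LLTX check so that lockless drivers skip _xmit_lock entirely. A sketch of the pattern the core transmit path follows (compare sch_direct_xmit()); skb, txq and the int ret are assumed locals:

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);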
3193 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_disable() local
3195 __netif_tx_lock(txq, cpu); in netif_tx_disable()
3196 netif_tx_stop_queue(txq); in netif_tx_disable()
3197 __netif_tx_unlock(txq); in netif_tx_disable()
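netif_tx_disable() stops every queue while holding its _xmit_lock, so when it returns no ndo_start_xmit is still running on another CPU; a sketch of the typical caller, with foo_stop hypothetical:

	static int foo_stop(struct net_device *dev)
	{
		netif_tx_disable(dev);
		/* ... mask TX interrupts, free the rings ... */
		return 0;
	}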
3504 struct netdev_queue *txq, bool more) in netdev_start_xmit() argument
3511 txq_trans_update(txq); in netdev_start_xmit()
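netdev_start_xmit() hands the skb to ndo_start_xmit and, on a successful transmit, calls txq_trans_update() (line 3105 above) to refresh trans_start for the TX watchdog; the xmit_lock_owner check there skips the update for LLTX drivers, which never take _xmit_lock. A hedged caller sketch, with foo_xmit_one hypothetical:

	static netdev_tx_t foo_xmit_one(struct sk_buff *skb, struct net_device *dev,
					struct netdev_queue *txq, bool more)
	{
		netdev_tx_t rc;

		/* "more" hints that further skbs follow, so the driver may
		 * batch its doorbell write. */
		rc = netdev_start_xmit(skb, dev, txq, more);
		return rc;
	}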