Searched refs:txq (Results 1 – 25 of 31) sorted by relevance


/net/sched/
sch_generic.c
61 const struct netdev_queue *txq, in try_bulk_dequeue_skb() argument
64 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; in try_bulk_dequeue_skb()
115 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb() local
122 txq = skb_get_tx_queue(txq->dev, skb); in dequeue_skb()
123 if (!netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
135 txq = skb_get_tx_queue(txq->dev, skb); in dequeue_skb()
136 if (!netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
145 !netif_xmit_frozen_or_stopped(txq)) in dequeue_skb()
150 try_bulk_dequeue_skb(q, skb, txq, packets); in dequeue_skb()
167 struct net_device *dev, struct netdev_queue *txq, in sch_direct_xmit() argument
[all …]
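
The dequeue_skb()/sch_direct_xmit() matches above share one idiom: re-resolve the skb's transmit queue, take the per-queue xmit lock, and only call the driver while the queue is neither stopped nor frozen. A minimal sketch of that idiom, assuming a hypothetical my_try_xmit() helper (the netdev accessors are the real ones quoted above):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/smp.h>

static netdev_tx_t my_try_xmit(struct net_device *dev, struct sk_buff *skb)
{
	/* resolve the queue from the skb's queue mapping */
	struct netdev_queue *txq = skb_get_tx_queue(dev, skb);
	netdev_tx_t ret = NETDEV_TX_BUSY;

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	return ret;	/* NETDEV_TX_BUSY: caller keeps ownership and requeues */
}
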
sch_teql.c
150 struct netdev_queue *txq; in teql_destroy() local
153 txq = netdev_get_tx_queue(master->dev, 0); in teql_destroy()
156 root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); in teql_destroy()
158 qdisc_reset(rtnl_dereference(txq->qdisc)); in teql_destroy()
220 struct net_device *dev, struct netdev_queue *txq, in __teql_resolve() argument
260 struct netdev_queue *txq) in teql_resolve() argument
265 if (rcu_access_pointer(txq->qdisc) == &noop_qdisc) in teql_resolve()
272 res = __teql_resolve(skb, skb_res, dev, txq, dst); in teql_resolve()
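
teql_destroy() above resets the qdisc attached to tx queue 0 under the root sleeping lock. A hedged sketch of the general shape, assuming a hypothetical my_reset_all_tx() that walks every tx queue under RTNL (the real teql code only touches queue 0 and takes qdisc_root_sleeping_lock()):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/sch_generic.h>

static void my_reset_all_tx(struct net_device *dev)
{
	unsigned int i;

	ASSERT_RTNL();	/* rtnl_dereference() below relies on RTNL */
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		struct Qdisc *q = rtnl_dereference(txq->qdisc);

		if (!q || q == &noop_qdisc)
			continue;
		spin_lock_bh(qdisc_lock(q));
		qdisc_reset(q);
		spin_unlock_bh(qdisc_lock(q));
	}
}
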
/net/core/
netpoll.c
73 struct netdev_queue *txq) in netpoll_start_xmit() argument
92 status = netdev_start_xmit(skb, dev, txq, false); in netpoll_start_xmit()
105 while ((skb = skb_dequeue(&npinfo->txq))) { in queue_process()
107 struct netdev_queue *txq; in queue_process() local
122 txq = netdev_get_tx_queue(dev, q_index); in queue_process()
123 HARD_TX_LOCK(dev, txq, smp_processor_id()); in queue_process()
124 if (netif_xmit_frozen_or_stopped(txq) || in queue_process()
125 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { in queue_process()
126 skb_queue_head(&npinfo->txq, skb); in queue_process()
127 HARD_TX_UNLOCK(dev, txq); in queue_process()
[all …]
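
queue_process() above drains npinfo->txq, a plain sk_buff_head used as a deferred transmit queue, and puts a packet back on the head when the device queue is stopped or the driver returns busy. A self-contained sketch of that retry loop; my_npinfo and my_queue_process() are illustrative stand-ins:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_npinfo {
	struct sk_buff_head txq;	/* private deferred-tx queue */
};

static void my_queue_process(struct my_npinfo *info, struct net_device *dev)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&info->txq))) {
		struct netdev_queue *txq =
			netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netdev_start_xmit(skb, dev, txq, false) != NETDEV_TX_OK) {
			/* stopped or driver busy: put it back, retry later */
			skb_queue_head(&info->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
	}
}
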
dev.c
1946 static void netif_setup_tc(struct net_device *dev, unsigned int txq) in netif_setup_tc() argument
1952 if (tc->offset + tc->count > txq) { in netif_setup_tc()
1963 if (tc->offset + tc->count > txq) { in netif_setup_tc()
2200 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) in netif_set_real_num_tx_queues() argument
2205 disabling = txq < dev->real_num_tx_queues; in netif_set_real_num_tx_queues()
2207 if (txq < 1 || txq > dev->num_tx_queues) in netif_set_real_num_tx_queues()
2215 txq); in netif_set_real_num_tx_queues()
2220 netif_setup_tc(dev, txq); in netif_set_real_num_tx_queues()
2222 dev->real_num_tx_queues = txq; in netif_set_real_num_tx_queues()
2226 qdisc_reset_all_tx_gt(dev, txq); in netif_set_real_num_tx_queues()
[all …]
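
netif_set_real_num_tx_queues() above is the API drivers use to shrink or grow the set of active tx queues at runtime; as the snippet shows, the count is validated against 1..dev->num_tx_queues and a shrink resets the now-unused qdiscs. A minimal usage sketch, assuming a hypothetical my_set_channels() driver helper (RTNL must be held):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int my_set_channels(struct net_device *dev, unsigned int count)
{
	int err;

	ASSERT_RTNL();	/* netif_set_real_num_tx_queues() requires RTNL */

	/* count must be in 1..dev->num_tx_queues or -EINVAL comes back */
	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, count);
}
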
net-sysfs.c
1357 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; in register_queue_kobjects() local
1376 txq = real_tx; in register_queue_kobjects()
1381 netdev_queue_update_kobjects(dev, txq, 0); in register_queue_kobjects()
pktgen.c
3380 struct netdev_queue *txq; in pktgen_xmit() local
3479 txq = skb_get_tx_queue(odev, pkt_dev->skb); in pktgen_xmit()
3483 HARD_TX_LOCK(odev, txq, smp_processor_id()); in pktgen_xmit()
3485 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { in pktgen_xmit()
3493 ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0); in pktgen_xmit()
3501 if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq)) in pktgen_xmit()
3522 HARD_TX_UNLOCK(odev, txq); in pktgen_xmit()
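
pktgen_xmit() above passes "--burst > 0" as the last netdev_start_xmit() argument: that is the xmit_more hint, telling the driver another frame follows immediately so it can batch doorbell writes. A sketch of the same burst pattern, with a hypothetical my_burst_xmit() and return/requeue handling elided:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/smp.h>

static void my_burst_xmit(struct net_device *dev, struct netdev_queue *txq,
			  struct sk_buff **skbs, int n)
{
	int i;

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	for (i = 0; i < n; i++) {
		if (netif_xmit_frozen_or_drv_stopped(txq))
			break;	/* error/requeue handling elided */
		/* "more" stays true while another frame follows immediately */
		netdev_start_xmit(skbs[i], dev, txq, i < n - 1);
	}
	HARD_TX_UNLOCK(dev, txq);
}
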
/net/mac80211/
agg-tx.c
196 struct ieee80211_txq *txq = sta->sta.txq[tid]; in ieee80211_agg_stop_txq() local
201 if (!txq) in ieee80211_agg_stop_txq()
204 txqi = to_txq_info(txq); in ieee80211_agg_stop_txq()
205 sdata = vif_to_sdata(txq->vif); in ieee80211_agg_stop_txq()
217 struct ieee80211_txq *txq = sta->sta.txq[tid]; in ieee80211_agg_start_txq() local
220 if (!txq) in ieee80211_agg_start_txq()
223 txqi = to_txq_info(txq); in ieee80211_agg_start_txq()
945 struct ieee80211_txq *txq; in ieee80211_process_addba_resp() local
956 txq = sta->sta.txq[tid]; in ieee80211_process_addba_resp()
957 if (!amsdu && txq) in ieee80211_process_addba_resp()
[all …]
sta_info.c
112 if (sta->sta.txq[0]) { in __cleanup_single_sta()
113 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { in __cleanup_single_sta()
114 struct txq_info *txqi = to_txq_info(sta->sta.txq[i]); in __cleanup_single_sta()
250 if (sta->sta.txq[0]) in sta_info_free()
251 kfree(to_txq_info(sta->sta.txq[0])); in sta_info_free()
365 txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp); in sta_info_alloc()
369 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { in sta_info_alloc()
370 struct txq_info *txq = txq_data + i * size; in sta_info_alloc() local
372 ieee80211_txq_init(sdata, sta, txq, i); in sta_info_alloc()
433 if (sta->sta.txq[0]) in sta_info_alloc()
[all …]
tx.c
1096 } else if (!tx->sta->sta.txq[tid]) { in ieee80211_tx_prep_agg()
1252 struct ieee80211_txq *txq = NULL; in ieee80211_get_txq() local
1267 txq = sta->sta.txq[tid]; in ieee80211_get_txq()
1269 txq = vif->txq; in ieee80211_get_txq()
1272 if (!txq) in ieee80211_get_txq()
1275 return to_txq_info(txq); in ieee80211_get_txq()
1305 local = vif_to_sdata(txqi->txq.vif)->local; in codel_dequeue_func()
1324 local = vif_to_sdata(txqi->txq.vif)->local; in codel_drop_func()
1406 txqi->txq.vif = &sdata->vif; in ieee80211_txq_init()
1409 txqi->txq.sta = &sta->sta; in ieee80211_txq_init()
[all …]
driver-ops.h
1162 struct txq_info *txq) in drv_wake_tx_queue() argument
1164 struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); in drv_wake_tx_queue()
1169 trace_drv_wake_tx_queue(local, sdata, txq); in drv_wake_tx_queue()
1170 local->ops->wake_tx_queue(&local->hw, &txq->txq); in drv_wake_tx_queue()
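
drv_wake_tx_queue() above is mac80211's bridge to a driver's wake_tx_queue() method. What such a method typically does is pull frames for the signalled queue with ieee80211_tx_dequeue() and push them to hardware; my_wake_tx_queue() and my_hw_queue_frame() below are hypothetical:

#include <net/mac80211.h>

/* device-specific ring handling is elided; this sketch just frees the frame */
static void my_hw_queue_frame(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static void my_wake_tx_queue(struct ieee80211_hw *hw,
			     struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	while ((skb = ieee80211_tx_dequeue(hw, txq)))
		my_hw_queue_frame(hw, skb);
}
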
ieee80211_i.h
828 struct ieee80211_txq txq; member
1503 static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq) in to_txq_info() argument
1505 return container_of(txq, struct txq_info, txq); in to_txq_info()
1508 static inline bool txq_has_queue(struct ieee80211_txq *txq) in txq_has_queue() argument
1510 struct txq_info *txqi = to_txq_info(txq); in txq_has_queue()
1990 struct txq_info *txq, int tid);
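
to_txq_info() above is the standard container_of() embedding pattern: the public struct ieee80211_txq lives inside the private struct txq_info, so the wrapper is recovered by subtracting the member offset. A generic sketch with illustrative my_pub/my_priv types:

#include <linux/kernel.h>

struct my_pub {
	int id;			/* the part other layers see */
};

struct my_priv {
	unsigned long flags;	/* private state */
	struct my_pub pub;	/* embedded public handle */
};

static inline struct my_priv *to_my_priv(struct my_pub *pub)
{
	return container_of(pub, struct my_priv, pub);
}
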
tdls.c
271 struct ieee80211_tx_queue_params *txq; in ieee80211_tdls_add_wmm_param_ie() local
293 txq = &sdata->tx_conf[ieee80211_ac_from_wmm(i)]; in ieee80211_tdls_add_wmm_param_ie()
294 wmm->ac[i].aci_aifsn = ieee80211_wmm_aci_aifsn(txq->aifs, in ieee80211_tdls_add_wmm_param_ie()
295 txq->acm, i); in ieee80211_tdls_add_wmm_param_ie()
296 wmm->ac[i].cw = ieee80211_wmm_ecw(txq->cw_min, txq->cw_max); in ieee80211_tdls_add_wmm_param_ie()
297 wmm->ac[i].txop_limit = cpu_to_le16(txq->txop); in ieee80211_tdls_add_wmm_param_ie()
debugfs_sta.c
160 txqi = to_txq_info(sta->sta.txq[i]); in sta_aqm_read()
163 txqi->txq.tid, in sta_aqm_read()
164 txqi->txq.ac, in sta_aqm_read()
trace.h
2549 struct txq_info *txq),
2551 TP_ARGS(local, sdata, txq),
2562 struct ieee80211_sta *sta = txq->txq.sta;
2567 __entry->ac = txq->txq.ac;
2568 __entry->tid = txq->txq.tid;
debugfs_netdev.c
493 struct txq_info *txqi = to_txq_info(sdata->vif.txq); in ieee80211_if_fmt_aqm()
503 txqi->txq.ac, in ieee80211_if_fmt_aqm()
/net/irda/
irlap_event.c
192 if (skb_queue_empty(&self->txq) || self->remote_busy) { in irlap_start_poll_timer()
262 skb_queue_len(&self->txq)); in irlap_do_event()
264 if (!skb_queue_empty(&self->txq)) { in irlap_do_event()
284 while ((skb = skb_dequeue(&self->txq)) != NULL) { in irlap_do_event()
1005 skb_next = skb_peek(&self->txq); in irlap_state_xmit_p()
1031 skb_queue_head(&self->txq, skb_get(skb)); in irlap_state_xmit_p()
1055 nextfit = !skb_queue_empty(&self->txq); in irlap_state_xmit_p()
1082 skb_queue_head(&self->txq, skb_get(skb)); in irlap_state_xmit_p()
1768 skb_next = skb_peek(&self->txq); in irlap_state_xmit_s()
1782 skb_queue_head(&self->txq, skb_get(skb)); in irlap_state_xmit_s()
[all …]
irlap.c
135 skb_queue_head_init(&self->txq); in irlap_open()
350 skb_queue_tail(&self->txq, skb); in irlap_data_request()
359 if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy)) in irlap_data_request()
424 if (!skb_queue_empty(&self->txq)) { in irlap_disconnect_request()
825 while ((skb = skb_dequeue(&self->txq)) != NULL) in irlap_flush_all_queues()
1134 skb_queue_len(&self->txq)); in irlap_seq_show()
irlap_frame.c
1011 while (!skb_queue_empty(&self->txq)) { in irlap_resend_rejected_frames()
1015 skb = skb_dequeue( &self->txq); in irlap_resend_rejected_frames()
1023 !skb_queue_empty(&self->txq)) { in irlap_resend_rejected_frames()
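
The irda matches above all treat self->txq as a plain sk_buff_head: initialize once with skb_queue_head_init(), producers append with skb_queue_tail(), and consumers drain with skb_dequeue() (or discard everything with skb_queue_purge(), as irlan does below). A self-contained sketch with an illustrative my_link type:

#include <linux/skbuff.h>

struct my_link {
	struct sk_buff_head txq;
};

static void my_link_init(struct my_link *self)
{
	skb_queue_head_init(&self->txq);	/* once, before any use */
}

static void my_link_enqueue(struct my_link *self, struct sk_buff *skb)
{
	skb_queue_tail(&self->txq, skb);	/* takes the list lock internally */
}

static void my_link_flush(struct my_link *self)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&self->txq)) != NULL)
		kfree_skb(skb);
}
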
/net/caif/
caif_dev.c
170 struct netdev_queue *txq; in transmit() local
188 txq = netdev_get_tx_queue(skb->dev, 0); in transmit()
189 qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc)); in transmit()
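
transmit() above samples the root qdisc's backlog for flow control: the qdisc pointer hanging off the tx queue is read with rcu_dereference_bh() inside an RCU-BH read section. A minimal sketch, with a hypothetical my_tx_backlog() helper:

#include <linux/netdevice.h>
#include <net/sch_generic.h>

static int my_tx_backlog(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
	int qlen;

	rcu_read_lock_bh();
	qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
	rcu_read_unlock_bh();

	return qlen;
}
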
/net/irda/irlan/
irlan_common.c
235 skb_queue_head_init(&self->client.txq); in irlan_open()
276 skb_queue_purge(&self->client.txq); in __irlan_close()
577 skb = skb_dequeue(&self->client.txq); in irlan_run_ctrl_tx_queue()
605 skb_queue_tail(&self->client.txq, skb); in irlan_ctrl_data_request()
irlan_eth.c
148 skb_queue_purge(&self->client.txq); in irlan_eth_close()
irlan_client.c
229 while ((skb = skb_dequeue(&self->client.txq)) != NULL) { in irlan_client_ctrl_disconnect_indication()
/net/batman-adv/
soft-interface.c
740 struct netdev_queue *txq, in batadv_set_lockdep_class_one() argument
743 lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key); in batadv_set_lockdep_class_one()
/net/8021q/
vlan_dev.c
497 struct netdev_queue *txq, in vlan_dev_set_lockdep_one() argument
500 lockdep_set_class_and_subclass(&txq->_xmit_lock, in vlan_dev_set_lockdep_one()
/net/netrom/
af_netrom.c
79 struct netdev_queue *txq, in nr_set_lockdep_one() argument
82 lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key); in nr_set_lockdep_one()
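
The batman-adv, 8021q, and netrom helpers above all follow one recipe: give every tx queue's _xmit_lock a private lockdep class, applied via netdev_for_each_tx_queue(), so that transmitting through a stacked virtual device does not produce false lockdep recursion reports. A sketch with a hypothetical my_xmit_lock_key:

#include <linux/netdevice.h>
#include <linux/lockdep.h>

static struct lock_class_key my_xmit_lock_key;

static void my_set_lockdep_one(struct net_device *dev,
			       struct netdev_queue *txq,
			       void *unused)
{
	lockdep_set_class(&txq->_xmit_lock, &my_xmit_lock_key);
}

static void my_set_lockdep(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, my_set_lockdep_one, NULL);
}
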
