
Searched refs:tx_queue (Results 1 – 25 of 113) sorted by relevance


/drivers/net/ethernet/sfc/falcon/
tx.c
28 static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue, in ef4_tx_get_copy_buffer() argument
31 unsigned int index = ef4_tx_queue_get_insert_index(tx_queue); in ef4_tx_get_copy_buffer()
33 &tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)]; in ef4_tx_get_copy_buffer()
38 ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, in ef4_tx_get_copy_buffer()
46 u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue, in ef4_tx_get_copy_buffer_limited() argument
51 return ef4_tx_get_copy_buffer(tx_queue, buffer); in ef4_tx_get_copy_buffer_limited()
54 static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue, in ef4_dequeue_buffer() argument
60 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; in ef4_dequeue_buffer()
75 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, in ef4_dequeue_buffer()
77 tx_queue->queue, tx_queue->read_count); in ef4_dequeue_buffer()
[all …]
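
The cb_page indexing in these hits packs power-of-two-sized copy buffers into whole pages, so the page holding a descriptor's buffer falls out of a single shift. A minimal standalone model of that arithmetic, assuming 4 KiB pages and a 128-byte copy buffer (the order value is an assumption, not taken from the driver):

#include <stdio.h>

/* Illustrative values: PAGE_SHIFT as on most arches, TX_CB_ORDER assumed. */
#define PAGE_SHIFT   12
#define TX_CB_ORDER  7                         /* 128-byte copy buffers */
#define CBS_PER_PAGE (1u << (PAGE_SHIFT - TX_CB_ORDER))

int main(void)
{
    unsigned int index  = 133;                 /* descriptor insert index */
    unsigned int page   = index >> (PAGE_SHIFT - TX_CB_ORDER);
    unsigned int offset = (index & (CBS_PER_PAGE - 1)) << TX_CB_ORDER;

    printf("descriptor %u -> cb_page[%u], byte offset %u\n",
           index, page, offset);
    return 0;
}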
nic.h
66 ef4_tx_desc(struct ef4_tx_queue *tx_queue, unsigned int index) in ef4_tx_desc() argument
68 return ((ef4_qword_t *) (tx_queue->txd.buf.addr)) + index; in ef4_tx_desc()
72 static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_queue) in ef4_tx_queue_partner() argument
74 if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD) in ef4_tx_queue_partner()
75 return tx_queue - EF4_TXQ_TYPE_OFFLOAD; in ef4_tx_queue_partner()
77 return tx_queue + EF4_TXQ_TYPE_OFFLOAD; in ef4_tx_queue_partner()
83 static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, in __ef4_nic_tx_is_empty() argument
86 unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); in __ef4_nic_tx_is_empty()
102 static inline bool ef4_nic_may_push_tx_desc(struct ef4_tx_queue *tx_queue, in ef4_nic_may_push_tx_desc() argument
105 bool was_empty = __ef4_nic_tx_is_empty(tx_queue, write_count); in ef4_nic_may_push_tx_desc()
[all …]
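
ef4_tx_queue_partner() relies on offload/non-offload queues sitting in adjacent array slots, so the partner is reached by pointer arithmetic with the EF4_TXQ_TYPE_OFFLOAD stride. A standalone sketch of the pairing (names and the stride value are illustrative):

#include <stdio.h>

#define TXQ_TYPE_OFFLOAD 1          /* low bit of the queue number (assumed) */

struct txq { unsigned int queue; };

static struct txq *txq_partner(struct txq *q)
{
    if (q->queue & TXQ_TYPE_OFFLOAD)
        return q - TXQ_TYPE_OFFLOAD;    /* offload queue -> plain partner */
    return q + TXQ_TYPE_OFFLOAD;        /* plain queue -> offload partner */
}

int main(void)
{
    struct txq queues[4] = { {0}, {1}, {2}, {3} };

    printf("partner of q1 is q%u\n", txq_partner(&queues[1])->queue);
    printf("partner of q2 is q%u\n", txq_partner(&queues[2])->queue);
    return 0;
}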
farch.c
275 static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue) in ef4_farch_notify_tx_desc() argument
280 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; in ef4_farch_notify_tx_desc()
282 ef4_writed_page(tx_queue->efx, &reg, in ef4_farch_notify_tx_desc()
283 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); in ef4_farch_notify_tx_desc()
287 static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue, in ef4_farch_push_tx_desc() argument
296 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; in ef4_farch_push_tx_desc()
300 ef4_writeo_page(tx_queue->efx, &reg, in ef4_farch_push_tx_desc()
301 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); in ef4_farch_push_tx_desc()
309 void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue) in ef4_farch_tx_write() argument
314 unsigned old_write_count = tx_queue->write_count; in ef4_farch_tx_write()
[all …]
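
Both farch helpers recover a hardware ring slot from a free-running counter with write_count & ptr_mask, which only works because the ring size is a power of two (ptr_mask = entries - 1). A tiny standalone model:

#include <stdio.h>

#define TXQ_ENTRIES 512u            /* must be a power of two */
#define PTR_MASK    (TXQ_ENTRIES - 1)

int main(void)
{
    unsigned int write_count = 1234567;        /* free-running, never reset */
    unsigned int write_ptr   = write_count & PTR_MASK;

    printf("write_count %u -> ring slot %u of %u\n",
           write_count, write_ptr, TXQ_ENTRIES);
    return 0;
}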
net_driver.h
448 struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES]; member
1085 int (*tx_probe)(struct ef4_tx_queue *tx_queue);
1086 void (*tx_init)(struct ef4_tx_queue *tx_queue);
1087 void (*tx_remove)(struct ef4_tx_queue *tx_queue);
1088 void (*tx_write)(struct ef4_tx_queue *tx_queue);
1089 unsigned int (*tx_limit_len)(struct ef4_tx_queue *tx_queue,
1195 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; in ef4_get_tx_queue()
1209 return &channel->tx_queue[type]; in ef4_channel_get_tx_queue()
1212 static inline bool ef4_tx_queue_used(struct ef4_tx_queue *tx_queue) in ef4_tx_queue_used() argument
1214 return !(tx_queue->efx->net_dev->num_tc < 2 && in ef4_tx_queue_used()
[all …]
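
The tx_probe/tx_init/tx_remove/tx_write members form a per-NIC-generation method table: core code calls through the struct and never references a specific hardware implementation. A self-contained sketch of the pattern (the demo names are invented):

#include <stdio.h>

struct txq { unsigned int queue; };

struct nic_type {                   /* mirrors the function-pointer members above */
    int  (*tx_probe)(struct txq *q);
    void (*tx_write)(struct txq *q);
};

static int  demo_tx_probe(struct txq *q) { printf("probe q%u\n", q->queue); return 0; }
static void demo_tx_write(struct txq *q) { printf("write q%u\n", q->queue); }

static const struct nic_type demo_nic = {
    .tx_probe = demo_tx_probe,
    .tx_write = demo_tx_write,
};

int main(void)
{
    struct txq q = { .queue = 3 };

    demo_nic.tx_probe(&q);          /* core code only sees the table */
    demo_nic.tx_write(&q);
    return 0;
}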
selftest.c
413 static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue) in ef4_begin_loopback() argument
415 struct ef4_nic *efx = tx_queue->efx; in ef4_begin_loopback()
443 rc = ef4_enqueue_skb(tx_queue, skb); in ef4_begin_loopback()
449 "%d in %s loopback test\n", tx_queue->queue, in ef4_begin_loopback()
469 static int ef4_end_loopback(struct ef4_tx_queue *tx_queue, in ef4_end_loopback() argument
472 struct ef4_nic *efx = tx_queue->efx; in ef4_end_loopback()
501 tx_queue->queue, tx_done, state->packet_count, in ef4_end_loopback()
512 tx_queue->queue, rx_good, state->packet_count, in ef4_end_loopback()
519 lb_tests->tx_sent[tx_queue->queue] += state->packet_count; in ef4_end_loopback()
520 lb_tests->tx_done[tx_queue->queue] += tx_done; in ef4_end_loopback()
[all …]
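
The loopback test enqueues a known packet_count on one tx_queue and, in ef4_end_loopback(), compares the TX-done and RX-good counters against it. A minimal model of that pass/fail accounting (struct and field names are illustrative):

#include <stdio.h>

struct lb_state { unsigned int packet_count, tx_done, rx_good; };

static int end_loopback(const struct lb_state *s)
{
    if (s->tx_done != s->packet_count) {       /* TX path dropped frames */
        printf("TX completed only %u of %u\n", s->tx_done, s->packet_count);
        return -1;
    }
    if (s->rx_good != s->packet_count) {       /* loopback RX lost frames */
        printf("RX good only %u of %u\n", s->rx_good, s->packet_count);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct lb_state s = { .packet_count = 100, .tx_done = 100, .rx_good = 98 };

    printf("loopback %s\n", end_loopback(&s) ? "FAILED" : "passed");
    return 0;
}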
efx.h
26 int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue);
27 void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue);
28 void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue);
29 void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue);
30 void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue);
33 netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
34 void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
efx.c
243 struct ef4_tx_queue *tx_queue; in ef4_process_channel() local
249 ef4_for_each_channel_tx_queue(tx_queue, channel) { in ef4_process_channel()
250 tx_queue->pkts_compl = 0; in ef4_process_channel()
251 tx_queue->bytes_compl = 0; in ef4_process_channel()
264 ef4_for_each_channel_tx_queue(tx_queue, channel) { in ef4_process_channel()
265 if (tx_queue->bytes_compl) { in ef4_process_channel()
266 netdev_tx_completed_queue(tx_queue->core_txq, in ef4_process_channel()
267 tx_queue->pkts_compl, tx_queue->bytes_compl); in ef4_process_channel()
432 struct ef4_tx_queue *tx_queue; in ef4_alloc_channel() local
444 tx_queue = &channel->tx_queue[j]; in ef4_alloc_channel()
[all …]
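
ef4_process_channel() zeroes pkts_compl/bytes_compl before the poll, lets the completion path accumulate into them, and reports once per poll via netdev_tx_completed_queue() (the BQL hook) instead of once per packet. A standalone model of that batching:

#include <stdio.h>

struct txq { unsigned int pkts_compl, bytes_compl; };

static void complete_packet(struct txq *q, unsigned int len)
{
    q->pkts_compl++;                /* accumulated during the poll */
    q->bytes_compl += len;
}

int main(void)
{
    struct txq q = { 0, 0 };

    complete_packet(&q, 1514);
    complete_packet(&q, 60);

    if (q.bytes_compl)              /* one report per poll, as above */
        printf("report: %u pkts, %u bytes\n", q.pkts_compl, q.bytes_compl);
    return 0;
}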
tx.h
18 unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
21 u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
24 int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
/drivers/net/ethernet/sfc/
tx.c
36 static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, in efx_tx_get_copy_buffer() argument
39 unsigned int index = efx_tx_queue_get_insert_index(tx_queue); in efx_tx_get_copy_buffer()
41 &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)]; in efx_tx_get_copy_buffer()
46 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, in efx_tx_get_copy_buffer()
54 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, in efx_tx_get_copy_buffer_limited() argument
59 return efx_tx_get_copy_buffer(tx_queue, buffer); in efx_tx_get_copy_buffer_limited()
62 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, in efx_dequeue_buffer() argument
68 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; in efx_dequeue_buffer()
84 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, in efx_dequeue_buffer()
86 tx_queue->queue, tx_queue->read_count); in efx_dequeue_buffer()
[all …]
tx_tso.c
82 static inline void prefetch_ptr(struct efx_tx_queue *tx_queue) in prefetch_ptr() argument
84 unsigned int insert_ptr = efx_tx_queue_get_insert_index(tx_queue); in prefetch_ptr()
87 ptr = (char *) (tx_queue->buffer + insert_ptr); in prefetch_ptr()
91 ptr = (char *) (((efx_qword_t *)tx_queue->txd.buf.addr) + insert_ptr); in prefetch_ptr()
105 static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, in efx_tx_queue_insert() argument
115 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in efx_tx_queue_insert()
116 ++tx_queue->insert_count; in efx_tx_queue_insert()
118 EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count - in efx_tx_queue_insert()
119 tx_queue->read_count >= in efx_tx_queue_insert()
120 tx_queue->efx->txq_entries); in efx_tx_queue_insert()
[all …]
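
prefetch_ptr() warms the cache lines for the next insert slot, both the software buffer state and the hardware descriptor, before the TSO fast path touches them. A sketch using the GCC/Clang builtin in place of the kernel's prefetch() (sizes and types are illustrative):

#include <stdio.h>

#define ENTRIES  512u
#define PTR_MASK (ENTRIES - 1)

struct tx_buffer { unsigned long dma_addr; unsigned int len; };

static struct tx_buffer   buffers[ENTRIES];      /* software state */
static unsigned long long descriptors[ENTRIES];  /* hardware ring stand-in */

static void prefetch_insert(unsigned int insert_count)
{
    unsigned int ptr = insert_count & PTR_MASK;

    __builtin_prefetch(&buffers[ptr]);
    __builtin_prefetch(&descriptors[ptr]);
}

int main(void)
{
    prefetch_insert(1000);
    puts("prefetched next insert slot");
    return 0;
}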
nic.h
64 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) in efx_tx_desc() argument
66 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; in efx_tx_desc()
70 static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) in efx_tx_queue_partner() argument
72 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) in efx_tx_queue_partner()
73 return tx_queue - EFX_TXQ_TYPE_OFFLOAD; in efx_tx_queue_partner()
75 return tx_queue + EFX_TXQ_TYPE_OFFLOAD; in efx_tx_queue_partner()
81 static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, in __efx_nic_tx_is_empty() argument
84 unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); in __efx_nic_tx_is_empty()
97 static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) in efx_nic_tx_is_empty() argument
99 EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors); in efx_nic_tx_is_empty()
[all …]
farch.c
284 static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue) in efx_farch_notify_tx_desc() argument
289 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; in efx_farch_notify_tx_desc()
291 efx_writed_page(tx_queue->efx, &reg, in efx_farch_notify_tx_desc()
292 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); in efx_farch_notify_tx_desc()
296 static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue, in efx_farch_push_tx_desc() argument
305 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; in efx_farch_push_tx_desc()
309 efx_writeo_page(tx_queue->efx, &reg, in efx_farch_push_tx_desc()
310 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); in efx_farch_push_tx_desc()
318 void efx_farch_tx_write(struct efx_tx_queue *tx_queue) in efx_farch_tx_write() argument
323 unsigned old_write_count = tx_queue->write_count; in efx_farch_tx_write()
[all …]
net_driver.h
493 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; member
1206 int (*tx_probe)(struct efx_tx_queue *tx_queue);
1207 void (*tx_init)(struct efx_tx_queue *tx_queue);
1208 void (*tx_remove)(struct efx_tx_queue *tx_queue);
1209 void (*tx_write)(struct efx_tx_queue *tx_queue);
1210 unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
1353 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; in efx_get_tx_queue()
1367 return &channel->tx_queue[type]; in efx_channel_get_tx_queue()
1370 static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) in efx_tx_queue_used() argument
1372 return !(tx_queue->efx->net_dev->num_tc < 2 && in efx_tx_queue_used()
[all …]
selftest.c
413 static int efx_begin_loopback(struct efx_tx_queue *tx_queue) in efx_begin_loopback() argument
415 struct efx_nic *efx = tx_queue->efx; in efx_begin_loopback()
443 rc = efx_enqueue_skb(tx_queue, skb); in efx_begin_loopback()
449 "%d in %s loopback test\n", tx_queue->queue, in efx_begin_loopback()
469 static int efx_end_loopback(struct efx_tx_queue *tx_queue, in efx_end_loopback() argument
472 struct efx_nic *efx = tx_queue->efx; in efx_end_loopback()
501 tx_queue->queue, tx_done, state->packet_count, in efx_end_loopback()
512 tx_queue->queue, rx_good, state->packet_count, in efx_end_loopback()
519 lb_tests->tx_sent[tx_queue->queue] += state->packet_count; in efx_end_loopback()
520 lb_tests->tx_done[tx_queue->queue] += tx_done; in efx_end_loopback()
[all …]
efx.h
26 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
27 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
28 void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
29 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
30 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
33 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
34 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
tx.h
18 unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
21 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
24 int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
ef10.c
834 struct efx_tx_queue *tx_queue; in efx_ef10_link_piobufs() local
864 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_ef10_link_piobufs()
870 tx_queue->channel->channel - 1) * in efx_ef10_link_piobufs()
880 if (tx_queue->queue == nic_data->pio_write_vi_base) { in efx_ef10_link_piobufs()
889 tx_queue->queue); in efx_ef10_link_piobufs()
901 tx_queue->queue, index, rc); in efx_ef10_link_piobufs()
902 tx_queue->piobuf = NULL; in efx_ef10_link_piobufs()
904 tx_queue->piobuf = in efx_ef10_link_piobufs()
907 tx_queue->piobuf_offset = offset; in efx_ef10_link_piobufs()
910 tx_queue->queue, index, in efx_ef10_link_piobufs()
[all …]
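
efx_ef10_link_piobufs() points each TX queue at a slice of a shared PIO write buffer and records the byte offset in tx_queue->piobuf_offset. A rough standalone model of carving per-queue offsets out of shared buffers; the sizes and mapping below are invented, and the real offsets come from the MCDI link operations:

#include <stdio.h>

#define PIOBUF_SIZE    2048u        /* per-buffer size (assumption) */
#define QUEUES_PER_BUF 4u           /* queues sharing one buffer (assumption) */

int main(void)
{
    unsigned int queue  = 6;
    unsigned int buf    = queue / QUEUES_PER_BUF;
    unsigned int offset = (queue % QUEUES_PER_BUF) *
                          (PIOBUF_SIZE / QUEUES_PER_BUF);

    printf("queue %u -> piobuf %u, piobuf_offset %u\n", queue, buf, offset);
    return 0;
}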
efx.c
265 struct efx_tx_queue *tx_queue; in efx_process_channel() local
271 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_process_channel()
272 tx_queue->pkts_compl = 0; in efx_process_channel()
273 tx_queue->bytes_compl = 0; in efx_process_channel()
286 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_process_channel()
287 if (tx_queue->bytes_compl) { in efx_process_channel()
288 netdev_tx_completed_queue(tx_queue->core_txq, in efx_process_channel()
289 tx_queue->pkts_compl, tx_queue->bytes_compl); in efx_process_channel()
454 struct efx_tx_queue *tx_queue; in efx_alloc_channel() local
466 tx_queue = &channel->tx_queue[j]; in efx_alloc_channel()
[all …]
/drivers/net/ethernet/freescale/
gianfar.c
143 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
175 struct gfar_priv_tx_q *tx_queue = NULL; in gfar_init_bds() local
182 tx_queue = priv->tx_queue[i]; in gfar_init_bds()
184 tx_queue->num_txbdfree = tx_queue->tx_ring_size; in gfar_init_bds()
185 tx_queue->dirty_tx = tx_queue->tx_bd_base; in gfar_init_bds()
186 tx_queue->cur_tx = tx_queue->tx_bd_base; in gfar_init_bds()
187 tx_queue->skb_curtx = 0; in gfar_init_bds()
188 tx_queue->skb_dirtytx = 0; in gfar_init_bds()
191 txbdp = tx_queue->tx_bd_base; in gfar_init_bds()
192 for (j = 0; j < tx_queue->tx_ring_size; j++) { in gfar_init_bds()
[all …]
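
gfar_init_bds() resets the TX side so the producer (cur_tx) and consumer (dirty_tx) both point at the ring base, the free count equals the ring size, and every buffer descriptor is cleared. A standalone model of that reset:

#include <stdio.h>
#include <string.h>

#define TX_RING_SIZE 256

struct txbd { unsigned int status; unsigned long addr; };

struct tx_ring {
    struct txbd  bds[TX_RING_SIZE];
    struct txbd *cur_tx, *dirty_tx;
    unsigned int num_txbdfree;
    unsigned int skb_curtx, skb_dirtytx;
};

static void init_tx_ring(struct tx_ring *r)
{
    r->num_txbdfree = TX_RING_SIZE;            /* whole ring is free */
    r->cur_tx = r->dirty_tx = r->bds;          /* producer == consumer */
    r->skb_curtx = r->skb_dirtytx = 0;
    memset(r->bds, 0, sizeof(r->bds));         /* clear all descriptors */
}

int main(void)
{
    static struct tx_ring ring;                /* static: too big for the stack */

    init_tx_ring(&ring);
    printf("free descriptors: %u\n", ring.num_txbdfree);
    return 0;
}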
/drivers/net/wireless/rsi/
rsi_91x_core.c
35 q_len = skb_queue_len(&common->tx_queue[ii]); in rsi_determine_min_weight_queue()
59 q_len = skb_queue_len(&common->tx_queue[ii]); in rsi_recalculate_weights()
103 if (skb_queue_len(&common->tx_queue[q_num])) in rsi_get_num_pkts_dequeue()
104 skb = skb_peek(&common->tx_queue[q_num]); in rsi_get_num_pkts_dequeue()
116 if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt) in rsi_get_num_pkts_dequeue()
140 if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) { in rsi_core_determine_hal_queue()
144 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) { in rsi_core_determine_hal_queue()
167 q_len = skb_queue_len(&common->tx_queue[ii]); in rsi_core_determine_hal_queue()
182 q_len = skb_queue_len(&common->tx_queue[q_num]); in rsi_core_determine_hal_queue()
196 q_len = skb_queue_len(&common->tx_queue[q_num]); in rsi_core_determine_hal_queue()
[all …]
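
rsi_core_determine_hal_queue() serves beacon and management queues first, then weighs the data queues by length to pick the next one to drain. A much-simplified model of that selection; the real driver recalculates per-queue weights rather than just taking the longest queue:

#include <stdio.h>

enum { BK_Q, BE_Q, VI_Q, VO_Q, MGMT_SOFT_Q, NUM_Q };

static unsigned int qlen[NUM_Q];    /* stands in for skb_queue_len() */

static int determine_queue(void)
{
    int q, best = -1;

    if (qlen[MGMT_SOFT_Q])
        return MGMT_SOFT_Q;         /* management traffic served first */
    for (q = BK_Q; q <= VO_Q; q++)  /* simplified: longest data queue wins */
        if (qlen[q] && (best < 0 || qlen[q] > qlen[best]))
            best = q;
    return best;
}

int main(void)
{
    qlen[BE_Q] = 4;
    qlen[VO_Q] = 9;
    printf("serve queue %d\n", determine_queue());
    return 0;
}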
rsi_91x_debugfs.c
153 skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])); in rsi_stats_read()
161 skb_queue_len(&common->tx_queue[VO_Q])); in rsi_stats_read()
167 skb_queue_len(&common->tx_queue[VI_Q])); in rsi_stats_read()
173 skb_queue_len(&common->tx_queue[BE_Q])); in rsi_stats_read()
179 skb_queue_len(&common->tx_queue[BK_Q])); in rsi_stats_read()
/drivers/net/wireless/ath/ath5k/
dma.c
132 u32 tx_queue; in ath5k_hw_start_tx_dma() local
141 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); in ath5k_hw_start_tx_dma()
148 tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0; in ath5k_hw_start_tx_dma()
151 tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1; in ath5k_hw_start_tx_dma()
156 tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1; in ath5k_hw_start_tx_dma()
164 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); in ath5k_hw_start_tx_dma()
191 u32 tx_queue, pending; in ath5k_hw_stop_tx_dma() local
200 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); in ath5k_hw_stop_tx_dma()
207 tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0; in ath5k_hw_stop_tx_dma()
212 tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1; in ath5k_hw_stop_tx_dma()
[all …]
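
ath5k_hw_start_tx_dma() does a read-modify-write of the AR5K_CR control register: it sets the TX-enable bit for the queue while leaving the matching TX-disable bit clear, then writes the word back. A standalone model with invented register and bit values:

#include <stdio.h>
#include <stdint.h>

#define CR_TXE0 0x00000001u         /* queue 0 enable (illustrative value) */
#define CR_TXD0 0x00000008u         /* queue 0 disable (illustrative value) */

static uint32_t mmio_cr;            /* stands in for the device register */

static void start_tx_dma_q0(void)
{
    uint32_t cr = mmio_cr;          /* ath5k_hw_reg_read(ah, AR5K_CR) */

    cr |= CR_TXE0 & ~CR_TXD0;       /* enable bit on, disable bit untouched */
    mmio_cr = cr;                   /* ath5k_hw_reg_write(ah, cr, AR5K_CR) */
}

int main(void)
{
    start_tx_dma_q0();
    printf("CR = 0x%08x\n", mmio_cr);
    return 0;
}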
/drivers/net/wireless/intersil/p54/
txrx.c
41 spin_lock_irqsave(&priv->tx_queue.lock, flags); in p54_dump_tx_queue()
43 skb_queue_len(&priv->tx_queue)); in p54_dump_tx_queue()
46 skb_queue_walk(&priv->tx_queue, skb) { in p54_dump_tx_queue()
69 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); in p54_dump_tx_queue()
97 spin_lock_irqsave(&priv->tx_queue.lock, flags); in p54_assign_address()
98 if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) { in p54_assign_address()
104 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); in p54_assign_address()
108 skb_queue_walk(&priv->tx_queue, entry) { in p54_assign_address()
124 target_skb = priv->tx_queue.prev; in p54_assign_address()
125 if (!skb_queue_empty(&priv->tx_queue)) { in p54_assign_address()
[all …]
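
p54_assign_address() walks the pending-frame list under priv->tx_queue.lock and refuses new frames once 32 are in flight. A standalone sketch of that lock-and-cap discipline using pthreads (the counter stands in for skb_queue_len()):

#include <pthread.h>
#include <stdio.h>

#define MAX_PENDING 32

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int pending;        /* stands in for skb_queue_len() */

static int enqueue_frame(void)
{
    int rc = 0;

    pthread_mutex_lock(&tx_lock);
    if (pending == MAX_PENDING)
        rc = -1;                    /* device ring full; caller retries */
    else
        pending++;                  /* link the frame into the queue */
    pthread_mutex_unlock(&tx_lock);
    return rc;
}

int main(void)
{
    printf("enqueue: %s\n", enqueue_frame() ? "busy" : "ok");
    return 0;
}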
/drivers/net/ethernet/qlogic/qed/
qed_ll2.c
67 #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
308 p_tx = &p_ll2_conn->tx_queue; in qed_ll2_txq_flush()
344 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; in qed_ll2_txq_completion()
791 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; in qed_ll2_lb_txq_completion()
924 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; in qed_sp_ll2_tx_queue_start()
1126 &p_ll2_info->tx_queue.txq_chain, NULL); in qed_ll2_acquire_connection_tx()
1130 capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain); in qed_ll2_acquire_connection_tx()
1137 p_ll2_info->tx_queue.descq_array = p_descq; in qed_ll2_acquire_connection_tx()
1318 &p_ll2_info->tx_queue.tx_sb_index, in qed_ll2_acquire_connection()
1319 &p_ll2_info->tx_queue.p_fw_cons); in qed_ll2_acquire_connection()
[all …]
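
qed_ll2_acquire_connection_tx() allocates the TX chain first, queries its capacity, and then sizes the shadow descriptor array (descq_array) to match. The shape of that allocation, modeled with calloc in place of the qed chain API:

#include <stdio.h>
#include <stdlib.h>

struct tx_desc { void *cookie; unsigned int len; };

int main(void)
{
    unsigned int capacity = 1024;   /* qed_chain_get_capacity() stand-in */
    struct tx_desc *descq = calloc(capacity, sizeof(*descq));

    if (!descq)
        return 1;
    printf("tx_queue shadow ring: %u descriptors\n", capacity);
    free(descq);
    return 0;
}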
/drivers/bluetooth/
btmrvl_main.c
201 skb_queue_head(&priv->adapter->tx_queue, skb); in btmrvl_send_sync_cmd()
402 skb_queue_head_init(&priv->adapter->tx_queue); in btmrvl_init_adapter()
425 skb_queue_purge(&priv->adapter->tx_queue); in btmrvl_free_adapter()
458 skb_queue_tail(&priv->adapter->tx_queue, skb); in btmrvl_send_frame()
470 skb_queue_purge(&priv->adapter->tx_queue); in btmrvl_flush()
479 skb_queue_purge(&priv->adapter->tx_queue); in btmrvl_close()
623 skb_queue_empty(&adapter->tx_queue)))) { in btmrvl_service_main_thread()
645 !skb_queue_empty(&adapter->tx_queue)) { in btmrvl_service_main_thread()
661 skb = skb_dequeue(&adapter->tx_queue); in btmrvl_service_main_thread()
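
The btmrvl hits show the queue discipline: synchronous HCI commands go to the head (skb_queue_head), normal frames to the tail (skb_queue_tail), and the service thread drains from the head so commands jump the line. A standalone model with a fixed-size deque (overflow checks omitted for brevity):

#include <stdio.h>

#define QSIZE 8

static const char *q[QSIZE];
static int head, count;

static void queue_head(const char *s)   /* urgent: commands */
{
    head = (head + QSIZE - 1) % QSIZE;
    q[head] = s;
    count++;
}

static void queue_tail(const char *s)   /* normal: data frames */
{
    q[(head + count) % QSIZE] = s;
    count++;
}

static const char *dequeue(void)        /* service-thread side */
{
    const char *s = q[head];

    head = (head + 1) % QSIZE;
    count--;
    return s;
}

int main(void)
{
    queue_tail("acl frame");
    queue_head("hci command");          /* jumps ahead of the data frame */
    while (count)
        printf("tx: %s\n", dequeue());
    return 0;
}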
