Lines matching refs: tx_queue
28 static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue, in ef4_tx_get_copy_buffer() argument
31 unsigned int index = ef4_tx_queue_get_insert_index(tx_queue); in ef4_tx_get_copy_buffer()
33 &tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)]; in ef4_tx_get_copy_buffer()
38 ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, in ef4_tx_get_copy_buffer()
46 u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue, in ef4_tx_get_copy_buffer_limited() argument
51 return ef4_tx_get_copy_buffer(tx_queue, buffer); in ef4_tx_get_copy_buffer_limited()
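The copy-buffer lookup above packs several small buffers into each DMA page: the descriptor index selects a cb_page entry via index >> (PAGE_SHIFT - EF4_TX_CB_ORDER), and the low bits give the slot within that page. A minimal user-space sketch of that arithmetic, assuming 4 KiB pages and EF4_TX_CB_ORDER = 6 (64-byte slots), both illustrative values:

	#include <stdio.h>

	#define PAGE_SHIFT      12      /* assumed: 4 KiB pages */
	#define EF4_TX_CB_ORDER 6       /* assumed: 64-byte copy buffers */

	/* Which cb_page entry, and which byte offset inside it, a descriptor
	 * index lands in.  Mirrors the index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)
	 * lookup at line 33; the offset half is a simplification.
	 */
	static void cb_slot(unsigned int index, unsigned int *page, unsigned int *offset)
	{
		*page   = index >> (PAGE_SHIFT - EF4_TX_CB_ORDER);        /* 64 slots per page */
		*offset = (index << EF4_TX_CB_ORDER) & ((1u << PAGE_SHIFT) - 1);
	}

	int main(void)
	{
		unsigned int page, offset;

		cb_slot(70, &page, &offset);
		printf("page %u, offset %u\n", page, offset);     /* page 1, offset 384 */
		return 0;
	}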
54 static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue, in ef4_dequeue_buffer() argument
60 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; in ef4_dequeue_buffer()
75 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, in ef4_dequeue_buffer()
77 tx_queue->queue, tx_queue->read_count); in ef4_dequeue_buffer()
150 static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue, in ef4_enqueue_skb_copy() argument
153 unsigned int min_len = tx_queue->tx_min_size; in ef4_enqueue_skb_copy()
161 buffer = ef4_tx_queue_get_insert_buffer(tx_queue); in ef4_enqueue_skb_copy()
163 copy_buffer = ef4_tx_get_copy_buffer(tx_queue, buffer); in ef4_enqueue_skb_copy()
179 ++tx_queue->insert_count; in ef4_enqueue_skb_copy()
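ef4_enqueue_skb_copy() (lines 150-179) pulls a short frame into one of those copy buffers and pads it up to tx_queue->tx_min_size before claiming the descriptor. A rough, hedged illustration of the copy-and-pad step on a plain byte buffer (the real code operates on an ef4_tx_buffer and also sets descriptor flags):

	#include <string.h>

	/* Copy a short frame into a copy buffer and zero-pad it to min_len.
	 * Returns the length consumed in the buffer.  Illustrative only.
	 */
	static size_t copy_and_pad(unsigned char *copy_buffer,
				   const unsigned char *frame, size_t frame_len,
				   size_t min_len)
	{
		size_t copy_len = frame_len;

		memcpy(copy_buffer, frame, frame_len);
		if (copy_len < min_len) {
			memset(copy_buffer + copy_len, 0, min_len - copy_len);
			copy_len = min_len;
		}
		return copy_len;
	}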
183 static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue, in ef4_tx_map_chunk() argument
187 const struct ef4_nic_type *nic_type = tx_queue->efx->type; in ef4_tx_map_chunk()
193 buffer = ef4_tx_queue_get_insert_buffer(tx_queue); in ef4_tx_map_chunk()
194 dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len); in ef4_tx_map_chunk()
201 ++tx_queue->insert_count; in ef4_tx_map_chunk()
209 static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb) in ef4_tx_map_data() argument
211 struct ef4_nic *efx = tx_queue->efx; in ef4_tx_map_data()
236 buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len); in ef4_tx_map_data()
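ef4_tx_map_chunk() (lines 183-201) splits one DMA mapping into as many descriptors as the hardware limit allows, bumping insert_count per descriptor, and ef4_tx_map_data() repeats this for the skb head and each fragment. A sketch of the inner chunking loop, with a fixed max_desc_len standing in for the per-NIC nic_type->tx_limit_len() callback (an assumption made for simplicity):

	#include <stddef.h>

	/* Split a [dma_addr, dma_addr + len) mapping into descriptor-sized
	 * chunks.  Returns the number of descriptors consumed; the driver
	 * would fill one ef4_tx_buffer and ++insert_count per iteration.
	 */
	static unsigned int map_chunks(unsigned long long dma_addr, size_t len,
				       size_t max_desc_len)
	{
		unsigned int descriptors = 0;

		while (len) {
			size_t dma_len = len < max_desc_len ? len : max_desc_len;

			dma_addr += dma_len;
			len -= dma_len;
			descriptors++;
		}
		return descriptors;
	}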
271 static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue) in ef4_enqueue_unwind() argument
276 while (tx_queue->insert_count != tx_queue->write_count) { in ef4_enqueue_unwind()
277 --tx_queue->insert_count; in ef4_enqueue_unwind()
278 buffer = __ef4_tx_queue_get_insert_buffer(tx_queue); in ef4_enqueue_unwind()
279 ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL); in ef4_enqueue_unwind()
299 netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb) in ef4_enqueue_skb() argument
307 if (skb_len < tx_queue->tx_min_size || in ef4_enqueue_skb()
310 if (ef4_enqueue_skb_copy(tx_queue, skb)) in ef4_enqueue_skb()
312 tx_queue->cb_packets++; in ef4_enqueue_skb()
317 if (!data_mapped && (ef4_tx_map_data(tx_queue, skb))) in ef4_enqueue_skb()
321 netdev_tx_sent_queue(tx_queue->core_txq, skb_len); in ef4_enqueue_skb()
324 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { in ef4_enqueue_skb()
325 struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue); in ef4_enqueue_skb()
334 ef4_nic_push_buffers(tx_queue); in ef4_enqueue_skb()
336 tx_queue->xmit_more_available = skb->xmit_more; in ef4_enqueue_skb()
339 tx_queue->tx_packets++; in ef4_enqueue_skb()
341 ef4_tx_maybe_stop_queue(tx_queue); in ef4_enqueue_skb()
347 ef4_enqueue_unwind(tx_queue); in ef4_enqueue_skb()
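Lines 299-347 are the transmit fast path: undersized frames take the copy path (the full test at line 307 is truncated here), everything else is DMA-mapped, the doorbell is only written when xmit_more is clear or the core queue has been stopped, and a failure unwinds whatever was inserted. Two small helpers capturing those decisions, as a hedged reading of the tests at lines 307 and 324:

	#include <stdbool.h>
	#include <stddef.h>

	/* Copy path for undersized frames (line 307).  The 33-byte minimum that
	 * EF4_WORKAROUND_15592 selects at line 601 is just an example value.
	 */
	static bool use_copy_path(size_t skb_len, size_t min_size)
	{
		return skb_len < min_size;
	}

	/* Push descriptors to the NIC only at the end of an xmit_more batch, or
	 * when the core queue is stopped and nothing further will flush it (line 324).
	 */
	static bool should_push(bool xmit_more, bool queue_stopped)
	{
		return !xmit_more || queue_stopped;
	}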
357 static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue, in ef4_dequeue_buffers() argument
362 struct ef4_nic *efx = tx_queue->efx; in ef4_dequeue_buffers()
365 stop_index = (index + 1) & tx_queue->ptr_mask; in ef4_dequeue_buffers()
366 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in ef4_dequeue_buffers()
369 struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; in ef4_dequeue_buffers()
375 tx_queue->queue, read_ptr); in ef4_dequeue_buffers()
380 ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); in ef4_dequeue_buffers()
382 ++tx_queue->read_count; in ef4_dequeue_buffers()
383 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in ef4_dequeue_buffers()
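ef4_dequeue_buffers() walks the ring from read_count up to and including the completed index, relying on ptr_mask for wrap-around because the ring size is a power of two. A self-contained sketch of that walk (lines 365-383), with the per-buffer release reduced to a counter:

	/* Walk a power-of-two ring from *read_count through the completed index.
	 * Returns how many entries were released; the real code frees DMA
	 * mappings and skbs for each tx_queue->buffer[read_ptr].
	 */
	static unsigned int drain_ring(unsigned int *read_count, unsigned int index,
				       unsigned int ptr_mask)
	{
		unsigned int stop_index = (index + 1) & ptr_mask;
		unsigned int read_ptr = *read_count & ptr_mask;
		unsigned int released = 0;

		while (read_ptr != stop_index) {
			released++;
			++*read_count;
			read_ptr = *read_count & ptr_mask;
		}
		return released;
	}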
400 struct ef4_tx_queue *tx_queue; in ef4_hard_start_xmit() local
411 tx_queue = ef4_get_tx_queue(efx, index, type); in ef4_hard_start_xmit()
413 return ef4_enqueue_skb(tx_queue, skb); in ef4_hard_start_xmit()
416 void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue) in ef4_init_tx_queue_core_txq() argument
418 struct ef4_nic *efx = tx_queue->efx; in ef4_init_tx_queue_core_txq()
421 tx_queue->core_txq = in ef4_init_tx_queue_core_txq()
423 tx_queue->queue / EF4_TXQ_TYPES + in ef4_init_tx_queue_core_txq()
424 ((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ? in ef4_init_tx_queue_core_txq()
434 struct ef4_tx_queue *tx_queue; in ef4_setup_tc() local
459 ef4_for_each_possible_channel_tx_queue(tx_queue, in ef4_setup_tc()
461 if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI)) in ef4_setup_tc()
463 if (!tx_queue->buffer) { in ef4_setup_tc()
464 rc = ef4_probe_tx_queue(tx_queue); in ef4_setup_tc()
468 if (!tx_queue->initialised) in ef4_setup_tc()
469 ef4_init_tx_queue(tx_queue); in ef4_setup_tc()
470 ef4_init_tx_queue_core_txq(tx_queue); in ef4_setup_tc()
494 void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index) in ef4_xmit_done() argument
497 struct ef4_nic *efx = tx_queue->efx; in ef4_xmit_done()
501 EF4_BUG_ON_PARANOID(index > tx_queue->ptr_mask); in ef4_xmit_done()
503 ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); in ef4_xmit_done()
504 tx_queue->pkts_compl += pkts_compl; in ef4_xmit_done()
505 tx_queue->bytes_compl += bytes_compl; in ef4_xmit_done()
508 ++tx_queue->merge_events; in ef4_xmit_done()
515 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && in ef4_xmit_done()
518 txq2 = ef4_tx_queue_partner(tx_queue); in ef4_xmit_done()
519 fill_level = max(tx_queue->insert_count - tx_queue->read_count, in ef4_xmit_done()
522 netif_tx_wake_queue(tx_queue->core_txq); in ef4_xmit_done()
526 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { in ef4_xmit_done()
527 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); in ef4_xmit_done()
528 if (tx_queue->read_count == tx_queue->old_write_count) { in ef4_xmit_done()
530 tx_queue->empty_read_count = in ef4_xmit_done()
531 tx_queue->read_count | EF4_EMPTY_COUNT_VALID; in ef4_xmit_done()
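In ef4_xmit_done() the wake decision (lines 515-522) takes the fill level over both queues sharing a core txq, so the netdev queue is only woken once the partner has drained too. A minimal sketch of that check, with wake_thresh standing in for efx->txq_wake_thresh:

	/* Fill level across a queue and its partner (line 518); the free-running
	 * unsigned counters make insert - read correct even across wrap.
	 */
	static int should_wake(unsigned int insert1, unsigned int read1,
			       unsigned int insert2, unsigned int read2,
			       unsigned int wake_thresh)
	{
		unsigned int fill1 = insert1 - read1;
		unsigned int fill2 = insert2 - read2;
		unsigned int fill_level = fill1 > fill2 ? fill1 : fill2;

		return fill_level <= wake_thresh;
	}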
536 static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue) in ef4_tx_cb_page_count() argument
538 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER); in ef4_tx_cb_page_count()
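Since the copy buffers are packed PAGE_SIZE >> EF4_TX_CB_ORDER to a page, the cb_page count is a rounded-up division: assuming 4 KiB pages and 64-byte copy buffers (EF4_TX_CB_ORDER = 6), a ring with ptr_mask + 1 = 1024 entries needs DIV_ROUND_UP(1024, 64) = 16 pages.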
541 int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue) in ef4_probe_tx_queue() argument
543 struct ef4_nic *efx = tx_queue->efx; in ef4_probe_tx_queue()
550 tx_queue->ptr_mask = entries - 1; in ef4_probe_tx_queue()
554 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); in ef4_probe_tx_queue()
557 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), in ef4_probe_tx_queue()
559 if (!tx_queue->buffer) in ef4_probe_tx_queue()
562 tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue), in ef4_probe_tx_queue()
563 sizeof(tx_queue->cb_page[0]), GFP_KERNEL); in ef4_probe_tx_queue()
564 if (!tx_queue->cb_page) { in ef4_probe_tx_queue()
570 rc = ef4_nic_probe_tx(tx_queue); in ef4_probe_tx_queue()
577 kfree(tx_queue->cb_page); in ef4_probe_tx_queue()
578 tx_queue->cb_page = NULL; in ef4_probe_tx_queue()
580 kfree(tx_queue->buffer); in ef4_probe_tx_queue()
581 tx_queue->buffer = NULL; in ef4_probe_tx_queue()
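ptr_mask = entries - 1 works as a wrap mask only because the descriptor count is a power of two (the full source rounds the requested efx->txq_entries up before this point). A hedged stand-in for that sizing rule, with round_up_pow2() playing the role of the kernel's roundup_pow_of_two():

	/* Round a requested ring size up to a power of two so that
	 * (count & ptr_mask) wraps correctly, where ptr_mask == entries - 1.
	 */
	static unsigned int round_up_pow2(unsigned int n)
	{
		unsigned int entries = 1;

		while (entries < n)
			entries <<= 1;
		return entries;
	}

	/* e.g. round_up_pow2(1000) == 1024, giving ptr_mask == 0x3ff */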
585 void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue) in ef4_init_tx_queue() argument
587 struct ef4_nic *efx = tx_queue->efx; in ef4_init_tx_queue()
590 "initialising TX queue %d\n", tx_queue->queue); in ef4_init_tx_queue()
592 tx_queue->insert_count = 0; in ef4_init_tx_queue()
593 tx_queue->write_count = 0; in ef4_init_tx_queue()
594 tx_queue->old_write_count = 0; in ef4_init_tx_queue()
595 tx_queue->read_count = 0; in ef4_init_tx_queue()
596 tx_queue->old_read_count = 0; in ef4_init_tx_queue()
597 tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID; in ef4_init_tx_queue()
598 tx_queue->xmit_more_available = false; in ef4_init_tx_queue()
601 tx_queue->tx_min_size = EF4_WORKAROUND_15592(efx) ? 33 : 0; in ef4_init_tx_queue()
604 ef4_nic_init_tx(tx_queue); in ef4_init_tx_queue()
606 tx_queue->initialised = true; in ef4_init_tx_queue()
609 void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue) in ef4_fini_tx_queue() argument
613 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in ef4_fini_tx_queue()
614 "shutting down TX queue %d\n", tx_queue->queue); in ef4_fini_tx_queue()
616 if (!tx_queue->buffer) in ef4_fini_tx_queue()
620 while (tx_queue->read_count != tx_queue->write_count) { in ef4_fini_tx_queue()
622 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; in ef4_fini_tx_queue()
623 ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); in ef4_fini_tx_queue()
625 ++tx_queue->read_count; in ef4_fini_tx_queue()
627 tx_queue->xmit_more_available = false; in ef4_fini_tx_queue()
628 netdev_tx_reset_queue(tx_queue->core_txq); in ef4_fini_tx_queue()
631 void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue) in ef4_remove_tx_queue() argument
635 if (!tx_queue->buffer) in ef4_remove_tx_queue()
638 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in ef4_remove_tx_queue()
639 "destroying TX queue %d\n", tx_queue->queue); in ef4_remove_tx_queue()
640 ef4_nic_remove_tx(tx_queue); in ef4_remove_tx_queue()
642 if (tx_queue->cb_page) { in ef4_remove_tx_queue()
643 for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++) in ef4_remove_tx_queue()
644 ef4_nic_free_buffer(tx_queue->efx, in ef4_remove_tx_queue()
645 &tx_queue->cb_page[i]); in ef4_remove_tx_queue()
646 kfree(tx_queue->cb_page); in ef4_remove_tx_queue()
647 tx_queue->cb_page = NULL; in ef4_remove_tx_queue()
650 kfree(tx_queue->buffer); in ef4_remove_tx_queue()
651 tx_queue->buffer = NULL; in ef4_remove_tx_queue()