Lines Matching refs:tx_queue
34 static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, in efx_tx_get_copy_buffer() argument
37 unsigned int index = efx_tx_queue_get_insert_index(tx_queue); in efx_tx_get_copy_buffer()
39 &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)]; in efx_tx_get_copy_buffer()
44 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, in efx_tx_get_copy_buffer()
52 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, in efx_tx_get_copy_buffer_limited() argument
57 return efx_tx_get_copy_buffer(tx_queue, buffer); in efx_tx_get_copy_buffer_limited()
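
The fragments above (lines 34-44 and 52-57) cover the per-queue copy-buffer lookup: the current insert index selects an entry in tx_queue->cb_page, the backing page is allocated lazily via efx_nic_alloc_buffer(..., PAGE_SIZE, ...), and the _limited variant appears to bound the copy length before delegating. Below is a standalone illustration of the indexing arithmetic only, assuming 4 KiB pages (PAGE_SHIFT = 12) and EFX_TX_CB_ORDER = 7, i.e. 128-byte copy buffers; those constants are assumptions, not taken from the listing.

    /* Demo of the cb_page indexing math only; not driver code. */
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT  12                      /* assumed 4 KiB pages */
    #define DEMO_PAGE_SIZE   (1u << DEMO_PAGE_SHIFT)
    #define DEMO_CB_ORDER    7                       /* assumed 128-byte copy buffers */

    int main(void)
    {
            for (unsigned int index = 0; index < 96; index += 31) {
                    /* Which cb_page[] entry holds this insert index ... */
                    unsigned int page = index >> (DEMO_PAGE_SHIFT - DEMO_CB_ORDER);
                    /* ... and where inside that page its bytes start. */
                    unsigned int offset = (index << DEMO_CB_ORDER) & (DEMO_PAGE_SIZE - 1);

                    printf("insert index %2u -> cb_page[%u] + %4u\n",
                           index, page, offset);
            }
            return 0;
    }

With these assumed constants each page holds 32 copy buffers, so descriptor slots 0-31 share cb_page[0], 32-63 share cb_page[1], and so on.
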
99 static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue, in efx_enqueue_skb_copy() argument
109 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_skb_copy()
111 copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer); in efx_enqueue_skb_copy()
122 ++tx_queue->insert_count; in efx_enqueue_skb_copy()
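
Lines 99-122 outline the copybreak path for small frames: claim the descriptor at the insert position, fetch its copy buffer, copy the skb into it, and advance insert_count. A simplified sketch of that shape follows; it assumes skb_copy_bits() does the copy and omits the driver's buffer-flag and error-handling details.

    /* Sketch of the copy path; simplified, not the verbatim driver code. */
    static int sketch_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
                                       struct sk_buff *skb)
    {
            struct efx_tx_buffer *buffer;
            u8 *copy_buffer;

            /* Claim the descriptor at the current insert position. */
            buffer = efx_tx_queue_get_insert_buffer(tx_queue);

            /* Find (or lazily allocate) the matching copy buffer. */
            copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
            if (unlikely(!copy_buffer))
                    return -ENOMEM;

            /* Pull the whole (short) frame into DMA-safe memory. */
            skb_copy_bits(skb, 0, copy_buffer, skb->len);
            buffer->len = skb->len;
            buffer->skb = skb;           /* released on completion */

            ++tx_queue->insert_count;    /* descriptor now belongs to the HW side */
            return 0;
    }
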
220 static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, in efx_enqueue_skb_pio() argument
224 efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_skb_pio()
225 u8 __iomem *piobuf = tx_queue->piobuf; in efx_enqueue_skb_pio()
240 efx_skb_copy_bits_to_pio(tx_queue->efx, skb, in efx_enqueue_skb_pio()
242 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf); in efx_enqueue_skb_pio()
250 __iowrite64_copy(tx_queue->piobuf, skb->data, in efx_enqueue_skb_pio()
263 tx_queue->piobuf_offset); in efx_enqueue_skb_pio()
264 ++tx_queue->insert_count; in efx_enqueue_skb_pio()
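
Lines 220-264 are the PIO path: the frame is written straight into the queue's PIO aperture (tx_queue->piobuf). A fragmented skb appears to go through an intermediate copy buffer (efx_skb_copy_bits_to_pio / efx_flush_copy_buffer), while a linear one is pushed with a single __iowrite64_copy; the descriptor then records tx_queue->piobuf_offset. A rough sketch of the linear branch only; the cache-line padding and the omitted option-descriptor setup are assumptions.

    /* Sketch of the linear-skb PIO branch; simplified, not verbatim. */
    static void sketch_enqueue_skb_pio_linear(struct efx_tx_queue *tx_queue,
                                              struct sk_buff *skb)
    {
            struct efx_tx_buffer *buffer =
                    efx_tx_queue_get_insert_buffer(tx_queue);
            u8 __iomem *piobuf = tx_queue->piobuf;

            /* Copy the packet into the PIO aperture in 64-bit chunks,
             * padded (assumed) to a cache line so the writes combine well.
             */
            __iowrite64_copy(piobuf, skb->data,
                             ALIGN(skb->len, L1_CACHE_BYTES) >> 3);

            /* The descriptor only needs to name the PIO region; the data
             * is already on the NIC.
             */
            buffer->skb = skb;
            buffer->len = skb->len;
            /* ... option descriptor built from tx_queue->piobuf_offset ... */

            ++tx_queue->insert_count;
    }
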
276 static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue) in efx_tx_may_pio() argument
278 struct efx_channel *channel = tx_queue->channel; in efx_tx_may_pio()
280 if (!tx_queue->piobuf) in efx_tx_may_pio()
285 efx_for_each_channel_tx_queue(tx_queue, channel) in efx_tx_may_pio()
286 if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count)) in efx_tx_may_pio()
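
Lines 276-286 gate the PIO path: it is only taken when the queue actually has a PIO buffer mapped and every TX queue on the owning channel is idle relative to packet_write_count. A condensed sketch of that check, not verbatim:

    /* Sketch of the PIO eligibility check; simplified, not verbatim. */
    static bool sketch_tx_may_pio(struct efx_tx_queue *tx_queue)
    {
            struct efx_channel *channel = tx_queue->channel;

            /* No PIO aperture mapped for this queue: use DMA instead. */
            if (!tx_queue->piobuf)
                    return false;

            /* Any outstanding descriptor on any TX queue of this channel
             * rules PIO out.
             */
            efx_for_each_channel_tx_queue(tx_queue, channel)
                    if (!efx_nic_tx_is_empty(tx_queue,
                                             tx_queue->packet_write_count))
                            return false;

            return true;
    }
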
322 netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) in __efx_enqueue_skb() argument
324 unsigned int old_insert_count = tx_queue->insert_count; in __efx_enqueue_skb()
341 switch (tx_queue->tso_version) { in __efx_enqueue_skb()
343 rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped); in __efx_enqueue_skb()
346 rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped); in __efx_enqueue_skb()
354 rc = efx_tx_tso_fallback(tx_queue, skb); in __efx_enqueue_skb()
355 tx_queue->tso_fallbacks++; in __efx_enqueue_skb()
363 efx_tx_may_pio(tx_queue)) { in __efx_enqueue_skb()
365 if (efx_enqueue_skb_pio(tx_queue, skb)) in __efx_enqueue_skb()
367 tx_queue->pio_packets++; in __efx_enqueue_skb()
372 if (efx_enqueue_skb_copy(tx_queue, skb)) in __efx_enqueue_skb()
374 tx_queue->cb_packets++; in __efx_enqueue_skb()
379 if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) in __efx_enqueue_skb()
382 efx_tx_maybe_stop_queue(tx_queue); in __efx_enqueue_skb()
384 tx_queue->xmit_pending = true; in __efx_enqueue_skb()
387 if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) in __efx_enqueue_skb()
388 efx_tx_send_pending(tx_queue->channel); in __efx_enqueue_skb()
391 tx_queue->tso_bursts++; in __efx_enqueue_skb()
392 tx_queue->tso_packets += segments; in __efx_enqueue_skb()
393 tx_queue->tx_packets += segments; in __efx_enqueue_skb()
395 tx_queue->tx_packets++; in __efx_enqueue_skb()
402 efx_enqueue_unwind(tx_queue, old_insert_count); in __efx_enqueue_skb()
410 efx_tx_send_pending(tx_queue->channel); in __efx_enqueue_skb()
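
Taken together, lines 322-410 show the main transmit decision tree in __efx_enqueue_skb: GSO skbs go to the TSO engine selected by tx_queue->tso_version (with efx_tx_tso_fallback as the software escape hatch), short packets go via PIO when efx_tx_may_pio() allows it or via the copy buffers otherwise, and everything else is DMA-mapped by efx_tx_map_data(); then come queue stop/BQL accounting and the deferred doorbell through efx_tx_send_pending(), with efx_enqueue_unwind() reclaiming descriptors on error. A condensed sketch of that flow; PIO_COPYBREAK and CB_COPYBREAK are placeholder thresholds, and the separate TSO counters at lines 391-393 are folded into one line here.

    /* Condensed sketch of the __efx_enqueue_skb flow; not verbatim. */
    static netdev_tx_t sketch_enqueue_skb(struct efx_tx_queue *tx_queue,
                                          struct sk_buff *skb)
    {
            unsigned int old_insert_count = tx_queue->insert_count;
            bool xmit_more = netdev_xmit_more();
            unsigned int skb_len = skb->len;
            unsigned int segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
            bool data_mapped = false;
            int rc;

            if (segments) {
                    /* Pick the TSO engine for this queue's hardware. */
                    switch (tx_queue->tso_version) {
                    case 1:
                            rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
                            break;
                    case 2:
                            rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
                            break;
                    default:
                            rc = -EINVAL;
                            break;
                    }
                    if (rc == -EINVAL) {
                            /* Hardware can't take it: segment in software. */
                            rc = efx_tx_tso_fallback(tx_queue, skb);
                            tx_queue->tso_fallbacks++;
                            if (rc == 0)
                                    return NETDEV_TX_OK;
                    }
                    if (rc)
                            goto err;
            } else if (skb_len <= PIO_COPYBREAK && efx_tx_may_pio(tx_queue)) {
                    /* Tiny packet, idle queue: write through the PIO
                     * aperture.  PIO_COPYBREAK is a placeholder threshold.
                     */
                    if (efx_enqueue_skb_pio(tx_queue, skb))
                            goto err;
                    tx_queue->pio_packets++;
                    data_mapped = true;
            } else if (skb_len <= CB_COPYBREAK) {
                    /* Short packet: coalesce into a copy buffer.
                     * CB_COPYBREAK is a placeholder threshold.
                     */
                    if (efx_enqueue_skb_copy(tx_queue, skb))
                            goto err;
                    tx_queue->cb_packets++;
                    data_mapped = true;
            }

            /* Anything not yet mapped gets DMA descriptors here. */
            if (!data_mapped && efx_tx_map_data(tx_queue, skb, segments))
                    goto err;

            efx_tx_maybe_stop_queue(tx_queue);
            tx_queue->xmit_pending = true;

            /* BQL accounting; ring the doorbell only when the stack has
             * nothing further to hand us (xmit_more is false).
             */
            if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
                    efx_tx_send_pending(tx_queue->channel);

            tx_queue->tx_packets += segments ? segments : 1;
            return NETDEV_TX_OK;

    err:
            /* Roll back any descriptors claimed for this skb, and make sure
             * earlier deferred packets still get pushed.
             */
            efx_enqueue_unwind(tx_queue, old_insert_count);
            dev_kfree_skb_any(skb);
            if (!xmit_more)
                    efx_tx_send_pending(tx_queue->channel);
            return NETDEV_TX_OK;
    }

Deferring the doorbell behind xmit_more is the usual batching pattern: several skbs can be queued with one MMIO write at the end of the burst.
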
425 struct efx_tx_queue *tx_queue; in efx_xdp_tx_buffers() local
442 tx_queue = efx->xdp_tx_queues[cpu]; in efx_xdp_tx_buffers()
443 if (unlikely(!tx_queue)) in efx_xdp_tx_buffers()
446 if (!tx_queue->initialised) in efx_xdp_tx_buffers()
450 HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); in efx_xdp_tx_buffers()
456 if (netif_tx_queue_stopped(tx_queue->core_txq)) in efx_xdp_tx_buffers()
458 efx_tx_maybe_stop_queue(tx_queue); in efx_xdp_tx_buffers()
465 tx_queue->read_count - tx_queue->insert_count; in efx_xdp_tx_buffers()
474 prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue)); in efx_xdp_tx_buffers()
486 tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); in efx_xdp_tx_buffers()
492 tx_queue->tx_packets++; in efx_xdp_tx_buffers()
497 efx_nic_push_buffers(tx_queue); in efx_xdp_tx_buffers()
501 HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq); in efx_xdp_tx_buffers()
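
Lines 425-501 are the XDP transmit path: a per-CPU queue is taken from efx->xdp_tx_queues[], the core netdev queue is held via HARD_TX_LOCK while frames are mapped with efx_tx_map_chunk(), free space is derived from read_count - insert_count, and efx_nic_push_buffers() rings the doorbell for the whole batch. A trimmed sketch of that loop; the DMA-mapping step, the xdpf bookkeeping field, the return convention, and the stop-queue handling for borrowed queues (lines 450-458) are simplified or assumed.

    /* Trimmed sketch of the XDP TX loop; simplified, not verbatim. */
    static int sketch_xdp_tx_buffers(struct efx_nic *efx, int n,
                                     struct xdp_frame **xdpfs, bool flush)
    {
            struct efx_tx_queue *tx_queue;
            int space, cpu, i;

            cpu = raw_smp_processor_id();
            tx_queue = efx->xdp_tx_queues[cpu];
            if (unlikely(!tx_queue) || !tx_queue->initialised)
                    return -EINVAL;

            HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);

            /* Free descriptors left in the ring. */
            space = efx->txq_entries +
                    tx_queue->read_count - tx_queue->insert_count;

            for (i = 0; i < n && i < space; i++) {
                    struct xdp_frame *xdpf = xdpfs[i];
                    struct efx_tx_buffer *tx_buffer;
                    dma_addr_t dma_addr;

                    /* Warm the cache line of the descriptor we will fill. */
                    prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

                    /* Map the frame data for the device (assumed single map). */
                    dma_addr = dma_map_single(&efx->pci_dev->dev, xdpf->data,
                                              xdpf->len, DMA_TO_DEVICE);
                    if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
                            break;

                    /* One descriptor per frame; remember the frame for
                     * completion (assumed xdpf field).
                     */
                    tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, xdpf->len);
                    tx_buffer->xdpf = xdpf;
                    tx_queue->tx_packets++;
            }

            /* Ring the doorbell once for the whole batch. */
            if (flush && i > 0)
                    efx_nic_push_buffers(tx_queue);

            HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
            return i;
    }
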
516 struct efx_tx_queue *tx_queue; in efx_hard_start_xmit() local
539 tx_queue = efx_get_tx_queue(efx, index, type); in efx_hard_start_xmit()
540 if (WARN_ON_ONCE(!tx_queue)) { in efx_hard_start_xmit()
555 return __efx_enqueue_skb(tx_queue, skb); in efx_hard_start_xmit()
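
Lines 516-555 are the ndo_start_xmit hook: it resolves the driver queue from the skb's queue mapping and priority type with efx_get_tx_queue(), warns and drops if the lookup fails, and otherwise hands the skb to __efx_enqueue_skb(). A minimal sketch; the type derivation (checksum offload and HIGHPRI bits) and the drop path are assumptions left as placeholders.

    /* Minimal sketch of the ndo_start_xmit hand-off; not verbatim. */
    static netdev_tx_t sketch_hard_start_xmit(struct sk_buff *skb,
                                              struct net_device *net_dev)
    {
            struct efx_nic *efx = netdev_priv(net_dev);
            struct efx_tx_queue *tx_queue;
            unsigned int index = skb_get_queue_mapping(skb);
            unsigned int type = 0;      /* checksum/HIGHPRI type bits omitted */

            tx_queue = efx_get_tx_queue(efx, index, type);
            if (WARN_ON_ONCE(!tx_queue)) {
                    /* Queue map and channel setup disagree: drop the skb. */
                    dev_kfree_skb_any(skb);
                    return NETDEV_TX_OK;
            }

            return __efx_enqueue_skb(tx_queue, skb);
    }
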
558 void efx_xmit_done_single(struct efx_tx_queue *tx_queue) in efx_xmit_done_single() argument
564 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in efx_xmit_done_single()
567 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; in efx_xmit_done_single()
570 struct efx_nic *efx = tx_queue->efx; in efx_xmit_done_single()
574 tx_queue->queue); in efx_xmit_done_single()
582 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); in efx_xmit_done_single()
584 ++tx_queue->read_count; in efx_xmit_done_single()
585 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in efx_xmit_done_single()
588 tx_queue->pkts_compl += pkts_compl; in efx_xmit_done_single()
589 tx_queue->bytes_compl += bytes_compl; in efx_xmit_done_single()
593 efx_xmit_done_check_empty(tx_queue); in efx_xmit_done_single()
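
Lines 558-593 drain a single completed skb: starting at read_count (masked by ptr_mask), each buffer is dequeued and counted until the one actually carrying the skb has been released, a spurious-completion error is logged against tx_queue->queue if an unused buffer is hit first, the totals feed pkts_compl/bytes_compl, and efx_xmit_done_check_empty() runs at the end. A condensed sketch of that ring walk, keeping the four-argument efx_dequeue_buffer() shown in the listing and assuming a helper like efx_tx_buffer_in_use() for the in-use test:

    /* Condensed sketch of the single-completion ring walk; not verbatim. */
    static void sketch_xmit_done_single(struct efx_tx_queue *tx_queue)
    {
            unsigned int pkts_compl = 0, bytes_compl = 0;
            unsigned int read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
            bool finished = false;

            while (!finished) {
                    struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                    if (!efx_tx_buffer_in_use(buffer)) {
                            /* Completion arrived for a descriptor we never
                             * filled: report it and stop (the driver also
                             * schedules a reset at this point).
                             */
                            netif_err(tx_queue->efx, hw, tx_queue->efx->net_dev,
                                      "TX queue %d spurious single TX completion\n",
                                      tx_queue->queue);
                            return;
                    }

                    /* The skb lives on the last buffer of the packet; note
                     * that before the dequeue clears the flags.
                     */
                    if (buffer->flags & EFX_TX_BUF_SKB)
                            finished = true;

                    efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

                    ++tx_queue->read_count;
                    read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
            }

            tx_queue->pkts_compl += pkts_compl;
            tx_queue->bytes_compl += bytes_compl;

            efx_xmit_done_check_empty(tx_queue);
    }
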
596 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) in efx_init_tx_queue_core_txq() argument
598 struct efx_nic *efx = tx_queue->efx; in efx_init_tx_queue_core_txq()
601 tx_queue->core_txq = in efx_init_tx_queue_core_txq()
603 tx_queue->channel->channel + in efx_init_tx_queue_core_txq()
604 ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ? in efx_init_tx_queue_core_txq()
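
The final fragment (lines 596-604, cut off at the ternary because the following source line does not mention tx_queue) binds the driver queue to its struct netdev_queue. In this reading it is the inverse of the lookup in efx_hard_start_xmit(): the core queue index is the channel number, with EFX_TXQ_TYPE_HIGHPRI queues assumed to sit in a second block after the normal TX channels. A sketch of that mapping:

    /* Sketch of the core txq binding; the HIGHPRI offset is an assumption. */
    static void sketch_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
    {
            struct efx_nic *efx = tx_queue->efx;
            unsigned int core_index = tx_queue->channel->channel;

            /* High-priority queues occupy a second block of core queues,
             * after one queue per normal TX channel.
             */
            if (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI)
                    core_index += efx->n_tx_channels;

            tx_queue->core_txq = netdev_get_tx_queue(efx->net_dev, core_index);
    }
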