Lines Matching full:tx
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
100 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
116 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
277 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) in tsnep_tx_ring_cleanup() argument
279 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
282 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
285 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
286 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
287 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
288 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
289 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
294 static int tsnep_tx_ring_create(struct tsnep_tx *tx) in tsnep_tx_ring_create() argument
296 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_create()
303 tx->page[i] = in tsnep_tx_ring_create()
304 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_create()
306 if (!tx->page[i]) { in tsnep_tx_ring_create()
311 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_create()
313 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_create()
316 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_create()
321 entry = &tx->entry[i]; in tsnep_tx_ring_create()
322 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_tx_ring_create()
329 tsnep_tx_ring_cleanup(tx); in tsnep_tx_ring_create()
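
The matches above from tsnep_tx_ring_create()/tsnep_tx_ring_cleanup() show the TX ring being carved out of DMA-coherent pages: entry TSNEP_RING_ENTRIES_PER_PAGE * i + j gets the CPU address page[i] + TSNEP_DESC_SIZE * j and the bus address page_dma[i] + TSNEP_DESC_SIZE * j. Below is a minimal standalone sketch (not driver code) of that index-to-page mapping, using assumed constants rather than the driver's real header values:

#include <stdio.h>

/* Assumed illustrative values; the driver headers define the real ones. */
#define MODEL_PAGE_SIZE		4096
#define MODEL_DESC_SIZE		256
#define MODEL_ENTRIES_PER_PAGE	(MODEL_PAGE_SIZE / MODEL_DESC_SIZE)
#define MODEL_RING_SIZE		256

/* Map a ring index to (page, byte offset), mirroring the loops in
 * tsnep_tx_ring_create(): entry[ENTRIES_PER_PAGE * i + j] lives at
 * page[i] + DESC_SIZE * j.
 */
static void model_desc_location(int index, int *page, int *offset)
{
	*page = index / MODEL_ENTRIES_PER_PAGE;
	*offset = (index % MODEL_ENTRIES_PER_PAGE) * MODEL_DESC_SIZE;
}

int main(void)
{
	int page, offset;

	for (int index = 0; index < MODEL_RING_SIZE; index += 33) {
		model_desc_location(index, &page, &offset);
		printf("entry %3d -> page %2d, offset %4d\n", index, page, offset);
	}
	return 0;
}

tsnep_tx_ring_cleanup() undoes this by zeroing the entry array and freeing each coherent page, so no descriptor pointer outlives its backing page.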
333 static void tsnep_tx_init(struct tsnep_tx *tx) in tsnep_tx_init() argument
337 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_init()
338 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_init()
339 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_init()
340 tx->write = 0; in tsnep_tx_init()
341 tx->read = 0; in tsnep_tx_init()
342 tx->owner_counter = 1; in tsnep_tx_init()
343 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_init()
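
tsnep_tx_init() programs the ring base into the queue's address registers as two 32-bit writes and folds an owner-counter reset flag into the address word before rewinding the software state (write, read, owner_counter, increment_owner_counter). A standalone sketch of that address split, assuming DMA_ADDR_LOW/DMA_ADDR_HIGH are the usual low/high 32-bit halves and using a placeholder bit for TSNEP_RESET_OWNER_COUNTER (neither macro is expanded in this listing):

#include <stdint.h>
#include <stdio.h>

/* Assumed placeholder; the real flag value is defined by the driver. */
#define MODEL_RESET_OWNER_COUNTER	0x1ull

static uint32_t model_addr_low(uint64_t dma)
{
	return (uint32_t)(dma & 0xffffffffull);
}

static uint32_t model_addr_high(uint64_t dma)
{
	return (uint32_t)(dma >> 32);
}

int main(void)
{
	uint64_t dma = 0x0000000123456000ull | MODEL_RESET_OWNER_COUNTER;

	printf("TX_DESC_ADDR_LOW  = 0x%08x\n", model_addr_low(dma));
	printf("TX_DESC_ADDR_HIGH = 0x%08x\n", model_addr_high(dma));
	return 0;
}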
346 static void tsnep_tx_enable(struct tsnep_tx *tx) in tsnep_tx_enable() argument
350 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_enable()
357 static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi) in tsnep_tx_disable() argument
362 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_disable()
368 /* wait until TX is done in hardware */ in tsnep_tx_disable()
369 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_disable()
373 /* wait until TX is also done in software */ in tsnep_tx_disable()
374 while (READ_ONCE(tx->read) != tx->write) { in tsnep_tx_disable()
380 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, in tsnep_tx_activate() argument
383 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
415 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
416 tx->owner_counter++; in tsnep_tx_activate()
417 if (tx->owner_counter == 4) in tsnep_tx_activate()
418 tx->owner_counter = 1; in tsnep_tx_activate()
419 tx->increment_owner_counter--; in tsnep_tx_activate()
420 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
421 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
424 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
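
The fragment from tsnep_tx_activate() is the descriptor owner-counter logic: the counter cycles through 1..3 (presumably leaving 0 as the reset state), and the ring index at which it advances walks backwards one slot per wrap, so hardware can tell a freshly written descriptor from a stale one of the previous lap. A standalone model (not driver code) of just that state machine, with an assumed ring size:

#include <stdio.h>

/* Assumed illustrative ring size. */
#define MODEL_RING_SIZE	256

struct model_tx {
	int owner_counter;		/* starts at 1, as in tsnep_tx_init() */
	int increment_owner_counter;	/* starts at MODEL_RING_SIZE - 1 */
};

static void model_tx_init(struct model_tx *tx)
{
	tx->owner_counter = 1;
	tx->increment_owner_counter = MODEL_RING_SIZE - 1;
}

/* Mirror of the counter update done when descriptor 'index' is activated. */
static void model_tx_activate(struct model_tx *tx, int index)
{
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = MODEL_RING_SIZE - 1;
	}
}

int main(void)
{
	struct model_tx tx;
	int write = 0;

	model_tx_init(&tx);
	for (int i = 0; i < 4 * MODEL_RING_SIZE; i++) {
		model_tx_activate(&tx, write);
		write = (write + 1) % MODEL_RING_SIZE;
		if (write == 0)
			printf("after pass %d: owner_counter=%d, next bump at index %d\n",
			       i / MODEL_RING_SIZE + 1, tx.owner_counter,
			       tx.increment_owner_counter);
	}
	return 0;
}

Running it shows the counter wrapping 1 -> 2 -> 3 -> 1 across passes while the bump index moves down one slot per lap from MODEL_RING_SIZE - 1.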
441 static int tsnep_tx_desc_available(struct tsnep_tx *tx) in tsnep_tx_desc_available() argument
443 if (tx->read <= tx->write) in tsnep_tx_desc_available()
444 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
446 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
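
tsnep_tx_desc_available() is the usual one-slot-reserved ring arithmetic: read == write means empty, so at most TSNEP_RING_SIZE - 1 descriptors can ever be in flight. A standalone sketch with a few sanity checks, assuming an illustrative ring size:

#include <assert.h>

/* Assumed illustrative ring size. */
#define MODEL_RING_SIZE	256

/* Mirror of tsnep_tx_desc_available(): free descriptors between the software
 * write index and the completion read index, keeping one slot unused so that
 * read == write always means "empty".
 */
static int model_desc_available(int read, int write)
{
	if (read <= write)
		return MODEL_RING_SIZE - write + read - 1;
	return read - write - 1;
}

int main(void)
{
	assert(model_desc_available(0, 0) == MODEL_RING_SIZE - 1);	/* empty */
	assert(model_desc_available(10, 9) == 0);			/* full */
	assert(model_desc_available(200, 40) == 159);			/* wrapped */
	return 0;
}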
466 memcpy(&entry->desc->tx, fragdata, len); in tsnep_tx_map_frag()
471 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), in tsnep_tx_map_frag()
482 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count, in tsnep_tx_map() argument
485 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
493 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_tx_map()
505 memcpy(&entry->desc->tx, skb->data, len); in tsnep_tx_map()
524 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
533 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) in tsnep_tx_unmap() argument
535 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
541 entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; in tsnep_tx_unmap()
563 struct tsnep_tx *tx) in tsnep_xmit_frame_ring() argument
575 if (tsnep_tx_desc_available(tx) < count) { in tsnep_xmit_frame_ring()
579 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
584 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
588 tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) { in tsnep_xmit_frame_ring()
593 retval = tsnep_tx_map(skb, tx, count, do_tstamp); in tsnep_xmit_frame_ring()
595 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
599 tx->dropped++; in tsnep_xmit_frame_ring()
606 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xmit_frame_ring()
608 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xmit_frame_ring()
615 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
617 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { in tsnep_xmit_frame_ring()
619 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
625 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, in tsnep_xdp_tx_map() argument
628 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
641 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_xdp_tx_map()
667 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
682 struct tsnep_tx *tx, u32 type) in tsnep_xdp_xmit_frame_ring() argument
692 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_frame_ring()
693 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_frame_ring()
696 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) in tsnep_xdp_xmit_frame_ring()
699 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
702 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); in tsnep_xdp_xmit_frame_ring()
704 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
707 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
714 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xdp_xmit_frame_ring()
716 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring()
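
tsnep_xdp_xmit_frame_ring() refuses to queue an XDP frame unless MAX_SKB_FRAGS + 1 descriptors would still be free afterwards, matching the comment above: the normal TX path stops its subqueue at that watermark, so XDP must never eat into it (the zero-copy path in tsnep_xdp_xmit_zc() carries the same comment). A standalone sketch of the guard, with an assumed value standing in for the kernel's MAX_SKB_FRAGS:

#include <stdbool.h>
#include <stdio.h>

/* Assumed stand-in for the kernel's MAX_SKB_FRAGS. */
#define MODEL_MAX_SKB_FRAGS	17

/* Mirror of the check at the top of tsnep_xdp_xmit_frame_ring(): transmit
 * only if 'count' descriptors can be consumed while MAX_SKB_FRAGS + 1 stay
 * free for the normal TX path.
 */
static bool model_xdp_may_xmit(int desc_available, int count)
{
	return desc_available >= MODEL_MAX_SKB_FRAGS + 1 + count;
}

int main(void)
{
	printf("40 free, 3 needed: %s\n", model_xdp_may_xmit(40, 3) ? "xmit" : "busy");
	printf("20 free, 3 needed: %s\n", model_xdp_may_xmit(20, 3) ? "xmit" : "busy");
	return 0;
}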
724 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) in tsnep_xdp_xmit_flush() argument
726 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
731 struct netdev_queue *tx_nq, struct tsnep_tx *tx, in tsnep_xdp_xmit_back() argument
749 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); in tsnep_xdp_xmit_back()
760 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx) in tsnep_xdp_tx_map_zc() argument
765 entry = &tx->entry[tx->write]; in tsnep_xdp_tx_map_zc()
768 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
769 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
774 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map_zc()
780 struct tsnep_tx *tx) in tsnep_xdp_xmit_frame_ring_zc() argument
784 length = tsnep_xdp_tx_map_zc(xdpd, tx); in tsnep_xdp_xmit_frame_ring_zc()
786 tsnep_tx_activate(tx, tx->write, length, true); in tsnep_xdp_xmit_frame_ring_zc()
787 tx->write = (tx->write + 1) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring_zc()
790 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx) in tsnep_xdp_xmit_zc() argument
792 int desc_available = tsnep_tx_desc_available(tx); in tsnep_xdp_xmit_zc()
793 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
796 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_zc()
797 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_zc()
804 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
806 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx); in tsnep_xdp_xmit_zc()
814 tsnep_xdp_xmit_flush(tx); in tsnep_xdp_xmit_zc()
818 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) in tsnep_tx_poll() argument
827 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
831 if (tx->read == tx->write) in tsnep_tx_poll()
834 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
853 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
884 tx->read = (tx->read + count) & TSNEP_RING_MASK; in tsnep_tx_poll()
886 tx->packets++; in tsnep_tx_poll()
887 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
892 if (tx->xsk_pool) { in tsnep_tx_poll()
894 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
895 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
896 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
897 tsnep_xdp_xmit_zc(tx); in tsnep_tx_poll()
900 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && in tsnep_tx_poll()
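
tsnep_xmit_frame_ring() stops the subqueue once fewer than MAX_SKB_FRAGS + 1 descriptors remain, and the completion path in tsnep_tx_poll() only considers restarting it once at least twice that many are free again, giving the stop/wake decision some hysteresis. The rest of the wake condition is not visible in this listing, so the sketch below simply assumes a previously stopped queue is woken at the higher watermark:

#include <stdbool.h>
#include <stdio.h>

/* Assumed stand-in for the kernel's MAX_SKB_FRAGS. */
#define MODEL_MAX_SKB_FRAGS	17
#define MODEL_STOP_LEVEL	(MODEL_MAX_SKB_FRAGS + 1)
#define MODEL_WAKE_LEVEL	((MODEL_MAX_SKB_FRAGS + 1) * 2)

/* After queuing a frame: stop the subqueue when it could not take another
 * maximally fragmented skb.
 */
static bool model_should_stop(int desc_available)
{
	return desc_available < MODEL_STOP_LEVEL;
}

/* From the completion poll: wake only once there is comfortable headroom. */
static bool model_should_wake(int desc_available, bool stopped)
{
	return stopped && desc_available >= MODEL_WAKE_LEVEL;
}

int main(void)
{
	bool stopped = model_should_stop(12);	/* 12 < 18 -> stop */

	printf("stopped: %d\n", stopped);
	printf("wake with 20 free: %d\n", model_should_wake(20, stopped));
	printf("wake with 40 free: %d\n", model_should_wake(40, stopped));
	return 0;
}

The asymmetric thresholds keep the subqueue from flapping when the ring hovers around the stop level.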
910 static bool tsnep_tx_pending(struct tsnep_tx *tx) in tsnep_tx_pending() argument
916 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
919 if (tx->read != tx->write) { in tsnep_tx_pending()
920 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
932 static int tsnep_tx_open(struct tsnep_tx *tx) in tsnep_tx_open() argument
936 retval = tsnep_tx_ring_create(tx); in tsnep_tx_open()
940 tsnep_tx_init(tx); in tsnep_tx_open()
945 static void tsnep_tx_close(struct tsnep_tx *tx) in tsnep_tx_close() argument
947 tsnep_tx_ring_cleanup(tx); in tsnep_tx_close()
1279 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_run_prog() argument
1292 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1324 struct tsnep_tx *tx) in tsnep_xdp_run_prog_zc() argument
1342 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1360 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_finalize_xdp() argument
1364 tsnep_xdp_xmit_flush(tx); in tsnep_finalize_xdp()
1434 struct tsnep_tx *tx; in tsnep_rx_poll() local
1446 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1507 &xdp_status, tx_nq, tx); in tsnep_rx_poll()
1523 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1537 struct tsnep_tx *tx; in tsnep_rx_poll_zc() local
1549 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1607 &xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1632 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1798 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1814 if (queue->tx) in tsnep_poll()
1815 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1861 if (queue->tx && queue->rx) in tsnep_request_irq()
1864 else if (queue->tx) in tsnep_request_irq()
1865 snprintf(queue->name, sizeof(queue->name), "%s-tx-%d", in tsnep_request_irq()
1866 name, queue->tx->queue_index); in tsnep_request_irq()
1919 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open() local
1925 /* choose TX queue for XDP_TX */ in tsnep_queue_open()
1926 if (tx) in tsnep_queue_open()
1927 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1979 if (queue->tx) in tsnep_queue_enable()
1980 tsnep_tx_enable(queue->tx); in tsnep_queue_enable()
1988 if (queue->tx) in tsnep_queue_disable()
1989 tsnep_tx_disable(queue->tx, &queue->napi); in tsnep_queue_disable()
2007 if (adapter->queue[i].tx) { in tsnep_netdev_open()
2008 retval = tsnep_tx_open(adapter->queue[i].tx); in tsnep_netdev_open()
2050 if (adapter->queue[i].tx) in tsnep_netdev_open()
2051 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
2071 if (adapter->queue[i].tx) in tsnep_netdev_close()
2072 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
2107 queue->tx->xsk_pool = pool; in tsnep_enable_xsk()
2128 queue->tx->xsk_pool = NULL; in tsnep_disable_xsk()
2150 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
2188 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
2189 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
2190 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
2306 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
2315 struct tsnep_tx *tx; in tsnep_netdev_xdp_xmit() local
2322 tx = tsnep_xdp_get_tx(adapter, cpu); in tsnep_netdev_xdp_xmit()
2323 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
2328 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx, in tsnep_netdev_xdp_xmit()
2340 tsnep_xdp_xmit_flush(tx); in tsnep_netdev_xdp_xmit()
2484 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2496 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
2497 adapter->queue[0].tx->adapter = adapter; in tsnep_queue_init()
2498 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2499 adapter->queue[0].tx->queue_index = 0; in tsnep_queue_init()
2513 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2527 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
2528 adapter->queue[i].tx->adapter = adapter; in tsnep_queue_init()
2529 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2530 adapter->queue[i].tx->queue_index = i; in tsnep_queue_init()