Lines Matching full:rx
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
100 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
116 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
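
The two interrupt lines above only dispatch work to NAPI. A minimal sketch of that pattern, assuming a per-queue context with an embedded napi_struct (names are illustrative, not the driver's definitions; the device-specific interrupt masking is omitted):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_queue {
	struct napi_struct napi;
	/* ... TX/RX ring state ... */
};

static irqreturn_t example_irq_txrx(int irq, void *arg)
{
	struct example_queue *queue = arg;

	/* Mask this queue's interrupt source here (device specific), then
	 * defer all TX/RX processing to the NAPI poll loop.
	 */
	napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}
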
950 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) in tsnep_rx_ring_cleanup() argument
952 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_cleanup()
957 entry = &rx->entry[i]; in tsnep_rx_ring_cleanup()
958 if (!rx->xsk_pool && entry->page) in tsnep_rx_ring_cleanup()
959 page_pool_put_full_page(rx->page_pool, entry->page, in tsnep_rx_ring_cleanup()
961 if (rx->xsk_pool && entry->xdp) in tsnep_rx_ring_cleanup()
967 if (rx->page_pool) in tsnep_rx_ring_cleanup()
968 page_pool_destroy(rx->page_pool); in tsnep_rx_ring_cleanup()
970 memset(rx->entry, 0, sizeof(rx->entry)); in tsnep_rx_ring_cleanup()
973 if (rx->page[i]) { in tsnep_rx_ring_cleanup()
974 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], in tsnep_rx_ring_cleanup()
975 rx->page_dma[i]); in tsnep_rx_ring_cleanup()
976 rx->page[i] = NULL; in tsnep_rx_ring_cleanup()
977 rx->page_dma[i] = 0; in tsnep_rx_ring_cleanup()
982 static int tsnep_rx_ring_create(struct tsnep_rx *rx) in tsnep_rx_ring_create() argument
984 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_create()
992 rx->page[i] = in tsnep_rx_ring_create()
993 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], in tsnep_rx_ring_create()
995 if (!rx->page[i]) { in tsnep_rx_ring_create()
1000 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_rx_ring_create()
1002 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_rx_ring_create()
1005 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_rx_ring_create()
1017 rx->page_pool = page_pool_create(&pp_params); in tsnep_rx_ring_create()
1018 if (IS_ERR(rx->page_pool)) { in tsnep_rx_ring_create()
1019 retval = PTR_ERR(rx->page_pool); in tsnep_rx_ring_create()
1020 rx->page_pool = NULL; in tsnep_rx_ring_create()
1025 entry = &rx->entry[i]; in tsnep_rx_ring_create()
1026 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_rx_ring_create()
1033 tsnep_rx_ring_cleanup(rx); in tsnep_rx_ring_create()
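
tsnep_rx_ring_create() packs the descriptor ring into DMA-coherent pages, TSNEP_RING_ENTRIES_PER_PAGE descriptors of TSNEP_DESC_SIZE bytes each. A standalone sketch of that indexing, with assumed constants (the real TSNEP_* values are not part of this listing):

#include <stdint.h>

#define DESC_SIZE		256			/* assumed for illustration */
#define ENTRIES_PER_PAGE	(4096 / DESC_SIZE)	/* descriptors per 4 KiB page (assumed page size) */

/* CPU address of descriptor n: page n / ENTRIES_PER_PAGE holds the
 * descriptor at offset DESC_SIZE * (n % ENTRIES_PER_PAGE), mirroring
 * entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j] above.
 */
static inline void *desc_addr(void **page, int n)
{
	return (uint8_t *)page[n / ENTRIES_PER_PAGE] +
	       DESC_SIZE * (n % ENTRIES_PER_PAGE);
}

/* Matching DMA address, built from the per-page DMA base addresses. */
static inline uint64_t desc_dma(const uint64_t *page_dma, int n)
{
	return page_dma[n / ENTRIES_PER_PAGE] +
	       DESC_SIZE * (n % ENTRIES_PER_PAGE);
}
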
1037 static void tsnep_rx_init(struct tsnep_rx *rx) in tsnep_rx_init() argument
1041 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_rx_init()
1042 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); in tsnep_rx_init()
1043 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); in tsnep_rx_init()
1044 rx->write = 0; in tsnep_rx_init()
1045 rx->read = 0; in tsnep_rx_init()
1046 rx->owner_counter = 1; in tsnep_rx_init()
1047 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_init()
1050 static void tsnep_rx_enable(struct tsnep_rx *rx) in tsnep_rx_enable() argument
1055 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_enable()
1058 static void tsnep_rx_disable(struct tsnep_rx *rx) in tsnep_rx_disable() argument
1062 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_disable()
1063 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, in tsnep_rx_disable()
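
The disable path above is cut off mid-call; the full readx_poll_timeout() idiom it uses looks roughly like this (register offsets, bits and timeouts below are assumptions, not the TSNEP values):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_CONTROL		0x00		/* illustrative register map */
#define EXAMPLE_RX_DISABLE	BIT(1)
#define EXAMPLE_RX_ACTIVE	BIT(2)

static int example_rx_disable(void __iomem *addr)
{
	u32 val;

	iowrite32(EXAMPLE_RX_DISABLE, addr + EXAMPLE_CONTROL);

	/* Re-read the control register until the hardware reports the RX
	 * path as stopped, sleeping ~10us between reads, giving up after 1ms.
	 */
	return readx_poll_timeout(ioread32, addr + EXAMPLE_CONTROL, val,
				  !(val & EXAMPLE_RX_ACTIVE), 10, 1000);
}
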
1068 static int tsnep_rx_desc_available(struct tsnep_rx *rx) in tsnep_rx_desc_available() argument
1070 if (rx->read <= rx->write) in tsnep_rx_desc_available()
1071 return TSNEP_RING_SIZE - rx->write + rx->read - 1; in tsnep_rx_desc_available()
1073 return rx->read - rx->write - 1; in tsnep_rx_desc_available()
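
tsnep_rx_desc_available() is the usual one-slot-reserved ring computation: with read == write the ring is empty and all but one descriptor are free. A standalone illustration of the same arithmetic (RING_SIZE is an assumed value):

#include <stdio.h>

#define RING_SIZE 256	/* assumed; the driver's TSNEP_RING_SIZE is not shown here */

static int ring_free(int read, int write)
{
	if (read <= write)
		return RING_SIZE - write + read - 1;
	return read - write - 1;
}

int main(void)
{
	printf("%d\n", ring_free(0, 0));	/* empty ring: 255 free */
	printf("%d\n", ring_free(10, 200));	/* writer ahead: 65 free */
	printf("%d\n", ring_free(200, 10));	/* writer wrapped: 189 free */
	return 0;
}
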
1076 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx) in tsnep_rx_free_page_buffer() argument
1083 page = rx->page_buffer; in tsnep_rx_free_page_buffer()
1085 page_pool_put_full_page(rx->page_pool, *page, false); in tsnep_rx_free_page_buffer()
1091 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx) in tsnep_rx_alloc_page_buffer() argument
1099 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_page_buffer()
1100 if (!rx->page_buffer[i]) { in tsnep_rx_alloc_page_buffer()
1101 tsnep_rx_free_page_buffer(rx); in tsnep_rx_alloc_page_buffer()
1110 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_page() argument
1116 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET); in tsnep_rx_set_page()
1119 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_alloc_buffer() argument
1121 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_buffer()
1124 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_buffer()
1127 tsnep_rx_set_page(rx, entry, page); in tsnep_rx_alloc_buffer()
1132 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer() argument
1134 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer()
1135 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer()
1137 tsnep_rx_set_page(rx, entry, read->page); in tsnep_rx_reuse_buffer()
1141 static void tsnep_rx_activate(struct tsnep_rx *rx, int index) in tsnep_rx_activate() argument
1143 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_activate()
1148 if (index == rx->increment_owner_counter) { in tsnep_rx_activate()
1149 rx->owner_counter++; in tsnep_rx_activate()
1150 if (rx->owner_counter == 4) in tsnep_rx_activate()
1151 rx->owner_counter = 1; in tsnep_rx_activate()
1152 rx->increment_owner_counter--; in tsnep_rx_activate()
1153 if (rx->increment_owner_counter < 0) in tsnep_rx_activate()
1154 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_activate()
1157 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_rx_activate()
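
tsnep_rx_activate() maintains an owner counter that cycles through 1..3, and the ring index at which it advances moves back by one position per lap, so consecutive laps stamp descriptors with different counter values. A standalone sketch of just that rotation (RING_SIZE assumed):

#include <stdio.h>

#define RING_SIZE 256	/* assumed; the driver's TSNEP_RING_SIZE is not shown here */

static int owner_counter = 1;			/* as set by tsnep_rx_init() */
static int increment_position = RING_SIZE - 1;

static void activate(int index)
{
	if (index == increment_position) {
		owner_counter++;
		if (owner_counter == 4)
			owner_counter = 1;	/* cycle 1 -> 2 -> 3 -> 1 */
		increment_position--;
		if (increment_position < 0)
			increment_position = RING_SIZE - 1;
	}
}

int main(void)
{
	for (int lap = 0; lap < 3; lap++) {
		for (int i = 0; i < RING_SIZE; i++)
			activate(i);
		printf("after lap %d: owner_counter=%d\n", lap + 1, owner_counter);
	}
	return 0;	/* prints 2, 3, 1 */
}
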
1168 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc() argument
1174 index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1176 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) { in tsnep_rx_alloc()
1177 rx->alloc_failed++; in tsnep_rx_alloc()
1182 tsnep_rx_reuse_buffer(rx, index); in tsnep_rx_alloc()
1187 tsnep_rx_activate(rx, index); in tsnep_rx_alloc()
1191 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1196 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill() argument
1200 desc_refilled = tsnep_rx_alloc(rx, count, reuse); in tsnep_rx_refill()
1202 tsnep_rx_enable(rx); in tsnep_rx_refill()
1207 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_xdp() argument
1213 entry->desc->rx = __cpu_to_le64(entry->dma); in tsnep_rx_set_xdp()
1216 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer_zc() argument
1218 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer_zc()
1219 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer_zc()
1221 tsnep_rx_set_xdp(rx, entry, read->xdp); in tsnep_rx_reuse_buffer_zc()
1225 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc_zc() argument
1230 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count); in tsnep_rx_alloc_zc()
1232 int index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1233 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_zc()
1235 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]); in tsnep_rx_alloc_zc()
1236 tsnep_rx_activate(rx, index); in tsnep_rx_alloc_zc()
1239 rx->alloc_failed++; in tsnep_rx_alloc_zc()
1242 tsnep_rx_reuse_buffer_zc(rx, rx->write); in tsnep_rx_alloc_zc()
1243 tsnep_rx_activate(rx, rx->write); in tsnep_rx_alloc_zc()
1248 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
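
tsnep_rx_alloc_zc() relies on xsk_buff_alloc_batch() to pull up to 'count' buffers out of the XSK pool in one call and then programs one descriptor per returned buffer. A reduced sketch of that pattern; the callback below stands in for tsnep_rx_set_xdp()/tsnep_rx_activate(), and the surrounding ring bookkeeping is omitted:

#include <net/xdp_sock_drv.h>

static u32 example_refill_zc(struct xsk_buff_pool *pool,
			     struct xdp_buff **batch, u32 count,
			     void (*program_desc)(struct xdp_buff *xdp, u32 i))
{
	u32 allocated, i;

	/* Grab up to 'count' buffers from the XSK fill queue in one go... */
	allocated = xsk_buff_alloc_batch(pool, batch, count);

	/* ...and program one RX descriptor per buffer that was returned. */
	for (i = 0; i < allocated; i++)
		program_desc(batch[i], i);

	return allocated;
}
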
1253 static void tsnep_rx_free_zc(struct tsnep_rx *rx) in tsnep_rx_free_zc() argument
1258 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_free_zc()
1266 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill_zc() argument
1270 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse); in tsnep_rx_refill_zc()
1272 tsnep_rx_enable(rx); in tsnep_rx_refill_zc()
1277 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog() argument
1292 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1297 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog()
1302 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1306 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1315 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), in tsnep_xdp_run_prog()
1321 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog_zc() argument
1332 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog_zc()
1342 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1347 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1351 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
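
Both run_prog helpers above follow the common XDP verdict switch. A hedged skeleton of that switch (the driver-specific XDP_TX transmit-back path and the buffer recycling are omitted):

#include <linux/filter.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>

static bool example_xdp_run(struct net_device *netdev, struct bpf_prog *prog,
			    struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		return false;		/* let the stack see the buffer */
	case XDP_TX:
		/* A real driver queues the frame on a TX ring here (the tsnep
		 * code calls tsnep_xdp_xmit_back()); treated as consumed.
		 */
		return true;
	case XDP_REDIRECT:
		if (xdp_do_redirect(netdev, xdp, prog) < 0)
			goto out_failure;
		return true;
	default:
		bpf_warn_invalid_xdp_action(netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		return true;		/* buffer will not reach the stack */
	}
}
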
1372 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, in tsnep_build_skb() argument
1385 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { in tsnep_build_skb()
1397 skb_record_rx_queue(skb, rx->queue_index); in tsnep_build_skb()
1398 skb->protocol = eth_type_trans(skb, rx->adapter->netdev); in tsnep_build_skb()
1403 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_page() argument
1408 skb = tsnep_build_skb(rx, page, length); in tsnep_rx_page()
1412 rx->packets++; in tsnep_rx_page()
1413 rx->bytes += length; in tsnep_rx_page()
1415 rx->multicast++; in tsnep_rx_page()
1419 page_pool_recycle_direct(rx->page_pool, page); in tsnep_rx_page()
1421 rx->dropped++; in tsnep_rx_page()
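
tsnep_build_skb() wraps a received page_pool page into an skb without copying, and tsnep_rx_page() then records the RX queue, resolves the protocol with eth_type_trans() and updates the packet/byte/multicast counters. A hedged sketch of the wrap step, with the metadata/timestamp handling reduced to a plain headroom parameter (helper name is illustrative):

#include <linux/mm.h>
#include <linux/skbuff.h>

/* Returns the skb on success; on failure the caller still owns the page and
 * can recycle it into its page_pool (the driver counts that as a drop).
 */
static struct sk_buff *example_build_rx_skb(struct page *page, int length,
					    unsigned int headroom)
{
	struct sk_buff *skb;

	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (!skb)
		return NULL;

	/* Skip the headroom/metadata area, then expose the received frame. */
	skb_reserve(skb, headroom);
	__skb_put(skb, length);

	/* Let the page_pool take the page back once the skb is freed. */
	skb_mark_for_recycle(skb);

	return skb;
}
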
1425 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll() argument
1428 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_poll()
1440 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll()
1441 dma_dir = page_pool_get_dma_dir(rx->page_pool); in tsnep_rx_poll()
1442 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll()
1444 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll()
1445 rx->tx_queue_index); in tsnep_rx_poll()
1446 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1448 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); in tsnep_rx_poll()
1451 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll()
1452 entry = &rx->entry[rx->read]; in tsnep_rx_poll()
1462 desc_available -= tsnep_rx_refill(rx, desc_available, in tsnep_rx_poll()
1466 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll()
1467 * RX processing in tsnep_rx_poll()
1469 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1472 rx->dropped++; in tsnep_rx_poll()
1489 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll()
1491 * consider metadata size as offset of actual data during RX in tsnep_rx_poll()
1496 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1506 consume = tsnep_xdp_run_prog(rx, prog, &xdp, in tsnep_rx_poll()
1509 rx->packets++; in tsnep_rx_poll()
1510 rx->bytes += length; in tsnep_rx_poll()
1518 tsnep_rx_page(rx, napi, entry->page, length); in tsnep_rx_poll()
1523 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1526 tsnep_rx_refill(rx, desc_available, false); in tsnep_rx_poll()
1531 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll_zc() argument
1544 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll_zc()
1545 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll_zc()
1547 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll_zc()
1548 rx->tx_queue_index); in tsnep_rx_poll_zc()
1549 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1552 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll_zc()
1553 entry = &rx->entry[rx->read]; in tsnep_rx_poll_zc()
1563 desc_available -= tsnep_rx_refill_zc(rx, desc_available, in tsnep_rx_poll_zc()
1567 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll_zc()
1568 * RX processing in tsnep_rx_poll_zc()
1570 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1573 rx->dropped++; in tsnep_rx_poll_zc()
1588 xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool); in tsnep_rx_poll_zc()
1590 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll_zc()
1592 * consider metadata size as offset of actual data during RX in tsnep_rx_poll_zc()
1597 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1606 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp, in tsnep_rx_poll_zc()
1609 rx->packets++; in tsnep_rx_poll_zc()
1610 rx->bytes += length; in tsnep_rx_poll_zc()
1618 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_poll_zc()
1623 tsnep_rx_page(rx, napi, page, length); in tsnep_rx_poll_zc()
1625 rx->dropped++; in tsnep_rx_poll_zc()
1632 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1635 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); in tsnep_rx_poll_zc()
1637 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_poll_zc()
1639 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_poll_zc()
1641 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_poll_zc()
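
The need_wakeup block at the end of the zero-copy poll loop above implements the AF_XDP wakeup handshake: the RX wakeup flag is set only while descriptors are still missing after the refill. The same logic, pulled out as a small helper for illustration:

#include <net/xdp_sock_drv.h>

static void example_update_rx_need_wakeup(struct xsk_buff_pool *pool,
					  int desc_missing)
{
	if (!xsk_uses_need_wakeup(pool))
		return;

	/* Ask user space to kick the driver while the RX ring could not be
	 * refilled completely; clear the flag once it is full again.
	 */
	if (desc_missing)
		xsk_set_rx_need_wakeup(pool);
	else
		xsk_clear_rx_need_wakeup(pool);
}
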
1649 static bool tsnep_rx_pending(struct tsnep_rx *rx) in tsnep_rx_pending() argument
1653 if (rx->read != rx->write) { in tsnep_rx_pending()
1654 entry = &rx->entry[rx->read]; in tsnep_rx_pending()
1664 static int tsnep_rx_open(struct tsnep_rx *rx) in tsnep_rx_open() argument
1669 retval = tsnep_rx_ring_create(rx); in tsnep_rx_open()
1673 tsnep_rx_init(rx); in tsnep_rx_open()
1675 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_open()
1676 if (rx->xsk_pool) in tsnep_rx_open()
1677 retval = tsnep_rx_alloc_zc(rx, desc_available, false); in tsnep_rx_open()
1679 retval = tsnep_rx_alloc(rx, desc_available, false); in tsnep_rx_open()
1689 if (rx->xsk_pool) { in tsnep_rx_open()
1690 retval = tsnep_rx_alloc_page_buffer(rx); in tsnep_rx_open()
1698 tsnep_rx_ring_cleanup(rx); in tsnep_rx_open()
1702 static void tsnep_rx_close(struct tsnep_rx *rx) in tsnep_rx_close() argument
1704 if (rx->xsk_pool) in tsnep_rx_close()
1705 tsnep_rx_free_page_buffer(rx); in tsnep_rx_close()
1707 tsnep_rx_ring_cleanup(rx); in tsnep_rx_close()
1710 static void tsnep_rx_reopen(struct tsnep_rx *rx) in tsnep_rx_reopen() argument
1712 struct page **page = rx->page_buffer; in tsnep_rx_reopen()
1715 tsnep_rx_init(rx); in tsnep_rx_reopen()
1718 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen()
1728 tsnep_rx_set_page(rx, entry, *page); in tsnep_rx_reopen()
1729 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen()
1730 rx->write++; in tsnep_rx_reopen()
1738 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx) in tsnep_rx_reopen_xsk() argument
1740 struct page **page = rx->page_buffer; in tsnep_rx_reopen_xsk()
1744 tsnep_rx_init(rx); in tsnep_rx_reopen_xsk()
1750 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, in tsnep_rx_reopen_xsk()
1754 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen_xsk()
1773 tsnep_rx_set_xdp(rx, entry, in tsnep_rx_reopen_xsk()
1774 rx->xdp_batch[allocated - 1]); in tsnep_rx_reopen_xsk()
1775 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen_xsk()
1776 rx->write++; in tsnep_rx_reopen_xsk()
1786 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_reopen_xsk()
1787 int desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_reopen_xsk()
1790 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_reopen_xsk()
1792 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_reopen_xsk()
1801 if (queue->rx && tsnep_rx_pending(queue->rx)) in tsnep_pending()
1821 if (queue->rx) { in tsnep_poll()
1822 done = queue->rx->xsk_pool ? in tsnep_poll()
1823 tsnep_rx_poll_zc(queue->rx, napi, budget) : in tsnep_poll()
1824 tsnep_rx_poll(queue->rx, napi, budget); in tsnep_poll()
1861 if (queue->tx && queue->rx) in tsnep_request_irq()
1863 name, queue->rx->queue_index); in tsnep_request_irq()
1868 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d", in tsnep_request_irq()
1869 name, queue->rx->queue_index); in tsnep_request_irq()
1901 struct tsnep_rx *rx = queue->rx; in tsnep_queue_close() local
1905 if (rx) { in tsnep_queue_close()
1906 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) in tsnep_queue_close()
1907 xdp_rxq_info_unreg(&rx->xdp_rxq); in tsnep_queue_close()
1908 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc)) in tsnep_queue_close()
1909 xdp_rxq_info_unreg(&rx->xdp_rxq_zc); in tsnep_queue_close()
1918 struct tsnep_rx *rx = queue->rx; in tsnep_queue_open() local
1924 if (rx) { in tsnep_queue_open()
1927 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1928 else if (rx->queue_index < adapter->num_tx_queues) in tsnep_queue_open()
1929 rx->tx_queue_index = rx->queue_index; in tsnep_queue_open()
1931 rx->tx_queue_index = 0; in tsnep_queue_open()
1937 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, in tsnep_queue_open()
1938 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1941 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, in tsnep_queue_open()
1943 rx->page_pool); in tsnep_queue_open()
1946 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev, in tsnep_queue_open()
1947 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1950 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, in tsnep_queue_open()
1955 if (rx->xsk_pool) in tsnep_queue_open()
1956 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc); in tsnep_queue_open()
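
tsnep_queue_open() registers two xdp_rxq_info instances per RX queue, one backed by the page_pool and one for zero-copy; xsk_pool_set_rxq_info() then ties an attached XSK pool to the zero-copy instance. A hedged sketch of the page_pool-backed registration sequence (helper name is illustrative):

#include <linux/netdevice.h>
#include <net/xdp.h>

struct page_pool;

static int example_reg_xdp_rxq(struct xdp_rxq_info *rxq,
			       struct net_device *netdev, u32 queue_index,
			       unsigned int napi_id, struct page_pool *pool)
{
	int err;

	err = xdp_rxq_info_reg(rxq, netdev, queue_index, napi_id);
	if (err)
		return err;

	/* Tell the XDP core the buffers come from a page_pool so redirected
	 * frames can be recycled correctly; a zero-copy instance would use
	 * MEM_TYPE_XSK_BUFF_POOL instead.
	 */
	err = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (err)
		xdp_rxq_info_unreg(rxq);

	return err;
}
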
1982 if (queue->rx) in tsnep_queue_enable()
1983 tsnep_rx_enable(queue->rx); in tsnep_queue_enable()
1994 /* disable RX after NAPI polling has been disabled, because RX can be in tsnep_queue_disable()
1997 if (queue->rx) in tsnep_queue_disable()
1998 tsnep_rx_disable(queue->rx); in tsnep_queue_disable()
2012 if (adapter->queue[i].rx) { in tsnep_netdev_open()
2013 retval = tsnep_rx_open(adapter->queue[i].rx); in tsnep_netdev_open()
2048 if (adapter->queue[i].rx) in tsnep_netdev_open()
2049 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_open()
2069 if (adapter->queue[i].rx) in tsnep_netdev_close()
2070 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_close()
2087 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2088 sizeof(*queue->rx->page_buffer), in tsnep_enable_xsk()
2090 if (!queue->rx->page_buffer) in tsnep_enable_xsk()
2092 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2093 sizeof(*queue->rx->xdp_batch), in tsnep_enable_xsk()
2095 if (!queue->rx->xdp_batch) { in tsnep_enable_xsk()
2096 kfree(queue->rx->page_buffer); in tsnep_enable_xsk()
2097 queue->rx->page_buffer = NULL; in tsnep_enable_xsk()
2102 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc); in tsnep_enable_xsk()
2108 queue->rx->xsk_pool = pool; in tsnep_enable_xsk()
2111 tsnep_rx_reopen_xsk(queue->rx); in tsnep_enable_xsk()
2125 tsnep_rx_free_zc(queue->rx); in tsnep_disable_xsk()
2127 queue->rx->xsk_pool = NULL; in tsnep_disable_xsk()
2131 tsnep_rx_reopen(queue->rx); in tsnep_disable_xsk()
2135 kfree(queue->rx->xdp_batch); in tsnep_disable_xsk()
2136 queue->rx->xdp_batch = NULL; in tsnep_disable_xsk()
2137 kfree(queue->rx->page_buffer); in tsnep_disable_xsk()
2138 queue->rx->page_buffer = NULL; in tsnep_disable_xsk()
2193 stats->rx_packets += adapter->rx[i].packets; in tsnep_netdev_get_stats64()
2194 stats->rx_bytes += adapter->rx[i].bytes; in tsnep_netdev_get_stats64()
2195 stats->rx_dropped += adapter->rx[i].dropped; in tsnep_netdev_get_stats64()
2196 stats->multicast += adapter->rx[i].multicast; in tsnep_netdev_get_stats64()
2385 /* initialize RX filtering, at least configured MAC address and in tsnep_mac_init()
2484 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2500 adapter->queue[0].rx = &adapter->rx[0]; in tsnep_queue_init()
2501 adapter->queue[0].rx->adapter = adapter; in tsnep_queue_init()
2502 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2503 adapter->queue[0].rx->queue_index = 0; in tsnep_queue_init()
2513 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2531 adapter->queue[i].rx = &adapter->rx[i]; in tsnep_queue_init()
2532 adapter->queue[i].rx->adapter = adapter; in tsnep_queue_init()
2533 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2534 adapter->queue[i].rx->queue_index = i; in tsnep_queue_init()