Lines matching refs:queue (drivers/net/xen-netfront.c)
220 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument
224 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb()
225 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb()
229 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument
233 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref()
234 queue->grant_rx_ref[i] = GRANT_INVALID_REF; in xennet_get_rx_ref()
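
Note: the two accessors above consume a slot as they read it — xennet_get_rx_skb() NULLs the rx_skbs entry and xennet_get_rx_ref() overwrites the grant ref with GRANT_INVALID_REF — so a response id replayed by the backend cannot be used twice. A minimal self-contained sketch of that take-and-clear pattern (all names here are hypothetical, not the driver's):

#include <stddef.h>

#define RING_SIZE   256u
#define INVALID_REF (-1)

struct slot_table {
    void *skbs[RING_SIZE];
    int   refs[RING_SIZE];
};

static void *take_skb(struct slot_table *t, unsigned int i)
{
    void *skb = t->skbs[i % RING_SIZE];

    t->skbs[i % RING_SIZE] = NULL;       /* consume exactly once */
    return skb;
}

static int take_ref(struct slot_table *t, unsigned int i)
{
    int ref = t->refs[i % RING_SIZE];

    t->refs[i % RING_SIZE] = INVALID_REF;
    return ref;
}
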
250 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer); in rx_refill_timeout() local
251 napi_schedule(&queue->napi); in rx_refill_timeout()
254 static int netfront_tx_slot_available(struct netfront_queue *queue) in netfront_tx_slot_available() argument
256 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < in netfront_tx_slot_available()
260 static void xennet_maybe_wake_tx(struct netfront_queue *queue) in xennet_maybe_wake_tx() argument
262 struct net_device *dev = queue->info->netdev; in xennet_maybe_wake_tx()
263 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); in xennet_maybe_wake_tx()
266 netfront_tx_slot_available(queue) && in xennet_maybe_wake_tx()
268 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); in xennet_maybe_wake_tx()
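
Note: netfront_tx_slot_available() relies on the ring indices being free-running unsigned counters, so req_prod_pvt - rsp_cons is the number of in-flight requests even across wraparound. A standalone illustration (the constants are stand-ins; the driver compares against NET_TX_RING_SIZE minus a reserved minimum):

#include <assert.h>

typedef unsigned int RING_IDX;      /* free-running index, wraps mod 2^32 */

#define TX_RING_SIZE   256u
#define SLOTS_RESERVED 2u           /* stand-in for the driver's reserved slots */

/* Requests still in flight; correct even after both counters wrap. */
static RING_IDX in_flight(RING_IDX req_prod_pvt, RING_IDX rsp_cons)
{
    return req_prod_pvt - rsp_cons;
}

static int tx_slot_available(RING_IDX req_prod_pvt, RING_IDX rsp_cons)
{
    return in_flight(req_prod_pvt, rsp_cons) < TX_RING_SIZE - SLOTS_RESERVED;
}

int main(void)
{
    /* Producer wrapped past 2^32, consumer not yet: still 10 in flight. */
    assert(in_flight(0x00000005u, 0xfffffffbu) == 10u);
    assert(tx_slot_available(0x00000005u, 0xfffffffbu));
    return 0;
}
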
272 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) in xennet_alloc_one_rx_buffer() argument
277 skb = __netdev_alloc_skb(queue->info->netdev, in xennet_alloc_one_rx_buffer()
283 page = page_pool_alloc_pages(queue->page_pool, in xennet_alloc_one_rx_buffer()
293 skb->dev = queue->info->netdev; in xennet_alloc_one_rx_buffer()
299 static void xennet_alloc_rx_buffers(struct netfront_queue *queue) in xennet_alloc_rx_buffers() argument
301 RING_IDX req_prod = queue->rx.req_prod_pvt; in xennet_alloc_rx_buffers()
305 if (unlikely(!netif_carrier_ok(queue->info->netdev))) in xennet_alloc_rx_buffers()
308 for (req_prod = queue->rx.req_prod_pvt; in xennet_alloc_rx_buffers()
309 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; in xennet_alloc_rx_buffers()
317 skb = xennet_alloc_one_rx_buffer(queue); in xennet_alloc_rx_buffers()
325 BUG_ON(queue->rx_skbs[id]); in xennet_alloc_rx_buffers()
326 queue->rx_skbs[id] = skb; in xennet_alloc_rx_buffers()
328 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); in xennet_alloc_rx_buffers()
330 queue->grant_rx_ref[id] = ref; in xennet_alloc_rx_buffers()
334 req = RING_GET_REQUEST(&queue->rx, req_prod); in xennet_alloc_rx_buffers()
336 queue->info->xbdev->otherend_id, in xennet_alloc_rx_buffers()
343 queue->rx.req_prod_pvt = req_prod; in xennet_alloc_rx_buffers()
350 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN || in xennet_alloc_rx_buffers()
352 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); in xennet_alloc_rx_buffers()
356 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); in xennet_alloc_rx_buffers()
358 notify_remote_via_irq(queue->rx_irq); in xennet_alloc_rx_buffers()
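
Note: xennet_alloc_rx_buffers() is a classic producer-side refill loop — fill until req_prod - rsp_cons reaches the ring size, publish the producer index once, and kick the backend only when the shared req_event mark says it went to sleep; on allocation failure it re-arms rx_refill_timer instead of spinning. A simplified, hypothetical user-space rendering of that shape (the real RING_PUSH_REQUESTS_AND_CHECK_NOTIFY macro also issues memory barriers):

#include <stdio.h>
#include <stdlib.h>

#define RX_RING_SIZE 256u

struct ring {
    unsigned int req_prod_pvt;   /* private producer index */
    unsigned int rsp_cons;       /* last response consumed */
    unsigned int req_prod;       /* shared producer index (backend-visible) */
    unsigned int req_event;      /* backend's "wake me at this index" mark */
    void *req[RX_RING_SIZE];     /* posted buffers */
};

static void *alloc_buffer(void) { return malloc(64); }  /* may return NULL */
static void notify_backend(void) { puts("kick backend event channel"); }

static void refill(struct ring *r)
{
    unsigned int prod, old = r->req_prod;

    for (prod = r->req_prod_pvt; prod - r->rsp_cons < RX_RING_SIZE; prod++) {
        void *buf = alloc_buffer();

        if (!buf)
            break;               /* the driver re-arms rx_refill_timer here */
        r->req[prod % RX_RING_SIZE] = buf;
    }
    r->req_prod_pvt = prod;

    r->req_prod = prod;          /* publish; the real macro adds barriers */
    /* Notify only if req_event falls inside the newly pushed window. */
    if (prod - r->req_event < prod - old)
        notify_backend();
}
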
366 struct netfront_queue *queue = NULL; in xennet_open() local
372 queue = &np->queues[i]; in xennet_open()
373 napi_enable(&queue->napi); in xennet_open()
375 spin_lock_bh(&queue->rx_lock); in xennet_open()
377 xennet_alloc_rx_buffers(queue); in xennet_open()
378 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; in xennet_open()
379 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) in xennet_open()
380 napi_schedule(&queue->napi); in xennet_open()
382 spin_unlock_bh(&queue->rx_lock); in xennet_open()
390 static bool xennet_tx_buf_gc(struct netfront_queue *queue) in xennet_tx_buf_gc() argument
397 const struct device *dev = &queue->info->netdev->dev; in xennet_tx_buf_gc()
399 BUG_ON(!netif_carrier_ok(queue->info->netdev)); in xennet_tx_buf_gc()
402 prod = queue->tx.sring->rsp_prod; in xennet_tx_buf_gc()
403 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) { in xennet_tx_buf_gc()
405 prod - queue->tx.rsp_cons); in xennet_tx_buf_gc()
410 for (cons = queue->tx.rsp_cons; cons != prod; cons++) { in xennet_tx_buf_gc()
415 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp); in xennet_tx_buf_gc()
420 if (id >= RING_SIZE(&queue->tx)) { in xennet_tx_buf_gc()
426 if (queue->tx_link[id] != TX_PENDING) { in xennet_tx_buf_gc()
432 queue->tx_link[id] = TX_LINK_NONE; in xennet_tx_buf_gc()
433 skb = queue->tx_skbs[id]; in xennet_tx_buf_gc()
434 queue->tx_skbs[id] = NULL; in xennet_tx_buf_gc()
436 queue->grant_tx_ref[id], GNTMAP_readonly))) { in xennet_tx_buf_gc()
442 &queue->gref_tx_head, queue->grant_tx_ref[id]); in xennet_tx_buf_gc()
443 queue->grant_tx_ref[id] = GRANT_INVALID_REF; in xennet_tx_buf_gc()
444 queue->grant_tx_page[id] = NULL; in xennet_tx_buf_gc()
445 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id); in xennet_tx_buf_gc()
449 queue->tx.rsp_cons = prod; in xennet_tx_buf_gc()
451 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do); in xennet_tx_buf_gc()
454 xennet_maybe_wake_tx(queue); in xennet_tx_buf_gc()
459 queue->info->broken = true; in xennet_tx_buf_gc()
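
Note: xennet_tx_buf_gc() is the matching consumer. It walks responses from rsp_cons to the backend's rsp_prod but treats every backend-supplied value as untrusted: it checks the producer for overflow and the echoed id against the ring size and the TX_PENDING link state before indexing any per-slot array, and any violation marks the queue broken. A condensed sketch of that validation discipline (hypothetical types, not the driver code):

#include <stdbool.h>
#include <stddef.h>

#define TX_RING_SIZE 256u
#define TX_LINK_NONE 0xffffu
#define TX_PENDING   0xfffeu

struct txq {
    unsigned int rsp_cons;
    unsigned int rsp_prod;               /* read once from the shared ring */
    unsigned short id_of[TX_RING_SIZE];  /* echoed ids: backend-controlled */
    unsigned short tx_link[TX_RING_SIZE];
    void *tx_skbs[TX_RING_SIZE];
    bool broken;
};

static void free_skb(void *skb) { (void)skb; }

static void tx_buf_gc(struct txq *q)
{
    unsigned int cons, prod = q->rsp_prod;

    if (prod - q->rsp_cons > TX_RING_SIZE) {  /* producer overflow check */
        q->broken = true;
        return;
    }

    for (cons = q->rsp_cons; cons != prod; cons++) {
        unsigned short id = q->id_of[cons % TX_RING_SIZE];

        /* Validate before using id as an array index. */
        if (id >= TX_RING_SIZE || q->tx_link[id] != TX_PENDING) {
            q->broken = true;
            return;
        }
        q->tx_link[id] = TX_LINK_NONE;
        free_skb(q->tx_skbs[id]);
        q->tx_skbs[id] = NULL;
        /* the driver also ends the grant and returns id to the free list */
    }
    q->rsp_cons = prod;
}
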
466 struct netfront_queue *queue; member
483 struct netfront_queue *queue = info->queue; in xennet_tx_setup_grant() local
486 id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link); in xennet_tx_setup_grant()
487 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); in xennet_tx_setup_grant()
488 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); in xennet_tx_setup_grant()
491 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, in xennet_tx_setup_grant()
494 queue->tx_skbs[id] = skb; in xennet_tx_setup_grant()
495 queue->grant_tx_page[id] = page; in xennet_tx_setup_grant()
496 queue->grant_tx_ref[id] = ref; in xennet_tx_setup_grant()
510 add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id); in xennet_tx_setup_grant()
604 static void xennet_mark_tx_pending(struct netfront_queue *queue) in xennet_mark_tx_pending() argument
608 while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) != in xennet_mark_tx_pending()
610 queue->tx_link[i] = TX_PENDING; in xennet_mark_tx_pending()
614 struct netfront_queue *queue, in xennet_xdp_xmit_one() argument
620 .queue = queue, in xennet_xdp_xmit_one()
630 xennet_mark_tx_pending(queue); in xennet_xdp_xmit_one()
632 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); in xennet_xdp_xmit_one()
634 notify_remote_via_irq(queue->tx_irq); in xennet_xdp_xmit_one()
641 xennet_tx_buf_gc(queue); in xennet_xdp_xmit_one()
651 struct netfront_queue *queue = NULL; in xennet_xdp_xmit() local
661 queue = &np->queues[smp_processor_id() % num_queues]; in xennet_xdp_xmit()
663 spin_lock_irqsave(&queue->tx_lock, irq_flags); in xennet_xdp_xmit()
669 err = xennet_xdp_xmit_one(dev, queue, xdpf); in xennet_xdp_xmit()
675 spin_unlock_irqrestore(&queue->tx_lock, irq_flags); in xennet_xdp_xmit()
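
Note: xennet_xdp_xmit() spreads XDP transmissions across queues by hashing the current CPU (smp_processor_id() % num_queues) and takes that queue's tx lock, since it can race with the regular start_xmit path. A user-space sketch of the same selection-and-serialization pattern (all names hypothetical):

#include <pthread.h>

#define NUM_QUEUES 4

struct txq {
    pthread_mutex_t lock;        /* stands in for queue->tx_lock */
};

static struct txq queues[NUM_QUEUES];

static void init_queues(void)
{
    for (int i = 0; i < NUM_QUEUES; i++)
        pthread_mutex_init(&queues[i].lock, NULL);
}

static int xmit_one(struct txq *q, void *frame)
{
    (void)q; (void)frame;
    return 0;                    /* pretend the frame was queued */
}

/* Pick a queue by CPU id, then serialize with the normal transmit path. */
static int xdp_xmit(void *frames[], int n, int cpu)
{
    struct txq *q = &queues[cpu % NUM_QUEUES];
    int sent = 0;

    pthread_mutex_lock(&q->lock);
    for (int i = 0; i < n; i++)
        if (xmit_one(q, frames[i]) == 0)
            sent++;
    pthread_mutex_unlock(&q->lock);
    return sent;
}
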
722 struct netfront_queue *queue = NULL; in xennet_start_xmit() local
735 queue = &np->queues[queue_index]; in xennet_start_xmit()
777 spin_lock_irqsave(&queue->tx_lock, flags); in xennet_start_xmit()
782 spin_unlock_irqrestore(&queue->tx_lock, flags); in xennet_start_xmit()
787 info.queue = queue; in xennet_start_xmit()
811 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); in xennet_start_xmit()
843 xennet_mark_tx_pending(queue); in xennet_start_xmit()
845 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); in xennet_start_xmit()
847 notify_remote_via_irq(queue->tx_irq); in xennet_start_xmit()
855 xennet_tx_buf_gc(queue); in xennet_start_xmit()
857 if (!netfront_tx_slot_available(queue)) in xennet_start_xmit()
858 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); in xennet_start_xmit()
860 spin_unlock_irqrestore(&queue->tx_lock, flags); in xennet_start_xmit()
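
Note: the transmit path pairs the netif_tx_stop_queue() at line 858 above with the wake in xennet_maybe_wake_tx() — stop when no slot is available after queueing, wake from the completion path once responses free slots. A sketch of that stop/wake discipline, with hypothetical fields standing in for the netdev queue state:

#include <stdbool.h>

#define TX_RING_SIZE   256u
#define SLOTS_RESERVED 2u

struct txq {
    unsigned int req_prod_pvt;
    unsigned int rsp_cons;
    bool stopped;
};

static bool slot_available(const struct txq *q)
{
    return q->req_prod_pvt - q->rsp_cons < TX_RING_SIZE - SLOTS_RESERVED;
}

/* Transmit path: stop the queue while full so the stack stops feeding us. */
static void after_xmit(struct txq *q)
{
    if (!slot_available(q))
        q->stopped = true;       /* netif_tx_stop_queue() in the driver */
}

/* Completion path: wake the queue once space opens up again. */
static void after_completion(struct txq *q)
{
    if (q->stopped && slot_available(q)) {
        q->stopped = false;      /* netif_tx_wake_queue() in the driver */
    }
}
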
875 struct netfront_queue *queue; in xennet_close() local
878 queue = &np->queues[i]; in xennet_close()
879 napi_disable(&queue->napi); in xennet_close()
889 struct netfront_queue *queue = &info->queues[i]; in xennet_destroy_queues() local
892 napi_disable(&queue->napi); in xennet_destroy_queues()
893 netif_napi_del(&queue->napi); in xennet_destroy_queues()
906 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) in xennet_set_rx_rsp_cons() argument
910 spin_lock_irqsave(&queue->rx_cons_lock, flags); in xennet_set_rx_rsp_cons()
911 queue->rx.rsp_cons = val; in xennet_set_rx_rsp_cons()
912 queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); in xennet_set_rx_rsp_cons()
913 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); in xennet_set_rx_rsp_cons()
916 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, in xennet_move_rx_slot() argument
919 int new = xennet_rxidx(queue->rx.req_prod_pvt); in xennet_move_rx_slot()
921 BUG_ON(queue->rx_skbs[new]); in xennet_move_rx_slot()
922 queue->rx_skbs[new] = skb; in xennet_move_rx_slot()
923 queue->grant_rx_ref[new] = ref; in xennet_move_rx_slot()
924 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; in xennet_move_rx_slot()
925 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; in xennet_move_rx_slot()
926 queue->rx.req_prod_pvt++; in xennet_move_rx_slot()
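
Note: xennet_move_rx_slot() recycles rather than frees — when a response is unusable, its skb and grant ref are re-posted at the current producer index, so error paths never shrink the pool of posted rx buffers. An illustrative standalone version (simplified request layout):

#define RX_RING_SIZE 256u

struct req { unsigned int id; int gref; };

struct rxq {
    unsigned int req_prod_pvt;
    void *rx_skbs[RX_RING_SIZE];
    int grant_rx_ref[RX_RING_SIZE];
    struct req ring[RX_RING_SIZE];
};

static void move_rx_slot(struct rxq *q, void *skb, int ref)
{
    unsigned int new = q->req_prod_pvt % RX_RING_SIZE;

    q->rx_skbs[new] = skb;       /* slot must be empty here (driver BUG_ONs) */
    q->grant_rx_ref[new] = ref;
    q->ring[new].id = new;
    q->ring[new].gref = ref;
    q->req_prod_pvt++;
}
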
929 static int xennet_get_extras(struct netfront_queue *queue, in xennet_get_extras() argument
935 struct device *dev = &queue->info->netdev->dev; in xennet_get_extras()
936 RING_IDX cons = queue->rx.rsp_cons; in xennet_get_extras()
950 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra); in xennet_get_extras()
962 skb = xennet_get_rx_skb(queue, cons); in xennet_get_extras()
963 ref = xennet_get_rx_ref(queue, cons); in xennet_get_extras()
964 xennet_move_rx_slot(queue, skb, ref); in xennet_get_extras()
967 xennet_set_rx_rsp_cons(queue, cons); in xennet_get_extras()
971 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, in xennet_run_xdp() argument
984 xdp->rxq = &queue->xdp_rxq; in xennet_run_xdp()
992 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); in xennet_run_xdp()
994 trace_xdp_exception(queue->info->netdev, prog, act); in xennet_run_xdp()
998 err = xdp_do_redirect(queue->info->netdev, xdp, prog); in xennet_run_xdp()
1001 trace_xdp_exception(queue->info->netdev, prog, act); in xennet_run_xdp()
1008 trace_xdp_exception(queue->info->netdev, prog, act); in xennet_run_xdp()
1018 static int xennet_get_responses(struct netfront_queue *queue, in xennet_get_responses() argument
1025 RING_IDX cons = queue->rx.rsp_cons; in xennet_get_responses()
1026 struct sk_buff *skb = xennet_get_rx_skb(queue, cons); in xennet_get_responses()
1028 grant_ref_t ref = xennet_get_rx_ref(queue, cons); in xennet_get_responses()
1029 struct device *dev = &queue->info->netdev->dev; in xennet_get_responses()
1037 err = xennet_get_extras(queue, extras, rp); in xennet_get_responses()
1046 cons = queue->rx.rsp_cons; in xennet_get_responses()
1055 xennet_move_rx_slot(queue, skb, ref); in xennet_get_responses()
1076 queue->info->broken = true; in xennet_get_responses()
1081 gnttab_release_grant_reference(&queue->gref_rx_head, ref); in xennet_get_responses()
1084 xdp_prog = rcu_dereference(queue->xdp_prog); in xennet_get_responses()
1088 verdict = xennet_run_xdp(queue, in xennet_get_responses()
1113 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local); in xennet_get_responses()
1115 skb = xennet_get_rx_skb(queue, cons + slots); in xennet_get_responses()
1116 ref = xennet_get_rx_ref(queue, cons + slots); in xennet_get_responses()
1127 xennet_set_rx_rsp_cons(queue, cons + slots); in xennet_get_responses()
1161 static int xennet_fill_frags(struct netfront_queue *queue, in xennet_fill_frags() argument
1165 RING_IDX cons = queue->rx.rsp_cons; in xennet_fill_frags()
1172 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx); in xennet_fill_frags()
1181 xennet_set_rx_rsp_cons(queue, in xennet_fill_frags()
1195 xennet_set_rx_rsp_cons(queue, cons); in xennet_fill_frags()
1224 static int handle_incoming_queue(struct netfront_queue *queue, in handle_incoming_queue() argument
1227 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); in handle_incoming_queue()
1238 skb->protocol = eth_type_trans(skb, queue->info->netdev); in handle_incoming_queue()
1241 if (checksum_setup(queue->info->netdev, skb)) { in handle_incoming_queue()
1244 queue->info->netdev->stats.rx_errors++; in handle_incoming_queue()
1254 napi_gro_receive(&queue->napi, skb); in handle_incoming_queue()
1262 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); in xennet_poll() local
1263 struct net_device *dev = queue->info->netdev; in xennet_poll()
1276 spin_lock(&queue->rx_lock); in xennet_poll()
1282 rp = queue->rx.sring->rsp_prod; in xennet_poll()
1283 if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) { in xennet_poll()
1285 rp - queue->rx.rsp_cons); in xennet_poll()
1286 queue->info->broken = true; in xennet_poll()
1287 spin_unlock(&queue->rx_lock); in xennet_poll()
1292 i = queue->rx.rsp_cons; in xennet_poll()
1295 RING_COPY_RESPONSE(&queue->rx, i, rx); in xennet_poll()
1298 err = xennet_get_responses(queue, &rinfo, rp, &tmpq, in xennet_poll()
1302 if (queue->info->broken) { in xennet_poll()
1303 spin_unlock(&queue->rx_lock); in xennet_poll()
1310 i = queue->rx.rsp_cons; in xennet_poll()
1322 xennet_set_rx_rsp_cons(queue, in xennet_poll()
1323 queue->rx.rsp_cons + in xennet_poll()
1338 if (unlikely(xennet_fill_frags(queue, skb, &tmpq))) in xennet_poll()
1348 i = queue->rx.rsp_cons + 1; in xennet_poll()
1349 xennet_set_rx_rsp_cons(queue, i); in xennet_poll()
1357 work_done -= handle_incoming_queue(queue, &rxq); in xennet_poll()
1359 xennet_alloc_rx_buffers(queue); in xennet_poll()
1366 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); in xennet_poll()
1371 spin_unlock(&queue->rx_lock); in xennet_poll()
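
Note: xennet_poll() has the canonical NAPI shape — consume at most budget packets, refill rx buffers, and only re-enable event delivery once a final check confirms nothing new arrived in the window. A generic skeleton of that control flow (stubbed helpers, not the driver's):

#include <stdbool.h>

struct rxq { int pending; };

static bool ring_has_work(struct rxq *q)       { return q->pending > 0; }
static void consume_one(struct rxq *q)         { q->pending--; }
static void refill_buffers(struct rxq *q)      { (void)q; }
static void reenable_interrupts(struct rxq *q) { (void)q; }

static int poll(struct rxq *q, int budget)
{
    int work_done = 0;

    while (work_done < budget && ring_has_work(q)) {
        consume_one(q);
        work_done++;
    }
    refill_buffers(q);

    if (work_done < budget && ring_has_work(q))
        return budget;           /* new work raced in: stay scheduled */
    if (work_done < budget)
        reenable_interrupts(q);  /* napi_complete_done() equivalent */
    return work_done;
}
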
1420 static void xennet_release_tx_bufs(struct netfront_queue *queue) in xennet_release_tx_bufs() argument
1427 if (!queue->tx_skbs[i]) in xennet_release_tx_bufs()
1430 skb = queue->tx_skbs[i]; in xennet_release_tx_bufs()
1431 queue->tx_skbs[i] = NULL; in xennet_release_tx_bufs()
1432 get_page(queue->grant_tx_page[i]); in xennet_release_tx_bufs()
1433 gnttab_end_foreign_access(queue->grant_tx_ref[i], in xennet_release_tx_bufs()
1435 (unsigned long)page_address(queue->grant_tx_page[i])); in xennet_release_tx_bufs()
1436 queue->grant_tx_page[i] = NULL; in xennet_release_tx_bufs()
1437 queue->grant_tx_ref[i] = GRANT_INVALID_REF; in xennet_release_tx_bufs()
1438 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i); in xennet_release_tx_bufs()
1443 static void xennet_release_rx_bufs(struct netfront_queue *queue) in xennet_release_rx_bufs() argument
1447 spin_lock_bh(&queue->rx_lock); in xennet_release_rx_bufs()
1453 skb = queue->rx_skbs[id]; in xennet_release_rx_bufs()
1457 ref = queue->grant_rx_ref[id]; in xennet_release_rx_bufs()
1469 queue->grant_rx_ref[id] = GRANT_INVALID_REF; in xennet_release_rx_bufs()
1474 spin_unlock_bh(&queue->rx_lock); in xennet_release_rx_bufs()
1513 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi) in xennet_handle_tx() argument
1517 if (unlikely(queue->info->broken)) in xennet_handle_tx()
1520 spin_lock_irqsave(&queue->tx_lock, flags); in xennet_handle_tx()
1521 if (xennet_tx_buf_gc(queue)) in xennet_handle_tx()
1523 spin_unlock_irqrestore(&queue->tx_lock, flags); in xennet_handle_tx()
1538 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi) in xennet_handle_rx() argument
1543 if (unlikely(queue->info->broken)) in xennet_handle_rx()
1546 spin_lock_irqsave(&queue->rx_cons_lock, flags); in xennet_handle_rx()
1547 work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); in xennet_handle_rx()
1548 if (work_queued > queue->rx_rsp_unconsumed) { in xennet_handle_rx()
1549 queue->rx_rsp_unconsumed = work_queued; in xennet_handle_rx()
1551 } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) { in xennet_handle_rx()
1552 const struct device *dev = &queue->info->netdev->dev; in xennet_handle_rx()
1554 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); in xennet_handle_rx()
1557 queue->info->broken = true; in xennet_handle_rx()
1560 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); in xennet_handle_rx()
1562 if (likely(netif_carrier_ok(queue->info->netdev) && work_queued)) in xennet_handle_rx()
1563 napi_schedule(&queue->napi); in xennet_handle_rx()
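
Note: xennet_handle_rx() cross-checks the shared ring against the frontend's own rx_rsp_unconsumed counter. The count of unconsumed responses may grow between interrupts but can never shrink from the frontend's side, so a smaller value proves the backend rewrote the ring and the queue is marked broken. A minimal sketch:

#include <stdbool.h>
#include <stdio.h>

struct rxq {
    unsigned int rx_rsp_unconsumed;  /* frontend's own running count */
    bool broken;
};

/* work_queued comes from the shared ring, i.e. it is backend-controlled. */
static bool handle_rx(struct rxq *q, unsigned int work_queued)
{
    if (work_queued > q->rx_rsp_unconsumed) {
        q->rx_rsp_unconsumed = work_queued;    /* new responses arrived */
    } else if (work_queued < q->rx_rsp_unconsumed) {
        /* The frontend never un-consumes, so a shrinking count means the
         * backend rewrote the ring: stop trusting it entirely.
         */
        fprintf(stderr, "backend inconsistency, marking queue broken\n");
        q->broken = true;
        return false;
    }
    return work_queued != 0;     /* schedule NAPI only if there is work */
}
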
1820 struct netfront_queue *queue = &info->queues[i]; in xennet_disconnect_backend() local
1822 del_timer_sync(&queue->rx_refill_timer); in xennet_disconnect_backend()
1824 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) in xennet_disconnect_backend()
1825 unbind_from_irqhandler(queue->tx_irq, queue); in xennet_disconnect_backend()
1826 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { in xennet_disconnect_backend()
1827 unbind_from_irqhandler(queue->tx_irq, queue); in xennet_disconnect_backend()
1828 unbind_from_irqhandler(queue->rx_irq, queue); in xennet_disconnect_backend()
1830 queue->tx_evtchn = queue->rx_evtchn = 0; in xennet_disconnect_backend()
1831 queue->tx_irq = queue->rx_irq = 0; in xennet_disconnect_backend()
1834 napi_synchronize(&queue->napi); in xennet_disconnect_backend()
1836 xennet_release_tx_bufs(queue); in xennet_disconnect_backend()
1837 xennet_release_rx_bufs(queue); in xennet_disconnect_backend()
1838 gnttab_free_grant_references(queue->gref_tx_head); in xennet_disconnect_backend()
1839 gnttab_free_grant_references(queue->gref_rx_head); in xennet_disconnect_backend()
1842 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); in xennet_disconnect_backend()
1843 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); in xennet_disconnect_backend()
1845 queue->tx_ring_ref = GRANT_INVALID_REF; in xennet_disconnect_backend()
1846 queue->rx_ring_ref = GRANT_INVALID_REF; in xennet_disconnect_backend()
1847 queue->tx.sring = NULL; in xennet_disconnect_backend()
1848 queue->rx.sring = NULL; in xennet_disconnect_backend()
1850 page_pool_destroy(queue->page_pool); in xennet_disconnect_backend()
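
Note: xennet_disconnect_backend() tears each queue down in a strict order — kill the refill timer, unbind the IRQs, synchronize NAPI, and only then release buffers, grant references, and the shared rings, destroying the page pool last. A hedged restatement of that ordering (hypothetical helpers, not driver APIs):

struct queue;

void stop_refill_timer(struct queue *q);
void unbind_irqs(struct queue *q);
void synchronize_napi(struct queue *q);
void release_tx_buffers(struct queue *q);
void release_rx_buffers(struct queue *q);
void free_grant_references(struct queue *q);
void end_ring_access(struct queue *q);
void destroy_page_pool(struct queue *q);

/* Order matters: quiesce event sources, then NAPI, then free resources. */
void disconnect_queue(struct queue *q)
{
    stop_refill_timer(q);        /* del_timer_sync(): no more self-arming  */
    unbind_irqs(q);              /* no further events from the backend     */
    synchronize_napi(q);         /* wait out any in-flight poll            */
    release_tx_buffers(q);       /* now safe: nothing else touches rings   */
    release_rx_buffers(q);
    free_grant_references(q);
    end_ring_access(q);          /* tear the shared rings down last        */
    destroy_page_pool(q);
}
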
1902 static int setup_netfront_single(struct netfront_queue *queue) in setup_netfront_single() argument
1906 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); in setup_netfront_single()
1910 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, in setup_netfront_single()
1912 queue->info->netdev->name, in setup_netfront_single()
1913 queue); in setup_netfront_single()
1916 queue->rx_evtchn = queue->tx_evtchn; in setup_netfront_single()
1917 queue->rx_irq = queue->tx_irq = err; in setup_netfront_single()
1922 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); in setup_netfront_single()
1923 queue->tx_evtchn = 0; in setup_netfront_single()
1928 static int setup_netfront_split(struct netfront_queue *queue) in setup_netfront_split() argument
1932 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); in setup_netfront_split()
1935 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); in setup_netfront_split()
1939 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), in setup_netfront_split()
1940 "%s-tx", queue->name); in setup_netfront_split()
1941 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, in setup_netfront_split()
1943 queue->tx_irq_name, queue); in setup_netfront_split()
1946 queue->tx_irq = err; in setup_netfront_split()
1948 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), in setup_netfront_split()
1949 "%s-rx", queue->name); in setup_netfront_split()
1950 err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, in setup_netfront_split()
1952 queue->rx_irq_name, queue); in setup_netfront_split()
1955 queue->rx_irq = err; in setup_netfront_split()
1960 unbind_from_irqhandler(queue->tx_irq, queue); in setup_netfront_split()
1961 queue->tx_irq = 0; in setup_netfront_split()
1963 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); in setup_netfront_split()
1964 queue->rx_evtchn = 0; in setup_netfront_split()
1966 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); in setup_netfront_split()
1967 queue->tx_evtchn = 0; in setup_netfront_split()
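
Note: setup_netfront_split() acquires two event channels and binds two IRQ handlers, and its failure labels release them strictly in reverse order, each label falling through to the next. The same goto-unwind ladder as a self-contained toy (acquire()/release() are stand-ins for xenbus_alloc_evtchn()/bind_evtchn_to_irqhandler_lateeoi() and their cleanup calls):

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int setup_split(void)
{
    int err;

    err = acquire("tx-evtchn");
    if (err)
        goto fail;
    err = acquire("rx-evtchn");
    if (err)
        goto free_tx_evtchn;
    err = acquire("tx-irq");
    if (err)
        goto free_rx_evtchn;
    err = acquire("rx-irq");
    if (err)
        goto unbind_tx_irq;
    return 0;

    /* Unwind strictly in reverse order of acquisition. */
unbind_tx_irq:
    release("tx-irq");
free_rx_evtchn:
    release("rx-evtchn");
free_tx_evtchn:
    release("tx-evtchn");
fail:
    return err;
}
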
1973 struct netfront_queue *queue, unsigned int feature_split_evtchn) in setup_netfront() argument
1980 queue->tx_ring_ref = GRANT_INVALID_REF; in setup_netfront()
1981 queue->rx_ring_ref = GRANT_INVALID_REF; in setup_netfront()
1982 queue->rx.sring = NULL; in setup_netfront()
1983 queue->tx.sring = NULL; in setup_netfront()
1992 FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); in setup_netfront()
1997 queue->tx_ring_ref = gref; in setup_netfront()
2006 FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); in setup_netfront()
2011 queue->rx_ring_ref = gref; in setup_netfront()
2014 err = setup_netfront_split(queue); in setup_netfront()
2020 err = setup_netfront_single(queue); in setup_netfront()
2031 if (queue->rx_ring_ref != GRANT_INVALID_REF) { in setup_netfront()
2032 gnttab_end_foreign_access(queue->rx_ring_ref, 0, in setup_netfront()
2034 queue->rx_ring_ref = GRANT_INVALID_REF; in setup_netfront()
2038 if (queue->tx_ring_ref != GRANT_INVALID_REF) { in setup_netfront()
2039 gnttab_end_foreign_access(queue->tx_ring_ref, 0, in setup_netfront()
2041 queue->tx_ring_ref = GRANT_INVALID_REF; in setup_netfront()
2052 static int xennet_init_queue(struct netfront_queue *queue) in xennet_init_queue() argument
2058 spin_lock_init(&queue->tx_lock); in xennet_init_queue()
2059 spin_lock_init(&queue->rx_lock); in xennet_init_queue()
2060 spin_lock_init(&queue->rx_cons_lock); in xennet_init_queue()
2062 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); in xennet_init_queue()
2064 devid = strrchr(queue->info->xbdev->nodename, '/') + 1; in xennet_init_queue()
2065 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", in xennet_init_queue()
2066 devid, queue->id); in xennet_init_queue()
2069 queue->tx_skb_freelist = 0; in xennet_init_queue()
2070 queue->tx_pend_queue = TX_LINK_NONE; in xennet_init_queue()
2072 queue->tx_link[i] = i + 1; in xennet_init_queue()
2073 queue->grant_tx_ref[i] = GRANT_INVALID_REF; in xennet_init_queue()
2074 queue->grant_tx_page[i] = NULL; in xennet_init_queue()
2076 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE; in xennet_init_queue()
2080 queue->rx_skbs[i] = NULL; in xennet_init_queue()
2081 queue->grant_rx_ref[i] = GRANT_INVALID_REF; in xennet_init_queue()
2086 &queue->gref_tx_head) < 0) { in xennet_init_queue()
2094 &queue->gref_rx_head) < 0) { in xennet_init_queue()
2103 gnttab_free_grant_references(queue->gref_tx_head); in xennet_init_queue()
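
Note: xennet_init_queue() threads every tx id into a free list embedded in the tx_link array itself — slot i stores the index of the next free id and the tail stores TX_LINK_NONE — giving get_id_from_list()/add_id_to_list() O(1) pop and push with no extra allocation. A standalone sketch of that structure:

#include <assert.h>

#define NET_TX_RING_SIZE 256u
#define TX_LINK_NONE     0xffffu

static unsigned short tx_link[NET_TX_RING_SIZE];
static unsigned int tx_skb_freelist;

static void freelist_init(void)
{
    unsigned int i;

    tx_skb_freelist = 0;
    for (i = 0; i < NET_TX_RING_SIZE - 1; i++)
        tx_link[i] = i + 1;              /* each slot points at the next */
    tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
}

static unsigned int get_id(void)         /* pop the head, O(1) */
{
    unsigned int id = tx_skb_freelist;

    assert(id != TX_LINK_NONE);          /* caller guards against exhaustion */
    tx_skb_freelist = tx_link[id];
    return id;
}

static void put_id(unsigned int id)      /* push onto the head, O(1) */
{
    tx_link[id] = tx_skb_freelist;
    tx_skb_freelist = id;
}
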
2108 static int write_queue_xenstore_keys(struct netfront_queue *queue, in write_queue_xenstore_keys() argument
2115 struct xenbus_device *dev = queue->info->xbdev; in write_queue_xenstore_keys()
2131 dev->nodename, queue->id); in write_queue_xenstore_keys()
2138 queue->tx_ring_ref); in write_queue_xenstore_keys()
2145 queue->rx_ring_ref); in write_queue_xenstore_keys()
2154 if (queue->tx_evtchn == queue->rx_evtchn) { in write_queue_xenstore_keys()
2157 "event-channel", "%u", queue->tx_evtchn); in write_queue_xenstore_keys()
2165 "event-channel-tx", "%u", queue->tx_evtchn); in write_queue_xenstore_keys()
2172 "event-channel-rx", "%u", queue->rx_evtchn); in write_queue_xenstore_keys()
2192 static int xennet_create_page_pool(struct netfront_queue *queue) in xennet_create_page_pool() argument
2200 .dev = &queue->info->netdev->dev, in xennet_create_page_pool()
2205 queue->page_pool = page_pool_create(&pp_params); in xennet_create_page_pool()
2206 if (IS_ERR(queue->page_pool)) { in xennet_create_page_pool()
2207 err = PTR_ERR(queue->page_pool); in xennet_create_page_pool()
2208 queue->page_pool = NULL; in xennet_create_page_pool()
2212 err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev, in xennet_create_page_pool()
2213 queue->id); in xennet_create_page_pool()
2215 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n"); in xennet_create_page_pool()
2219 err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq, in xennet_create_page_pool()
2220 MEM_TYPE_PAGE_POOL, queue->page_pool); in xennet_create_page_pool()
2222 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n"); in xennet_create_page_pool()
2228 xdp_rxq_info_unreg(&queue->xdp_rxq); in xennet_create_page_pool()
2230 page_pool_destroy(queue->page_pool); in xennet_create_page_pool()
2231 queue->page_pool = NULL; in xennet_create_page_pool()
2247 struct netfront_queue *queue = &info->queues[i]; in xennet_create_queues() local
2249 queue->id = i; in xennet_create_queues()
2250 queue->info = info; in xennet_create_queues()
2252 ret = xennet_init_queue(queue); in xennet_create_queues()
2261 ret = xennet_create_page_pool(queue); in xennet_create_queues()
2268 netif_napi_add(queue->info->netdev, &queue->napi, in xennet_create_queues()
2271 napi_enable(&queue->napi); in xennet_create_queues()
2293 struct netfront_queue *queue = NULL; in talk_to_netback() local
2347 queue = &info->queues[i]; in talk_to_netback()
2348 err = setup_netfront(dev, queue, feature_split_evtchn); in talk_to_netback()
2378 queue = &info->queues[i]; in talk_to_netback()
2379 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ in talk_to_netback()
2455 struct netfront_queue *queue = NULL; in xennet_connect() local
2500 queue = &np->queues[j]; in xennet_connect()
2502 notify_remote_via_irq(queue->tx_irq); in xennet_connect()
2503 if (queue->tx_irq != queue->rx_irq) in xennet_connect()
2504 notify_remote_via_irq(queue->rx_irq); in xennet_connect()
2506 spin_lock_irq(&queue->tx_lock); in xennet_connect()
2507 xennet_tx_buf_gc(queue); in xennet_connect()
2508 spin_unlock_irq(&queue->tx_lock); in xennet_connect()
2510 spin_lock_bh(&queue->rx_lock); in xennet_connect()
2511 xennet_alloc_rx_buffers(queue); in xennet_connect()
2512 spin_unlock_bh(&queue->rx_lock); in xennet_connect()