Lines matching refs:queue (identifier cross-reference output; the xennet_*/netfront_* symbols place these excerpts in the Linux Xen network frontend driver, drivers/net/xen-netfront.c). The leading number on each line is the line number in that file.
218 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument
222 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb()
223 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb()
227 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument
231 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref()
232 queue->grant_rx_ref[i] = INVALID_GRANT_REF; in xennet_get_rx_ref()
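
The two helpers above (lines 218-232) detach the skb and grant reference for a ring entry and poison the slot behind them; elsewhere in the driver the free-running ring index is masked into the power-of-two rx_skbs[] array. A minimal stand-alone model of that slot bookkeeping, with assumed sizes and names (not the kernel code):

/* Free-running RING_IDX masked into a power-of-two array; "get" detaches
 * the entry so a second use of the same response id shows up as NULL.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define NET_RX_RING_SIZE 256            /* power of two, as in the ring ABI */
typedef uint32_t RING_IDX;

static void *rx_skbs[NET_RX_RING_SIZE];

static unsigned int rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}

static void *get_rx_slot(RING_IDX cons)
{
        unsigned int i = rxidx(cons);
        void *p = rx_skbs[i];

        rx_skbs[i] = NULL;              /* poison: reuse is detectable */
        return p;
}

int main(void)
{
        int dummy;

        rx_skbs[rxidx(5)] = &dummy;
        assert(get_rx_slot(5) == &dummy);
        assert(get_rx_slot(5) == NULL); /* duplicate id caught as NULL */
        return 0;
}
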
248 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer); in rx_refill_timeout() local
249 napi_schedule(&queue->napi); in rx_refill_timeout()
252 static int netfront_tx_slot_available(struct netfront_queue *queue) in netfront_tx_slot_available() argument
254 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < in netfront_tx_slot_available()
258 static void xennet_maybe_wake_tx(struct netfront_queue *queue) in xennet_maybe_wake_tx() argument
260 struct net_device *dev = queue->info->netdev; in xennet_maybe_wake_tx()
261 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); in xennet_maybe_wake_tx()
264 netfront_tx_slot_available(queue) && in xennet_maybe_wake_tx()
266 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); in xennet_maybe_wake_tx()
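
netfront_tx_slot_available() (line 254) subtracts two free-running RING_IDX counters; because both only grow and the subtraction is unsigned, the test stays correct across 2^32 wraparound. A sketch demonstrating that property; the threshold constant here is illustrative, not the driver's:

#include <assert.h>
#include <stdint.h>

typedef uint32_t RING_IDX;
#define NET_TX_RING_SIZE 256
#define SLOTS_RESERVED 18               /* illustrative headroom */

static int tx_slot_available(RING_IDX req_prod_pvt, RING_IDX rsp_cons)
{
        /* wraparound-safe: the unsigned difference is the in-flight count */
        return (req_prod_pvt - rsp_cons) < (NET_TX_RING_SIZE - SLOTS_RESERVED);
}

int main(void)
{
        RING_IDX cons = 0xfffffff0u;
        RING_IDX prod = cons + 32;      /* wraps past 2^32 to 0x10 */

        assert(prod < cons);            /* a raw compare is misled */
        assert(tx_slot_available(prod, cons));  /* the difference is not */
        return 0;
}
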
270 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) in xennet_alloc_one_rx_buffer() argument
275 skb = __netdev_alloc_skb(queue->info->netdev, in xennet_alloc_one_rx_buffer()
281 page = page_pool_alloc_pages(queue->page_pool, in xennet_alloc_one_rx_buffer()
292 skb->dev = queue->info->netdev; in xennet_alloc_one_rx_buffer()
298 static void xennet_alloc_rx_buffers(struct netfront_queue *queue) in xennet_alloc_rx_buffers() argument
300 RING_IDX req_prod = queue->rx.req_prod_pvt; in xennet_alloc_rx_buffers()
304 if (unlikely(!netif_carrier_ok(queue->info->netdev))) in xennet_alloc_rx_buffers()
307 for (req_prod = queue->rx.req_prod_pvt; in xennet_alloc_rx_buffers()
308 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; in xennet_alloc_rx_buffers()
316 skb = xennet_alloc_one_rx_buffer(queue); in xennet_alloc_rx_buffers()
324 BUG_ON(queue->rx_skbs[id]); in xennet_alloc_rx_buffers()
325 queue->rx_skbs[id] = skb; in xennet_alloc_rx_buffers()
327 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); in xennet_alloc_rx_buffers()
329 queue->grant_rx_ref[id] = ref; in xennet_alloc_rx_buffers()
333 req = RING_GET_REQUEST(&queue->rx, req_prod); in xennet_alloc_rx_buffers()
335 queue->info->xbdev->otherend_id, in xennet_alloc_rx_buffers()
342 queue->rx.req_prod_pvt = req_prod; in xennet_alloc_rx_buffers()
349 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN || in xennet_alloc_rx_buffers()
351 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); in xennet_alloc_rx_buffers()
355 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); in xennet_alloc_rx_buffers()
357 notify_remote_via_irq(queue->rx_irq); in xennet_alloc_rx_buffers()
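
The refill path ends with RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (line 355), which publishes the new req_prod and raises an interrupt only if the backend asked for an event inside the freshly pushed window. A userspace model of that suppression test, assuming the standard Xen ring macro semantics and ignoring memory barriers:

#include <assert.h>
#include <stdint.h>

typedef uint32_t RING_IDX;

static int push_and_check_notify(RING_IDX old, RING_IDX new, RING_IDX req_event)
{
        /* notify iff req_event lies in the half-open window (old, new] */
        return (RING_IDX)(new - req_event) < (RING_IDX)(new - old);
}

int main(void)
{
        /* backend armed an event at index 10, we pushed 8..12: notify */
        assert(push_and_check_notify(8, 12, 10));
        /* event mark already behind the window: interrupt is suppressed */
        assert(!push_and_check_notify(8, 12, 7));
        return 0;
}
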
365 struct netfront_queue *queue = NULL; in xennet_open() local
371 queue = &np->queues[i]; in xennet_open()
372 napi_enable(&queue->napi); in xennet_open()
374 spin_lock_bh(&queue->rx_lock); in xennet_open()
376 xennet_alloc_rx_buffers(queue); in xennet_open()
377 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; in xennet_open()
378 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) in xennet_open()
379 napi_schedule(&queue->napi); in xennet_open()
381 spin_unlock_bh(&queue->rx_lock); in xennet_open()
389 static bool xennet_tx_buf_gc(struct netfront_queue *queue) in xennet_tx_buf_gc() argument
396 const struct device *dev = &queue->info->netdev->dev; in xennet_tx_buf_gc()
398 BUG_ON(!netif_carrier_ok(queue->info->netdev)); in xennet_tx_buf_gc()
401 prod = queue->tx.sring->rsp_prod; in xennet_tx_buf_gc()
402 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) { in xennet_tx_buf_gc()
404 prod - queue->tx.rsp_cons); in xennet_tx_buf_gc()
409 for (cons = queue->tx.rsp_cons; cons != prod; cons++) { in xennet_tx_buf_gc()
414 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp); in xennet_tx_buf_gc()
419 if (id >= RING_SIZE(&queue->tx)) { in xennet_tx_buf_gc()
425 if (queue->tx_link[id] != TX_PENDING) { in xennet_tx_buf_gc()
431 queue->tx_link[id] = TX_LINK_NONE; in xennet_tx_buf_gc()
432 skb = queue->tx_skbs[id]; in xennet_tx_buf_gc()
433 queue->tx_skbs[id] = NULL; in xennet_tx_buf_gc()
435 queue->grant_tx_ref[id]))) { in xennet_tx_buf_gc()
441 &queue->gref_tx_head, queue->grant_tx_ref[id]); in xennet_tx_buf_gc()
442 queue->grant_tx_ref[id] = INVALID_GRANT_REF; in xennet_tx_buf_gc()
443 queue->grant_tx_page[id] = NULL; in xennet_tx_buf_gc()
444 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id); in xennet_tx_buf_gc()
448 queue->tx.rsp_cons = prod; in xennet_tx_buf_gc()
450 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do); in xennet_tx_buf_gc()
453 xennet_maybe_wake_tx(queue); in xennet_tx_buf_gc()
458 queue->info->broken = true; in xennet_tx_buf_gc()
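
xennet_tx_buf_gc() walks responses from rsp_cons to rsp_prod and finishes with RING_FINAL_CHECK_FOR_RESPONSES (line 450), which re-arms rsp_event and then looks again so a response that lands in the gap is not missed. A simplified, single-threaded model of that re-arm-and-recheck idiom (no barriers, stub ring):

#include <assert.h>
#include <stdint.h>

typedef uint32_t RING_IDX;

struct sring { RING_IDX rsp_prod, rsp_event; };
struct front { struct sring *sring; RING_IDX rsp_cons; };

static int final_check_for_responses(struct front *r)
{
        if (r->sring->rsp_prod != r->rsp_cons)
                return 1;               /* more work already queued */
        /* ask the backend for an event at the next response... */
        r->sring->rsp_event = r->rsp_cons + 1;
        /* ...then look again: a response may have raced in meanwhile */
        return r->sring->rsp_prod != r->rsp_cons;
}

int main(void)
{
        struct sring s = { .rsp_prod = 4, .rsp_event = 0 };
        struct front f = { .sring = &s, .rsp_cons = 4 };

        assert(!final_check_for_responses(&f));
        assert(s.rsp_event == 5);                /* event re-armed */

        s.rsp_prod = 5;                          /* late response arrives */
        assert(final_check_for_responses(&f));   /* caught on re-check */
        return 0;
}
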
465 struct netfront_queue *queue; member
482 struct netfront_queue *queue = info->queue; in xennet_tx_setup_grant() local
485 id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link); in xennet_tx_setup_grant()
486 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); in xennet_tx_setup_grant()
487 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); in xennet_tx_setup_grant()
490 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, in xennet_tx_setup_grant()
493 queue->tx_skbs[id] = skb; in xennet_tx_setup_grant()
494 queue->grant_tx_page[id] = page; in xennet_tx_setup_grant()
495 queue->grant_tx_ref[id] = ref; in xennet_tx_setup_grant()
509 add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id); in xennet_tx_setup_grant()
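
Free TX ids are tracked in tx_link[] as a singly linked free list threaded through the array itself (get_id_from_list/add_id_to_list at lines 485/509, initialized around line 2042). A stand-alone model of the pattern; the sentinel value and sizes are assumptions patterned on the listing:

#include <assert.h>

#define RING_SIZE 8
#define LINK_NONE 0xffffU               /* sentinel: end of list / in use */

static unsigned short link[RING_SIZE];
static unsigned short freelist;

static void list_init(void)
{
        for (unsigned int i = 0; i < RING_SIZE - 1; i++)
                link[i] = i + 1;
        link[RING_SIZE - 1] = LINK_NONE;
        freelist = 0;
}

static unsigned short get_id(void)
{
        unsigned short id = freelist;

        freelist = link[id];            /* pop head */
        link[id] = LINK_NONE;           /* mark as taken */
        return id;
}

static void put_id(unsigned short id)
{
        link[id] = freelist;            /* push back */
        freelist = id;
}

int main(void)
{
        list_init();
        unsigned short a = get_id(), b = get_id();
        assert(a == 0 && b == 1);
        put_id(a);
        assert(get_id() == a);          /* LIFO reuse of freed ids */
        return 0;
}
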
603 static void xennet_mark_tx_pending(struct netfront_queue *queue) in xennet_mark_tx_pending() argument
607 while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) != in xennet_mark_tx_pending()
609 queue->tx_link[i] = TX_PENDING; in xennet_mark_tx_pending()
613 struct netfront_queue *queue, in xennet_xdp_xmit_one() argument
619 .queue = queue, in xennet_xdp_xmit_one()
629 xennet_mark_tx_pending(queue); in xennet_xdp_xmit_one()
631 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); in xennet_xdp_xmit_one()
633 notify_remote_via_irq(queue->tx_irq); in xennet_xdp_xmit_one()
648 struct netfront_queue *queue = NULL; in xennet_xdp_xmit() local
658 queue = &np->queues[smp_processor_id() % num_queues]; in xennet_xdp_xmit()
660 spin_lock_irqsave(&queue->tx_lock, irq_flags); in xennet_xdp_xmit()
666 if (xennet_xdp_xmit_one(dev, queue, xdpf)) in xennet_xdp_xmit()
670 spin_unlock_irqrestore(&queue->tx_lock, irq_flags); in xennet_xdp_xmit()
717 struct netfront_queue *queue = NULL; in xennet_start_xmit() local
730 queue = &np->queues[queue_index]; in xennet_start_xmit()
772 spin_lock_irqsave(&queue->tx_lock, flags); in xennet_start_xmit()
777 spin_unlock_irqrestore(&queue->tx_lock, flags); in xennet_start_xmit()
782 info.queue = queue; in xennet_start_xmit()
806 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); in xennet_start_xmit()
838 xennet_mark_tx_pending(queue); in xennet_start_xmit()
840 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); in xennet_start_xmit()
842 notify_remote_via_irq(queue->tx_irq); in xennet_start_xmit()
849 if (!netfront_tx_slot_available(queue)) in xennet_start_xmit()
850 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); in xennet_start_xmit()
852 spin_unlock_irqrestore(&queue->tx_lock, flags); in xennet_start_xmit()
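
Lines 849-850 are the flow-control half of the pairing with xennet_maybe_wake_tx() (line 258): the transmit path stops the netdev queue when ring slots run low, and the response garbage collector wakes it once space returns. A toy model with illustrative thresholds:

#include <assert.h>
#include <stdint.h>

typedef uint32_t RING_IDX;
#define RING_SLOTS 8
#define SLOTS_MIN  2

static int stopped;

static int slot_available(RING_IDX prod, RING_IDX cons)
{
        return (prod - cons) < (RING_SLOTS - SLOTS_MIN);
}

static void xmit_one(RING_IDX *prod, RING_IDX cons)
{
        (*prod)++;                      /* queue one request */
        if (!slot_available(*prod, cons))
                stopped = 1;            /* netif_tx_stop_queue() */
}

static void tx_gc(RING_IDX prod, RING_IDX *cons)
{
        *cons = prod;                   /* backend consumed everything */
        if (stopped && slot_available(prod, *cons))
                stopped = 0;            /* netif_tx_wake_queue() */
}

int main(void)
{
        RING_IDX prod = 0, cons = 0;

        while (!stopped)
                xmit_one(&prod, cons);  /* fill until flow control trips */
        tx_gc(prod, &cons);
        assert(!stopped);               /* gc reopened the queue */
        return 0;
}
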
867 struct netfront_queue *queue; in xennet_close() local
870 queue = &np->queues[i]; in xennet_close()
871 napi_disable(&queue->napi); in xennet_close()
884 struct netfront_queue *queue = &info->queues[i]; in xennet_destroy_queues() local
887 napi_disable(&queue->napi); in xennet_destroy_queues()
888 netif_napi_del(&queue->napi); in xennet_destroy_queues()
901 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) in xennet_set_rx_rsp_cons() argument
905 spin_lock_irqsave(&queue->rx_cons_lock, flags); in xennet_set_rx_rsp_cons()
906 queue->rx.rsp_cons = val; in xennet_set_rx_rsp_cons()
907 queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx); in xennet_set_rx_rsp_cons()
908 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); in xennet_set_rx_rsp_cons()
911 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, in xennet_move_rx_slot() argument
914 int new = xennet_rxidx(queue->rx.req_prod_pvt); in xennet_move_rx_slot()
916 BUG_ON(queue->rx_skbs[new]); in xennet_move_rx_slot()
917 queue->rx_skbs[new] = skb; in xennet_move_rx_slot()
918 queue->grant_rx_ref[new] = ref; in xennet_move_rx_slot()
919 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; in xennet_move_rx_slot()
920 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; in xennet_move_rx_slot()
921 queue->rx.req_prod_pvt++; in xennet_move_rx_slot()
924 static int xennet_get_extras(struct netfront_queue *queue, in xennet_get_extras() argument
930 struct device *dev = &queue->info->netdev->dev; in xennet_get_extras()
931 RING_IDX cons = queue->rx.rsp_cons; in xennet_get_extras()
945 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra); in xennet_get_extras()
957 skb = xennet_get_rx_skb(queue, cons); in xennet_get_extras()
958 ref = xennet_get_rx_ref(queue, cons); in xennet_get_extras()
959 xennet_move_rx_slot(queue, skb, ref); in xennet_get_extras()
962 xennet_set_rx_rsp_cons(queue, cons); in xennet_get_extras()
966 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, in xennet_run_xdp() argument
976 &queue->xdp_rxq); in xennet_run_xdp()
985 trace_xdp_exception(queue->info->netdev, prog, act); in xennet_run_xdp()
989 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); in xennet_run_xdp()
992 trace_xdp_exception(queue->info->netdev, prog, act); in xennet_run_xdp()
998 err = xdp_do_redirect(queue->info->netdev, xdp, prog); in xennet_run_xdp()
1001 trace_xdp_exception(queue->info->netdev, prog, act); in xennet_run_xdp()
1010 trace_xdp_exception(queue->info->netdev, prog, act); in xennet_run_xdp()
1014 bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act); in xennet_run_xdp()
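
xennet_run_xdp() (lines 966-1014) maps the attached program's verdict onto the usual XDP outcomes, tracing an exception when a TX or redirect attempt fails. A reduced dispatch sketch; the enum mirrors the XDP action names, everything else here is illustrative:

#include <assert.h>
#include <stdio.h>

enum xdp_action { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };

static const char *handle_verdict(enum xdp_action act, int op_err)
{
        switch (act) {
        case XDP_PASS:
                return "pass to stack";
        case XDP_TX:                    /* xennet_xdp_xmit() in the driver */
                return op_err ? "tx failed: trace exception" : "tx";
        case XDP_REDIRECT:              /* xdp_do_redirect() in the driver */
                return op_err ? "redirect failed: trace exception" : "redirect";
        case XDP_DROP:
                return "drop";
        default:                        /* bpf_warn_invalid_xdp_action() */
                return "invalid: warn and drop";
        }
}

int main(void)
{
        assert(handle_verdict(XDP_TX, 1)[0] == 't');
        puts(handle_verdict(XDP_REDIRECT, 0));
        return 0;
}
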
1020 static int xennet_get_responses(struct netfront_queue *queue, in xennet_get_responses() argument
1027 RING_IDX cons = queue->rx.rsp_cons; in xennet_get_responses()
1028 struct sk_buff *skb = xennet_get_rx_skb(queue, cons); in xennet_get_responses()
1030 grant_ref_t ref = xennet_get_rx_ref(queue, cons); in xennet_get_responses()
1031 struct device *dev = &queue->info->netdev->dev; in xennet_get_responses()
1039 err = xennet_get_extras(queue, extras, rp); in xennet_get_responses()
1048 cons = queue->rx.rsp_cons; in xennet_get_responses()
1070 xennet_move_rx_slot(queue, skb, ref); in xennet_get_responses()
1078 queue->info->broken = true; in xennet_get_responses()
1083 gnttab_release_grant_reference(&queue->gref_rx_head, ref); in xennet_get_responses()
1086 xdp_prog = rcu_dereference(queue->xdp_prog); in xennet_get_responses()
1090 verdict = xennet_run_xdp(queue, in xennet_get_responses()
1115 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local); in xennet_get_responses()
1117 skb = xennet_get_rx_skb(queue, cons + slots); in xennet_get_responses()
1118 ref = xennet_get_rx_ref(queue, cons + slots); in xennet_get_responses()
1129 xennet_set_rx_rsp_cons(queue, cons + slots); in xennet_get_responses()
1163 static int xennet_fill_frags(struct netfront_queue *queue, in xennet_fill_frags() argument
1167 RING_IDX cons = queue->rx.rsp_cons; in xennet_fill_frags()
1174 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx); in xennet_fill_frags()
1183 xennet_set_rx_rsp_cons(queue, in xennet_fill_frags()
1197 xennet_set_rx_rsp_cons(queue, cons); in xennet_fill_frags()
1226 static int handle_incoming_queue(struct netfront_queue *queue, in handle_incoming_queue() argument
1229 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); in handle_incoming_queue()
1240 skb->protocol = eth_type_trans(skb, queue->info->netdev); in handle_incoming_queue()
1243 if (checksum_setup(queue->info->netdev, skb)) { in handle_incoming_queue()
1246 queue->info->netdev->stats.rx_errors++; in handle_incoming_queue()
1256 napi_gro_receive(&queue->napi, skb); in handle_incoming_queue()
1264 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); in xennet_poll() local
1265 struct net_device *dev = queue->info->netdev; in xennet_poll()
1278 spin_lock(&queue->rx_lock); in xennet_poll()
1284 rp = queue->rx.sring->rsp_prod; in xennet_poll()
1285 if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) { in xennet_poll()
1287 rp - queue->rx.rsp_cons); in xennet_poll()
1288 queue->info->broken = true; in xennet_poll()
1289 spin_unlock(&queue->rx_lock); in xennet_poll()
1294 i = queue->rx.rsp_cons; in xennet_poll()
1297 RING_COPY_RESPONSE(&queue->rx, i, rx); in xennet_poll()
1300 err = xennet_get_responses(queue, &rinfo, rp, &tmpq, in xennet_poll()
1304 if (queue->info->broken) { in xennet_poll()
1305 spin_unlock(&queue->rx_lock); in xennet_poll()
1312 i = queue->rx.rsp_cons; in xennet_poll()
1324 xennet_set_rx_rsp_cons(queue, in xennet_poll()
1325 queue->rx.rsp_cons + in xennet_poll()
1340 if (unlikely(xennet_fill_frags(queue, skb, &tmpq))) in xennet_poll()
1350 i = queue->rx.rsp_cons + 1; in xennet_poll()
1351 xennet_set_rx_rsp_cons(queue, i); in xennet_poll()
1359 work_done -= handle_incoming_queue(queue, &rxq); in xennet_poll()
1361 xennet_alloc_rx_buffers(queue); in xennet_poll()
1368 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); in xennet_poll()
1373 spin_unlock(&queue->rx_lock); in xennet_poll()
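
xennet_poll() follows the NAPI budget contract: consume at most budget responses per invocation, and only when fewer than that were found may the poller complete and re-enable the interrupt source (the RING_FINAL_CHECK_FOR_RESPONSES at line 1368 closes the completion race). A toy model of the contract:

#include <assert.h>

static int pending = 10;
static int irq_enabled;

static int poll(int budget)
{
        int work_done = 0;

        while (pending && work_done < budget) {
                pending--;              /* handle one response */
                work_done++;
        }
        if (work_done < budget)
                irq_enabled = 1;        /* napi_complete_done() path */
        return work_done;
}

int main(void)
{
        assert(poll(8) == 8 && !irq_enabled); /* budget spent: keep polling */
        assert(poll(8) == 2 && irq_enabled);  /* drained: re-arm interrupts */
        return 0;
}
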
1422 static void xennet_release_tx_bufs(struct netfront_queue *queue) in xennet_release_tx_bufs() argument
1429 if (!queue->tx_skbs[i]) in xennet_release_tx_bufs()
1432 skb = queue->tx_skbs[i]; in xennet_release_tx_bufs()
1433 queue->tx_skbs[i] = NULL; in xennet_release_tx_bufs()
1434 get_page(queue->grant_tx_page[i]); in xennet_release_tx_bufs()
1435 gnttab_end_foreign_access(queue->grant_tx_ref[i], in xennet_release_tx_bufs()
1436 queue->grant_tx_page[i]); in xennet_release_tx_bufs()
1437 queue->grant_tx_page[i] = NULL; in xennet_release_tx_bufs()
1438 queue->grant_tx_ref[i] = INVALID_GRANT_REF; in xennet_release_tx_bufs()
1439 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i); in xennet_release_tx_bufs()
1444 static void xennet_release_rx_bufs(struct netfront_queue *queue) in xennet_release_rx_bufs() argument
1448 spin_lock_bh(&queue->rx_lock); in xennet_release_rx_bufs()
1454 skb = queue->rx_skbs[id]; in xennet_release_rx_bufs()
1458 ref = queue->grant_rx_ref[id]; in xennet_release_rx_bufs()
1469 queue->grant_rx_ref[id] = INVALID_GRANT_REF; in xennet_release_rx_bufs()
1474 spin_unlock_bh(&queue->rx_lock); in xennet_release_rx_bufs()
1513 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi) in xennet_handle_tx() argument
1517 if (unlikely(queue->info->broken)) in xennet_handle_tx()
1520 spin_lock_irqsave(&queue->tx_lock, flags); in xennet_handle_tx()
1521 if (xennet_tx_buf_gc(queue)) in xennet_handle_tx()
1523 spin_unlock_irqrestore(&queue->tx_lock, flags); in xennet_handle_tx()
1538 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi) in xennet_handle_rx() argument
1543 if (unlikely(queue->info->broken)) in xennet_handle_rx()
1546 spin_lock_irqsave(&queue->rx_cons_lock, flags); in xennet_handle_rx()
1547 work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx); in xennet_handle_rx()
1548 if (work_queued > queue->rx_rsp_unconsumed) { in xennet_handle_rx()
1549 queue->rx_rsp_unconsumed = work_queued; in xennet_handle_rx()
1551 } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) { in xennet_handle_rx()
1552 const struct device *dev = &queue->info->netdev->dev; in xennet_handle_rx()
1554 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); in xennet_handle_rx()
1557 queue->info->broken = true; in xennet_handle_rx()
1560 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); in xennet_handle_rx()
1562 if (likely(netif_carrier_ok(queue->info->netdev) && work_queued)) in xennet_handle_rx()
1563 napi_schedule(&queue->napi); in xennet_handle_rx()
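
xennet_handle_rx() (lines 1538-1563) snapshots the unconsumed-response count under rx_cons_lock. Between interrupts that count may only grow unless the frontend itself consumed, so a decrease means the backend moved rsp_prod backwards and the device is marked broken. Modeled stand-alone:

#include <assert.h>
#include <stdint.h>

typedef uint32_t RING_IDX;

static RING_IDX rx_rsp_unconsumed;
static int broken;

static int handle_rx(RING_IDX work_queued)
{
        if (work_queued > rx_rsp_unconsumed) {
                rx_rsp_unconsumed = work_queued; /* new responses arrived */
                return 1;                        /* schedule NAPI */
        } else if (work_queued < rx_rsp_unconsumed) {
                broken = 1;                      /* rsp_prod went backwards */
                return 0;
        }
        return 0;                                /* spurious interrupt */
}

int main(void)
{
        assert(handle_rx(3) == 1);
        assert(handle_rx(3) == 0 && !broken);    /* spurious: no backslide */
        handle_rx(1);                            /* count shrank... */
        assert(broken);                          /* ...backend misbehaved */
        return 0;
}
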
1822 struct netfront_queue *queue = &info->queues[i]; in xennet_disconnect_backend() local
1824 del_timer_sync(&queue->rx_refill_timer); in xennet_disconnect_backend()
1826 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) in xennet_disconnect_backend()
1827 unbind_from_irqhandler(queue->tx_irq, queue); in xennet_disconnect_backend()
1828 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { in xennet_disconnect_backend()
1829 unbind_from_irqhandler(queue->tx_irq, queue); in xennet_disconnect_backend()
1830 unbind_from_irqhandler(queue->rx_irq, queue); in xennet_disconnect_backend()
1832 queue->tx_evtchn = queue->rx_evtchn = 0; in xennet_disconnect_backend()
1833 queue->tx_irq = queue->rx_irq = 0; in xennet_disconnect_backend()
1836 napi_synchronize(&queue->napi); in xennet_disconnect_backend()
1838 xennet_release_tx_bufs(queue); in xennet_disconnect_backend()
1839 xennet_release_rx_bufs(queue); in xennet_disconnect_backend()
1840 gnttab_free_grant_references(queue->gref_tx_head); in xennet_disconnect_backend()
1841 gnttab_free_grant_references(queue->gref_rx_head); in xennet_disconnect_backend()
1844 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); in xennet_disconnect_backend()
1845 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); in xennet_disconnect_backend()
1847 queue->tx_ring_ref = INVALID_GRANT_REF; in xennet_disconnect_backend()
1848 queue->rx_ring_ref = INVALID_GRANT_REF; in xennet_disconnect_backend()
1849 queue->tx.sring = NULL; in xennet_disconnect_backend()
1850 queue->rx.sring = NULL; in xennet_disconnect_backend()
1852 page_pool_destroy(queue->page_pool); in xennet_disconnect_backend()
1904 static int setup_netfront_single(struct netfront_queue *queue) in setup_netfront_single() argument
1908 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); in setup_netfront_single()
1912 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, in setup_netfront_single()
1914 queue->info->netdev->name, in setup_netfront_single()
1915 queue); in setup_netfront_single()
1918 queue->rx_evtchn = queue->tx_evtchn; in setup_netfront_single()
1919 queue->rx_irq = queue->tx_irq = err; in setup_netfront_single()
1924 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); in setup_netfront_single()
1925 queue->tx_evtchn = 0; in setup_netfront_single()
1930 static int setup_netfront_split(struct netfront_queue *queue) in setup_netfront_split() argument
1934 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); in setup_netfront_split()
1937 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); in setup_netfront_split()
1941 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), in setup_netfront_split()
1942 "%s-tx", queue->name); in setup_netfront_split()
1943 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, in setup_netfront_split()
1945 queue->tx_irq_name, queue); in setup_netfront_split()
1948 queue->tx_irq = err; in setup_netfront_split()
1950 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), in setup_netfront_split()
1951 "%s-rx", queue->name); in setup_netfront_split()
1952 err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, in setup_netfront_split()
1954 queue->rx_irq_name, queue); in setup_netfront_split()
1957 queue->rx_irq = err; in setup_netfront_split()
1962 unbind_from_irqhandler(queue->tx_irq, queue); in setup_netfront_split()
1963 queue->tx_irq = 0; in setup_netfront_split()
1965 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); in setup_netfront_split()
1966 queue->rx_evtchn = 0; in setup_netfront_split()
1968 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); in setup_netfront_split()
1969 queue->tx_evtchn = 0; in setup_netfront_split()
1975 struct netfront_queue *queue, unsigned int feature_split_evtchn) in setup_netfront() argument
1981 queue->tx_ring_ref = INVALID_GRANT_REF; in setup_netfront()
1982 queue->rx_ring_ref = INVALID_GRANT_REF; in setup_netfront()
1983 queue->rx.sring = NULL; in setup_netfront()
1984 queue->tx.sring = NULL; in setup_netfront()
1987 1, &queue->tx_ring_ref); in setup_netfront()
1991 XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); in setup_netfront()
1994 1, &queue->rx_ring_ref); in setup_netfront()
1998 XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); in setup_netfront()
2001 err = setup_netfront_split(queue); in setup_netfront()
2007 err = setup_netfront_single(queue); in setup_netfront()
2015 xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref); in setup_netfront()
2016 xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref); in setup_netfront()
2025 static int xennet_init_queue(struct netfront_queue *queue) in xennet_init_queue() argument
2031 spin_lock_init(&queue->tx_lock); in xennet_init_queue()
2032 spin_lock_init(&queue->rx_lock); in xennet_init_queue()
2033 spin_lock_init(&queue->rx_cons_lock); in xennet_init_queue()
2035 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); in xennet_init_queue()
2037 devid = strrchr(queue->info->xbdev->nodename, '/') + 1; in xennet_init_queue()
2038 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", in xennet_init_queue()
2039 devid, queue->id); in xennet_init_queue()
2042 queue->tx_skb_freelist = 0; in xennet_init_queue()
2043 queue->tx_pend_queue = TX_LINK_NONE; in xennet_init_queue()
2045 queue->tx_link[i] = i + 1; in xennet_init_queue()
2046 queue->grant_tx_ref[i] = INVALID_GRANT_REF; in xennet_init_queue()
2047 queue->grant_tx_page[i] = NULL; in xennet_init_queue()
2049 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE; in xennet_init_queue()
2053 queue->rx_skbs[i] = NULL; in xennet_init_queue()
2054 queue->grant_rx_ref[i] = INVALID_GRANT_REF; in xennet_init_queue()
2059 &queue->gref_tx_head) < 0) { in xennet_init_queue()
2067 &queue->gref_rx_head) < 0) { in xennet_init_queue()
2076 gnttab_free_grant_references(queue->gref_tx_head); in xennet_init_queue()
2081 static int write_queue_xenstore_keys(struct netfront_queue *queue, in write_queue_xenstore_keys() argument
2088 struct xenbus_device *dev = queue->info->xbdev; in write_queue_xenstore_keys()
2104 dev->nodename, queue->id); in write_queue_xenstore_keys()
2111 queue->tx_ring_ref); in write_queue_xenstore_keys()
2118 queue->rx_ring_ref); in write_queue_xenstore_keys()
2127 if (queue->tx_evtchn == queue->rx_evtchn) { in write_queue_xenstore_keys()
2130 "event-channel", "%u", queue->tx_evtchn); in write_queue_xenstore_keys()
2138 "event-channel-tx", "%u", queue->tx_evtchn); in write_queue_xenstore_keys()
2145 "event-channel-rx", "%u", queue->rx_evtchn); in write_queue_xenstore_keys()
2165 static int xennet_create_page_pool(struct netfront_queue *queue) in xennet_create_page_pool() argument
2173 .dev = &queue->info->netdev->dev, in xennet_create_page_pool()
2178 queue->page_pool = page_pool_create(&pp_params); in xennet_create_page_pool()
2179 if (IS_ERR(queue->page_pool)) { in xennet_create_page_pool()
2180 err = PTR_ERR(queue->page_pool); in xennet_create_page_pool()
2181 queue->page_pool = NULL; in xennet_create_page_pool()
2185 err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev, in xennet_create_page_pool()
2186 queue->id, 0); in xennet_create_page_pool()
2188 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n"); in xennet_create_page_pool()
2192 err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq, in xennet_create_page_pool()
2193 MEM_TYPE_PAGE_POOL, queue->page_pool); in xennet_create_page_pool()
2195 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n"); in xennet_create_page_pool()
2201 xdp_rxq_info_unreg(&queue->xdp_rxq); in xennet_create_page_pool()
2203 page_pool_destroy(queue->page_pool); in xennet_create_page_pool()
2204 queue->page_pool = NULL; in xennet_create_page_pool()
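
xennet_create_page_pool() (lines 2165-2204) uses the kernel's staged goto unwind: each error label undoes exactly the steps that had succeeded, in reverse order, and the pointer is nulled so callers see a consistent state. The idiom with stub resources (the names here are placeholders, not driver APIs):

#include <assert.h>
#include <stdlib.h>

static int make_pool(void **p)  { *p = malloc(1); return *p ? 0 : -1; }
static int reg_rxq(int fail)    { return fail ? -1 : 0; }
static int reg_mem(int fail)    { return fail ? -1 : 0; }
static void unreg_rxq(void)     { }

static int create(int fail_at, void **pool)
{
        int err;

        if ((err = make_pool(pool)))
                return err;
        if ((err = reg_rxq(fail_at == 1)))
                goto err_free_pool;
        if ((err = reg_mem(fail_at == 2)))
                goto err_unreg_rxq;
        return 0;

err_unreg_rxq:                          /* undo in reverse order */
        unreg_rxq();
err_free_pool:
        free(*pool);
        *pool = NULL;
        return err;
}

int main(void)
{
        void *pool;

        assert(create(2, &pool) == -1 && pool == NULL);
        assert(create(0, &pool) == 0 && pool != NULL);
        free(pool);
        return 0;
}
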
2220 struct netfront_queue *queue = &info->queues[i]; in xennet_create_queues() local
2222 queue->id = i; in xennet_create_queues()
2223 queue->info = info; in xennet_create_queues()
2225 ret = xennet_init_queue(queue); in xennet_create_queues()
2234 ret = xennet_create_page_pool(queue); in xennet_create_queues()
2241 netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll); in xennet_create_queues()
2243 napi_enable(&queue->napi); in xennet_create_queues()
2265 struct netfront_queue *queue = NULL; in talk_to_netback() local
2321 queue = &info->queues[i]; in talk_to_netback()
2322 err = setup_netfront(dev, queue, feature_split_evtchn); in talk_to_netback()
2352 queue = &info->queues[i]; in talk_to_netback()
2353 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ in talk_to_netback()
2429 struct netfront_queue *queue = NULL; in xennet_connect() local
2474 queue = &np->queues[j]; in xennet_connect()
2476 notify_remote_via_irq(queue->tx_irq); in xennet_connect()
2477 if (queue->tx_irq != queue->rx_irq) in xennet_connect()
2478 notify_remote_via_irq(queue->rx_irq); in xennet_connect()
2480 spin_lock_bh(&queue->rx_lock); in xennet_connect()
2481 xennet_alloc_rx_buffers(queue); in xennet_connect()
2482 spin_unlock_bh(&queue->rx_lock); in xennet_connect()