Lines matching full:queue in drivers/net/xen-netback/netback.c. Each hit shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark hits where queue is a function parameter or a local variable.
58 /* The time that packets can stay on the guest Rx internal queue
106 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
109 static void make_tx_response(struct xenvif_queue *queue,
113 static void push_tx_responses(struct xenvif_queue *queue);
115 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
117 static inline int tx_work_todo(struct xenvif_queue *queue);
119 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, in idx_to_pfn() argument
122 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
125 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, in idx_to_kaddr() argument
128 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); in idx_to_kaddr()
161 void xenvif_kick_thread(struct xenvif_queue *queue) in xenvif_kick_thread() argument
163 wake_up(&queue->wq); in xenvif_kick_thread()
166 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) in xenvif_napi_schedule_or_enable_events() argument
170 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); in xenvif_napi_schedule_or_enable_events()
173 napi_schedule(&queue->napi); in xenvif_napi_schedule_or_enable_events()
175 &queue->eoi_pending) & in xenvif_napi_schedule_or_enable_events()
177 xen_irq_lateeoi(queue->tx_irq, 0); in xenvif_napi_schedule_or_enable_events()
180 static void tx_add_credit(struct xenvif_queue *queue) in tx_add_credit() argument
188 max_burst = max(131072UL, queue->credit_bytes); in tx_add_credit()
191 max_credit = queue->remaining_credit + queue->credit_bytes; in tx_add_credit()
192 if (max_credit < queue->remaining_credit) in tx_add_credit()
195 queue->remaining_credit = min(max_credit, max_burst); in tx_add_credit()
196 queue->rate_limited = false; in tx_add_credit()
201 struct xenvif_queue *queue = from_timer(queue, t, credit_timeout); in xenvif_tx_credit_callback() local
202 tx_add_credit(queue); in xenvif_tx_credit_callback()
203 xenvif_napi_schedule_or_enable_events(queue); in xenvif_tx_credit_callback()
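The tx_add_credit() hits above only show the statements that mention queue. A sketch of how the whole credit-replenishment helper plausibly reads, reconstructed around those statements (the comments and the ULONG_MAX clamp on overflow are assumptions, not lines from this listing):

static void tx_add_credit(struct xenvif_queue *queue)
{
        unsigned long max_burst, max_credit;

        /* Allow a burst big enough for a jumbo packet of up to 128kB. */
        max_burst = max(131072UL, queue->credit_bytes);

        /* Adding a new chunk of credit must not wrap to zero. */
        max_credit = queue->remaining_credit + queue->credit_bytes;
        if (max_credit < queue->remaining_credit)
                max_credit = ULONG_MAX;        /* assumed: saturate on wrap-around */

        queue->remaining_credit = min(max_credit, max_burst);
        queue->rate_limited = false;
}

The timer callback right above (xenvif_tx_credit_callback) then just tops the credit back up and re-arms TX processing via xenvif_napi_schedule_or_enable_events().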
206 static void xenvif_tx_err(struct xenvif_queue *queue, in xenvif_tx_err() argument
210 RING_IDX cons = queue->tx.req_cons; in xenvif_tx_err()
214 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_tx_err()
215 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR); in xenvif_tx_err()
216 push_tx_responses(queue); in xenvif_tx_err()
217 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_tx_err()
220 RING_COPY_REQUEST(&queue->tx, cons++, txp); in xenvif_tx_err()
223 queue->tx.req_cons = cons; in xenvif_tx_err()
230 /* Disable the vif from queue 0's kthread */ in xenvif_fatal_tx_err()
235 static int xenvif_count_requests(struct xenvif_queue *queue, in xenvif_count_requests() argument
241 RING_IDX cons = queue->tx.req_cons; in xenvif_count_requests()
253 netdev_err(queue->vif->dev, in xenvif_count_requests()
256 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
264 netdev_err(queue->vif->dev, in xenvif_count_requests()
267 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
280 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
289 RING_COPY_REQUEST(&queue->tx, cons + slots, txp); in xenvif_count_requests()
302 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
312 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n", in xenvif_count_requests()
314 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
326 xenvif_tx_err(queue, first, extra_count, cons + slots); in xenvif_count_requests()
344 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, in xenvif_tx_create_map_op() argument
350 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; in xenvif_tx_create_map_op()
351 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx), in xenvif_tx_create_map_op()
353 txp->gref, queue->vif->domid); in xenvif_tx_create_map_op()
355 memcpy(&queue->pending_tx_info[pending_idx].req, txp, in xenvif_tx_create_map_op()
357 queue->pending_tx_info[pending_idx].extra_count = extra_count; in xenvif_tx_create_map_op()
379 static void xenvif_get_requests(struct xenvif_queue *queue, in xenvif_get_requests() argument
395 struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops; in xenvif_get_requests()
396 struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; in xenvif_get_requests()
411 cop->source.domid = queue->vif->domid; in xenvif_get_requests()
431 index = pending_index(queue->pending_cons); in xenvif_get_requests()
432 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
433 callback_param(queue, pending_idx).ctx = NULL; in xenvif_get_requests()
444 memcpy(&queue->pending_tx_info[pending_idx].req, in xenvif_get_requests()
446 queue->pending_tx_info[pending_idx].extra_count = in xenvif_get_requests()
453 queue->pending_cons++; in xenvif_get_requests()
470 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_get_requests()
471 make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY); in xenvif_get_requests()
472 push_tx_responses(queue); in xenvif_get_requests()
473 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_get_requests()
478 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
479 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
480 xenvif_tx_create_map_op(queue, pending_idx, txp, in xenvif_get_requests()
501 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_get_requests()
502 make_tx_response(queue, txp, 0, in xenvif_get_requests()
504 push_tx_responses(queue); in xenvif_get_requests()
505 spin_unlock_irqrestore(&queue->response_lock, in xenvif_get_requests()
510 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
511 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
512 xenvif_tx_create_map_op(queue, pending_idx, txp, 0, in xenvif_get_requests()
534 (*copy_ops) = cop - queue->tx_copy_ops; in xenvif_get_requests()
535 (*map_ops) = gop - queue->tx_map_ops; in xenvif_get_requests()
538 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, in xenvif_grant_handle_set() argument
542 if (unlikely(queue->grant_tx_handle[pending_idx] != in xenvif_grant_handle_set()
544 netdev_err(queue->vif->dev, in xenvif_grant_handle_set()
549 queue->grant_tx_handle[pending_idx] = handle; in xenvif_grant_handle_set()
552 static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue, in xenvif_grant_handle_reset() argument
555 if (unlikely(queue->grant_tx_handle[pending_idx] == in xenvif_grant_handle_reset()
557 netdev_err(queue->vif->dev, in xenvif_grant_handle_reset()
562 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; in xenvif_grant_handle_reset()
565 static int xenvif_tx_check_gop(struct xenvif_queue *queue, in xenvif_tx_check_gop() argument
603 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
608 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
615 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
631 xenvif_grant_handle_set(queue, in xenvif_tx_check_gop()
636 xenvif_idx_unmap(queue, pending_idx); in xenvif_tx_check_gop()
642 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
645 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
653 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
660 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); in xenvif_tx_check_gop()
669 xenvif_idx_unmap(queue, pending_idx); in xenvif_tx_check_gop()
670 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
680 xenvif_idx_unmap(queue, pending_idx); in xenvif_tx_check_gop()
681 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
702 static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_fill_frags() argument
720 &callback_param(queue, pending_idx); in xenvif_fill_frags()
722 callback_param(queue, prev_pending_idx).ctx = in xenvif_fill_frags()
723 &callback_param(queue, pending_idx); in xenvif_fill_frags()
725 callback_param(queue, pending_idx).ctx = NULL; in xenvif_fill_frags()
728 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_fill_frags()
729 page = virt_to_page(idx_to_kaddr(queue, pending_idx)); in xenvif_fill_frags()
736 get_page(queue->mmap_pages[pending_idx]); in xenvif_fill_frags()
740 static int xenvif_get_extras(struct xenvif_queue *queue, in xenvif_get_extras() argument
746 RING_IDX cons = queue->tx.req_cons; in xenvif_get_extras()
750 netdev_err(queue->vif->dev, "Missing extra info\n"); in xenvif_get_extras()
751 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
755 RING_COPY_REQUEST(&queue->tx, cons, &extra); in xenvif_get_extras()
757 queue->tx.req_cons = ++cons; in xenvif_get_extras()
762 netdev_err(queue->vif->dev, in xenvif_get_extras()
764 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
803 static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb) in checksum_setup() argument
813 queue->stats.rx_gso_checksum_fixup++; in checksum_setup()
825 static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) in tx_credit_exceeded() argument
828 u64 next_credit = queue->credit_window_start + in tx_credit_exceeded()
829 msecs_to_jiffies(queue->credit_usec / 1000); in tx_credit_exceeded()
832 if (timer_pending(&queue->credit_timeout)) { in tx_credit_exceeded()
833 queue->rate_limited = true; in tx_credit_exceeded()
839 queue->credit_window_start = now; in tx_credit_exceeded()
840 tx_add_credit(queue); in tx_credit_exceeded()
844 if (size > queue->remaining_credit) { in tx_credit_exceeded()
845 mod_timer(&queue->credit_timeout, in tx_credit_exceeded()
847 queue->credit_window_start = next_credit; in tx_credit_exceeded()
848 queue->rate_limited = true; in tx_credit_exceeded()
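The tx_credit_exceeded() hits show the credit-window bookkeeping but not the control flow around it. One plausible shape for the function, with the jiffies/time_after_eq64() glue and comments filled in as assumptions:

static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
        u64 now = get_jiffies_64();
        u64 next_credit = queue->credit_window_start +
                msecs_to_jiffies(queue->credit_usec / 1000);

        /* A replenish timer may already be pending. */
        if (timer_pending(&queue->credit_timeout)) {
                queue->rate_limited = true;
                return true;
        }

        /* Past the end of the credit window: open a new one. */
        if (time_after_eq64(now, next_credit)) {
                queue->credit_window_start = now;
                tx_add_credit(queue);
        }

        /* Still not enough credit for this packet: arm the timer and defer. */
        if (size > queue->remaining_credit) {
                mod_timer(&queue->credit_timeout, next_credit);
                queue->credit_window_start = next_credit;
                queue->rate_limited = true;
                return true;
        }

        return false;
}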
915 /* No need for locking or RCU here. NAPI poll and TX queue in xenvif_mcast_addr_list_free()
930 static void xenvif_tx_build_gops(struct xenvif_queue *queue, in xenvif_tx_build_gops() argument
939 while (skb_queue_len(&queue->tx_queue) < budget) { in xenvif_tx_build_gops()
950 if (queue->tx.sring->req_prod - queue->tx.req_cons > in xenvif_tx_build_gops()
952 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
955 queue->tx.sring->req_prod, queue->tx.req_cons, in xenvif_tx_build_gops()
957 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
961 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_tx_build_gops()
965 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
967 RING_COPY_REQUEST(&queue->tx, idx, &txreq); in xenvif_tx_build_gops()
970 if (txreq.size > queue->remaining_credit && in xenvif_tx_build_gops()
971 tx_credit_exceeded(queue, txreq.size)) in xenvif_tx_build_gops()
974 queue->remaining_credit -= txreq.size; in xenvif_tx_build_gops()
977 queue->tx.req_cons = ++idx; in xenvif_tx_build_gops()
982 work_to_do = xenvif_get_extras(queue, extras, in xenvif_tx_build_gops()
985 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
994 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
996 make_tx_response(queue, &txreq, extra_count, in xenvif_tx_build_gops()
1000 push_tx_responses(queue); in xenvif_tx_build_gops()
1008 xenvif_mcast_del(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
1010 make_tx_response(queue, &txreq, extra_count, in xenvif_tx_build_gops()
1012 push_tx_responses(queue); in xenvif_tx_build_gops()
1019 ret = xenvif_count_requests(queue, &txreq, extra_count, in xenvif_tx_build_gops()
1028 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1030 xenvif_tx_err(queue, &txreq, extra_count, idx); in xenvif_tx_build_gops()
1036 netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n", in xenvif_tx_build_gops()
1038 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
1042 index = pending_index(queue->pending_cons); in xenvif_tx_build_gops()
1043 pending_idx = queue->pending_ring[index]; in xenvif_tx_build_gops()
1050 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1052 xenvif_tx_err(queue, &txreq, extra_count, idx); in xenvif_tx_build_gops()
1070 xenvif_tx_err(queue, &txreq, extra_count, idx); in xenvif_tx_build_gops()
1072 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1082 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { in xenvif_tx_build_gops()
1118 xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops, in xenvif_tx_build_gops()
1122 __skb_queue_tail(&queue->tx_queue, skb); in xenvif_tx_build_gops()
1124 queue->tx.req_cons = idx; in xenvif_tx_build_gops()
1126 if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) || in xenvif_tx_build_gops()
1127 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) in xenvif_tx_build_gops()
1137 static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_handle_frag_list() argument
1145 queue->stats.tx_zerocopy_sent += 2; in xenvif_handle_frag_list()
1146 queue->stats.tx_frag_overflow++; in xenvif_handle_frag_list()
1148 xenvif_fill_frags(queue, nskb); in xenvif_handle_frag_list()
1187 atomic_inc(&queue->inflight_packets); in xenvif_handle_frag_list()
1199 static int xenvif_tx_submit(struct xenvif_queue *queue) in xenvif_tx_submit() argument
1201 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; in xenvif_tx_submit()
1202 struct gnttab_copy *gop_copy = queue->tx_copy_ops; in xenvif_tx_submit()
1206 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { in xenvif_tx_submit()
1211 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_tx_submit()
1214 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { in xenvif_tx_submit()
1234 xenvif_fill_frags(queue, skb); in xenvif_tx_submit()
1238 xenvif_skb_zerocopy_prepare(queue, nskb); in xenvif_tx_submit()
1239 if (xenvif_handle_frag_list(queue, skb)) { in xenvif_tx_submit()
1241 netdev_err(queue->vif->dev, in xenvif_tx_submit()
1243 xenvif_skb_zerocopy_prepare(queue, skb); in xenvif_tx_submit()
1252 skb->dev = queue->vif->dev; in xenvif_tx_submit()
1256 if (checksum_setup(queue, skb)) { in xenvif_tx_submit()
1257 netdev_dbg(queue->vif->dev, in xenvif_tx_submit()
1261 xenvif_skb_zerocopy_prepare(queue, skb); in xenvif_tx_submit()
1291 queue->stats.rx_bytes += skb->len; in xenvif_tx_submit()
1292 queue->stats.rx_packets++; in xenvif_tx_submit()
1302 xenvif_skb_zerocopy_prepare(queue, skb); in xenvif_tx_submit()
1303 queue->stats.tx_zerocopy_sent++; in xenvif_tx_submit()
1316 struct xenvif_queue *queue = ubuf_to_queue(ubuf); in xenvif_zerocopy_callback() local
1321 spin_lock_irqsave(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1325 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= in xenvif_zerocopy_callback()
1327 index = pending_index(queue->dealloc_prod); in xenvif_zerocopy_callback()
1328 queue->dealloc_ring[index] = pending_idx; in xenvif_zerocopy_callback()
1333 queue->dealloc_prod++; in xenvif_zerocopy_callback()
1335 spin_unlock_irqrestore(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1338 queue->stats.tx_zerocopy_success++; in xenvif_zerocopy_callback()
1340 queue->stats.tx_zerocopy_fail++; in xenvif_zerocopy_callback()
1341 xenvif_skb_zerocopy_complete(queue); in xenvif_zerocopy_callback()
1344 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) in xenvif_tx_dealloc_action() argument
1351 dc = queue->dealloc_cons; in xenvif_tx_dealloc_action()
1352 gop = queue->tx_unmap_ops; in xenvif_tx_dealloc_action()
1356 dp = queue->dealloc_prod; in xenvif_tx_dealloc_action()
1364 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); in xenvif_tx_dealloc_action()
1366 queue->dealloc_ring[pending_index(dc++)]; in xenvif_tx_dealloc_action()
1368 pending_idx_release[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1370 queue->pages_to_unmap[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1371 queue->mmap_pages[pending_idx]; in xenvif_tx_dealloc_action()
1373 idx_to_kaddr(queue, pending_idx), in xenvif_tx_dealloc_action()
1375 queue->grant_tx_handle[pending_idx]); in xenvif_tx_dealloc_action()
1376 xenvif_grant_handle_reset(queue, pending_idx); in xenvif_tx_dealloc_action()
1380 } while (dp != queue->dealloc_prod); in xenvif_tx_dealloc_action()
1382 queue->dealloc_cons = dc; in xenvif_tx_dealloc_action()
1384 if (gop - queue->tx_unmap_ops > 0) { in xenvif_tx_dealloc_action()
1386 ret = gnttab_unmap_refs(queue->tx_unmap_ops, in xenvif_tx_dealloc_action()
1388 queue->pages_to_unmap, in xenvif_tx_dealloc_action()
1389 gop - queue->tx_unmap_ops); in xenvif_tx_dealloc_action()
1391 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n", in xenvif_tx_dealloc_action()
1392 gop - queue->tx_unmap_ops, ret); in xenvif_tx_dealloc_action()
1393 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { in xenvif_tx_dealloc_action()
1395 netdev_err(queue->vif->dev, in xenvif_tx_dealloc_action()
1405 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) in xenvif_tx_dealloc_action()
1406 xenvif_idx_release(queue, pending_idx_release[i], in xenvif_tx_dealloc_action()
1412 int xenvif_tx_action(struct xenvif_queue *queue, int budget) in xenvif_tx_action() argument
1417 if (unlikely(!tx_work_todo(queue))) in xenvif_tx_action()
1420 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops); in xenvif_tx_action()
1425 gnttab_batch_copy(queue->tx_copy_ops, nr_cops); in xenvif_tx_action()
1427 ret = gnttab_map_refs(queue->tx_map_ops, in xenvif_tx_action()
1429 queue->pages_to_map, in xenvif_tx_action()
1434 netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", in xenvif_tx_action()
1437 WARN_ON_ONCE(queue->tx_map_ops[i].status == in xenvif_tx_action()
1442 work_done = xenvif_tx_submit(queue); in xenvif_tx_action()
1447 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, in xenvif_idx_release() argument
1454 pending_tx_info = &queue->pending_tx_info[pending_idx]; in xenvif_idx_release()
1456 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_idx_release()
1458 make_tx_response(queue, &pending_tx_info->req, in xenvif_idx_release()
1465 index = pending_index(queue->pending_prod++); in xenvif_idx_release()
1466 queue->pending_ring[index] = pending_idx; in xenvif_idx_release()
1468 push_tx_responses(queue); in xenvif_idx_release()
1470 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_idx_release()
1474 static void make_tx_response(struct xenvif_queue *queue, in make_tx_response() argument
1479 RING_IDX i = queue->tx.rsp_prod_pvt; in make_tx_response()
1482 resp = RING_GET_RESPONSE(&queue->tx, i); in make_tx_response()
1487 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; in make_tx_response()
1489 queue->tx.rsp_prod_pvt = ++i; in make_tx_response()
1492 static void push_tx_responses(struct xenvif_queue *queue) in push_tx_responses() argument
1496 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); in push_tx_responses()
1498 notify_remote_via_irq(queue->tx_irq); in push_tx_responses()
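From the make_tx_response()/push_tx_responses() hits, the TX response path can be sketched as below; the resp->id/resp->status assignments and the notify check are filled in by me and should be treated as assumptions, not lines from the listing:

static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             unsigned int extra_count,
                             s8 status)
{
        RING_IDX i = queue->tx.rsp_prod_pvt;
        struct xen_netif_tx_response *resp;

        resp = RING_GET_RESPONSE(&queue->tx, i);
        resp->id = txp->id;
        resp->status = status;

        /* Extra-info slots consumed with the request get NULL responses. */
        while (extra_count-- != 0)
                RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

        queue->tx.rsp_prod_pvt = ++i;
}

static void push_tx_responses(struct xenvif_queue *queue)
{
        int notify;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
                notify_remote_via_irq(queue->tx_irq);
}

Callers take queue->response_lock around make_tx_response()/push_tx_responses(), as the xenvif_tx_err() and xenvif_idx_release() hits above show.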
1501 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) in xenvif_idx_unmap() argument
1507 idx_to_kaddr(queue, pending_idx), in xenvif_idx_unmap()
1509 queue->grant_tx_handle[pending_idx]); in xenvif_idx_unmap()
1510 xenvif_grant_handle_reset(queue, pending_idx); in xenvif_idx_unmap()
1513 &queue->mmap_pages[pending_idx], 1); in xenvif_idx_unmap()
1515 netdev_err(queue->vif->dev, in xenvif_idx_unmap()
1526 static inline int tx_work_todo(struct xenvif_queue *queue) in tx_work_todo() argument
1528 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) in tx_work_todo()
1534 static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue) in tx_dealloc_work_todo() argument
1536 return queue->dealloc_cons != queue->dealloc_prod; in tx_dealloc_work_todo()
1539 void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue) in xenvif_unmap_frontend_data_rings() argument
1541 if (queue->tx.sring) in xenvif_unmap_frontend_data_rings()
1542 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1543 queue->tx.sring); in xenvif_unmap_frontend_data_rings()
1544 if (queue->rx.sring) in xenvif_unmap_frontend_data_rings()
1545 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1546 queue->rx.sring); in xenvif_unmap_frontend_data_rings()
1549 int xenvif_map_frontend_data_rings(struct xenvif_queue *queue, in xenvif_map_frontend_data_rings() argument
1559 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1568 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1571 if (req_prod - rsp_prod > RING_SIZE(&queue->tx)) in xenvif_map_frontend_data_rings()
1574 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1583 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1586 if (req_prod - rsp_prod > RING_SIZE(&queue->rx)) in xenvif_map_frontend_data_rings()
1592 xenvif_unmap_frontend_data_rings(queue); in xenvif_map_frontend_data_rings()
1596 static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue) in xenvif_dealloc_kthread_should_stop() argument
1602 !atomic_read(&queue->inflight_packets); in xenvif_dealloc_kthread_should_stop()
1607 struct xenvif_queue *queue = data; in xenvif_dealloc_kthread() local
1610 wait_event_interruptible(queue->dealloc_wq, in xenvif_dealloc_kthread()
1611 tx_dealloc_work_todo(queue) || in xenvif_dealloc_kthread()
1612 xenvif_dealloc_kthread_should_stop(queue)); in xenvif_dealloc_kthread()
1613 if (xenvif_dealloc_kthread_should_stop(queue)) in xenvif_dealloc_kthread()
1616 xenvif_tx_dealloc_action(queue); in xenvif_dealloc_kthread()
1621 if (tx_dealloc_work_todo(queue)) in xenvif_dealloc_kthread()
1622 xenvif_tx_dealloc_action(queue); in xenvif_dealloc_kthread()
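Putting the last few hits together, the dealloc kthread is a wait/drain loop around xenvif_tx_dealloc_action(). A sketch consistent with the listed lines (the for (;;) framing, cond_resched() and the final comment are assumptions):

int xenvif_dealloc_kthread(void *data)
{
        struct xenvif_queue *queue = data;

        for (;;) {
                wait_event_interruptible(queue->dealloc_wq,
                                         tx_dealloc_work_todo(queue) ||
                                         xenvif_dealloc_kthread_should_stop(queue));
                if (xenvif_dealloc_kthread_should_stop(queue))
                        break;

                xenvif_tx_dealloc_action(queue);
                cond_resched();
        }

        /* Drain anything still queued for unmap before exiting. */
        if (tx_dealloc_work_todo(queue))
                xenvif_tx_dealloc_action(queue);

        return 0;
}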