Lines Matching +full:tx +full:- +full:slots
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
8 * Copyright (c) 2002-2005, K A Fraser
65 * because it isn't providing Rx slots.
77 * This is the maximum slots a skb can have. If a guest sends a skb
84 /* The amount to copy out of the first guest Tx slot into the skb's
100 * for xen-netfront with the XDP_PACKET_HEADROOM offset
122 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
132 (vif->pending_tx_info[pending_idx].callback_struct)
138 u16 pending_idx = ubuf->desc; in ubuf_to_queue()
141 return container_of(temp - pending_idx, in ubuf_to_queue()
158 return i & (MAX_PENDING_REQS-1); in pending_index()
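The masking in pending_index() works because MAX_PENDING_REQS is a power of two: free-running 16-bit producer/consumer indices are reduced to array slots with a single AND, and their difference still gives the ring occupancy across wrap-around. A minimal standalone sketch of that idiom (RING_SLOTS is an assumed stand-in for the kernel constant):

#include <assert.h>
#include <stdint.h>

#define RING_SLOTS 256U			/* must be a power of two */

static unsigned int slot_of(uint16_t idx)
{
	return idx & (RING_SLOTS - 1);	/* same pattern as pending_index() */
}

int main(void)
{
	uint16_t prod = 2, cons = 65534;	/* producer index has wrapped */

	/* Occupancy is still prod - cons in 16-bit arithmetic: 4 entries. */
	assert((uint16_t)(prod - cons) == 4);
	/* A free-running index maps to the same slot after a full lap. */
	assert(slot_of(cons) == slot_of((uint16_t)(cons + RING_SLOTS)));
	return 0;
}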
163 wake_up(&queue->wq); in xenvif_kick_thread()
170 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); in xenvif_napi_schedule_or_enable_events()
173 napi_schedule(&queue->napi); in xenvif_napi_schedule_or_enable_events()
175 &queue->eoi_pending) & in xenvif_napi_schedule_or_enable_events()
177 xen_irq_lateeoi(queue->tx_irq, 0); in xenvif_napi_schedule_or_enable_events()
188 max_burst = max(131072UL, queue->credit_bytes); in tx_add_credit()
191 max_credit = queue->remaining_credit + queue->credit_bytes; in tx_add_credit()
192 if (max_credit < queue->remaining_credit) in tx_add_credit()
195 queue->remaining_credit = min(max_credit, max_burst); in tx_add_credit()
196 queue->rate_limited = false; in tx_add_credit()
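tx_add_credit() tops the queue's credit up by credit_bytes per period, caps the accumulated credit at a burst of at least 128 KiB (131072 bytes) or one full window's worth, and guards against the unsigned addition wrapping; the saturation branch itself is not among the matched lines above. A standalone sketch of the pattern, using an assumed struct credit_state in place of struct xenvif_queue:

#include <limits.h>
#include <stdbool.h>

struct credit_state {
	unsigned long remaining_credit;	/* bytes usable in the current window */
	unsigned long credit_bytes;	/* bytes granted per replenish period */
	bool rate_limited;
};

static void credit_replenish(struct credit_state *c)
{
	/* Allow bursts of at least 128 KiB, or one full window's worth. */
	unsigned long max_burst = c->credit_bytes > 131072UL ?
				  c->credit_bytes : 131072UL;
	unsigned long max_credit = c->remaining_credit + c->credit_bytes;

	if (max_credit < c->remaining_credit)	/* addition wrapped: saturate */
		max_credit = ULONG_MAX;

	c->remaining_credit = max_credit < max_burst ? max_credit : max_burst;
	c->rate_limited = false;
}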
210 RING_IDX cons = queue->tx.req_cons; in xenvif_tx_err()
214 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_tx_err()
217 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_tx_err()
220 RING_COPY_REQUEST(&queue->tx, cons++, txp); in xenvif_tx_err()
223 queue->tx.req_cons = cons; in xenvif_tx_err()
228 netdev_err(vif->dev, "fatal error; disabling device\n"); in xenvif_fatal_tx_err()
229 vif->disabled = true; in xenvif_fatal_tx_err()
231 if (vif->num_queues) in xenvif_fatal_tx_err()
232 xenvif_kick_thread(&vif->queues[0]); in xenvif_fatal_tx_err()
241 RING_IDX cons = queue->tx.req_cons; in xenvif_count_requests()
242 int slots = 0; in xenvif_count_requests() local
246 if (!(first->flags & XEN_NETTXF_more_data)) in xenvif_count_requests()
252 if (slots >= work_to_do) { in xenvif_count_requests()
253 netdev_err(queue->vif->dev, in xenvif_count_requests()
254 "Asked for %d slots but exceeds this limit\n", in xenvif_count_requests()
256 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
257 return -ENODATA; in xenvif_count_requests()
260 /* This guest is really using too many slots and in xenvif_count_requests()
263 if (unlikely(slots >= fatal_skb_slots)) { in xenvif_count_requests()
264 netdev_err(queue->vif->dev, in xenvif_count_requests()
265 "Malicious frontend using %d slots, threshold %u\n", in xenvif_count_requests()
266 slots, fatal_skb_slots); in xenvif_count_requests()
267 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
268 return -E2BIG; in xenvif_count_requests()
275 * 18 slots but less than fatal_skb_slots slots is in xenvif_count_requests()
278 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { in xenvif_count_requests()
280 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
281 "Too many slots (%d) exceeding limit (%d), dropping packet\n", in xenvif_count_requests()
282 slots, XEN_NETBK_LEGACY_SLOTS_MAX); in xenvif_count_requests()
283 drop_err = -E2BIG; in xenvif_count_requests()
289 RING_COPY_REQUEST(&queue->tx, cons + slots, txp); in xenvif_count_requests()
292 * first->size overflowed and following slots will in xenvif_count_requests()
298 * Consume all slots and drop the packet. in xenvif_count_requests()
300 if (!drop_err && txp->size > first->size) { in xenvif_count_requests()
302 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
303 "Invalid tx request, slot size %u > remaining size %u\n", in xenvif_count_requests()
304 txp->size, first->size); in xenvif_count_requests()
305 drop_err = -EIO; in xenvif_count_requests()
308 first->size -= txp->size; in xenvif_count_requests()
309 slots++; in xenvif_count_requests()
311 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) { in xenvif_count_requests()
312 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n", in xenvif_count_requests()
313 txp->offset, txp->size); in xenvif_count_requests()
314 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
315 return -EINVAL; in xenvif_count_requests()
318 more_data = txp->flags & XEN_NETTXF_more_data; in xenvif_count_requests()
326 xenvif_tx_err(queue, first, extra_count, cons + slots); in xenvif_count_requests()
330 return slots; in xenvif_count_requests()
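xenvif_count_requests() applies a two-tier policy to slot-hungry frontends: chaining fatal_skb_slots or more requests is treated as malicious and disables the interface, while merely exceeding the legacy XEN_NETBK_LEGACY_SLOTS_MAX limit (historically 18) only causes the packet to be consumed and dropped. A hedged sketch of that decision, with both thresholds passed in as parameters rather than read from the real constants:

enum slot_verdict { SLOT_OK, SLOT_DROP_PACKET, SLOT_FATAL_ERROR };

/* legacy_max corresponds to XEN_NETBK_LEGACY_SLOTS_MAX, fatal_max to the
 * larger fatal_skb_slots threshold; both are caller-supplied here.
 */
static enum slot_verdict classify_slot_count(unsigned int slots,
					     unsigned int legacy_max,
					     unsigned int fatal_max)
{
	if (slots >= fatal_max)
		return SLOT_FATAL_ERROR;	/* xenvif_fatal_tx_err() */
	if (slots >= legacy_max)
		return SLOT_DROP_PACKET;	/* consume the slots, drop the skb */
	return SLOT_OK;
}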
340 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
341 #define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
342 #define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
350 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; in xenvif_tx_create_map_op()
353 txp->gref, queue->vif->domid); in xenvif_tx_create_map_op()
355 memcpy(&queue->pending_tx_info[pending_idx].req, txp, in xenvif_tx_create_map_op()
357 queue->pending_tx_info[pending_idx].extra_count = extra_count; in xenvif_tx_create_map_op()
366 BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb)); in xenvif_alloc_skb()
374 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_alloc_skb()
391 skb_frag_t *frags = shinfo->frags; in xenvif_get_requests()
395 struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops; in xenvif_get_requests()
396 struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; in xenvif_get_requests()
399 nr_slots = shinfo->nr_frags + frag_overflow + 1; in xenvif_get_requests()
402 XENVIF_TX_CB(skb)->split_mask = 0; in xenvif_get_requests()
407 int amount = data_len > txp->size ? txp->size : data_len; in xenvif_get_requests()
410 cop->source.u.ref = txp->gref; in xenvif_get_requests()
411 cop->source.domid = queue->vif->domid; in xenvif_get_requests()
412 cop->source.offset = txp->offset; in xenvif_get_requests()
414 cop->dest.domid = DOMID_SELF; in xenvif_get_requests()
415 cop->dest.offset = (offset_in_page(skb->data + in xenvif_get_requests()
416 skb_headlen(skb) - in xenvif_get_requests()
418 cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) in xenvif_get_requests()
419 - data_len); in xenvif_get_requests()
422 if (cop->dest.offset + amount > XEN_PAGE_SIZE) { in xenvif_get_requests()
423 amount = XEN_PAGE_SIZE - cop->dest.offset; in xenvif_get_requests()
424 XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb); in xenvif_get_requests()
428 cop->len = amount; in xenvif_get_requests()
429 cop->flags = GNTCOPY_source_gref; in xenvif_get_requests()
431 index = pending_index(queue->pending_cons); in xenvif_get_requests()
432 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
439 data_len -= amount; in xenvif_get_requests()
441 if (amount == txp->size) { in xenvif_get_requests()
444 memcpy(&queue->pending_tx_info[pending_idx].req, in xenvif_get_requests()
446 queue->pending_tx_info[pending_idx].extra_count = in xenvif_get_requests()
453 queue->pending_cons++; in xenvif_get_requests()
454 nr_slots--; in xenvif_get_requests()
460 txp->offset += amount; in xenvif_get_requests()
461 txp->size -= amount; in xenvif_get_requests()
465 for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; in xenvif_get_requests()
466 nr_slots--) { in xenvif_get_requests()
467 if (unlikely(!txp->size)) { in xenvif_get_requests()
470 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_get_requests()
473 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_get_requests()
478 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
479 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
482 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); in xenvif_get_requests()
483 ++shinfo->nr_frags; in xenvif_get_requests()
495 frags = shinfo->frags; in xenvif_get_requests()
497 for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) { in xenvif_get_requests()
498 if (unlikely(!txp->size)) { in xenvif_get_requests()
501 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_get_requests()
505 spin_unlock_irqrestore(&queue->response_lock, in xenvif_get_requests()
510 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
511 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
514 frag_set_pending_idx(&frags[shinfo->nr_frags], in xenvif_get_requests()
516 ++shinfo->nr_frags; in xenvif_get_requests()
520 if (shinfo->nr_frags) { in xenvif_get_requests()
521 skb_shinfo(skb)->frag_list = nskb; in xenvif_get_requests()
528 * because enough slots were converted to copy ops above or some in xenvif_get_requests()
534 (*copy_ops) = cop - queue->tx_copy_ops; in xenvif_get_requests()
535 (*map_ops) = gop - queue->tx_map_ops; in xenvif_get_requests()
542 if (unlikely(queue->grant_tx_handle[pending_idx] != in xenvif_grant_handle_set()
544 netdev_err(queue->vif->dev, in xenvif_grant_handle_set()
549 queue->grant_tx_handle[pending_idx] = handle; in xenvif_grant_handle_set()
555 if (unlikely(queue->grant_tx_handle[pending_idx] == in xenvif_grant_handle_reset()
557 netdev_err(queue->vif->dev, in xenvif_grant_handle_reset()
562 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; in xenvif_grant_handle_reset()
576 /* If this is non-NULL, we are currently checking the frag_list skb, and in xenvif_tx_check_gop()
580 int nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
582 frag_get_pending_idx(&shinfo->frags[0]) == in xenvif_tx_check_gop()
583 copy_pending_idx(skb, copy_count(skb) - 1); in xenvif_tx_check_gop()
592 newerr = (*gopp_copy)->status; in xenvif_tx_check_gop()
595 if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) { in xenvif_tx_check_gop()
598 newerr = (*gopp_copy)->status; in xenvif_tx_check_gop()
602 if (i < copy_count(skb) - 1 || !sharedslot) in xenvif_tx_check_gop()
608 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
610 (*gopp_copy)->status, in xenvif_tx_check_gop()
612 (*gopp_copy)->source.u.ref); in xenvif_tx_check_gop()
614 if (i < copy_count(skb) - 1 || !sharedslot) in xenvif_tx_check_gop()
625 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); in xenvif_tx_check_gop()
628 newerr = gop_map->status; in xenvif_tx_check_gop()
633 gop_map->handle); in xenvif_tx_check_gop()
653 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
656 gop_map->status, in xenvif_tx_check_gop()
658 gop_map->ref); in xenvif_tx_check_gop()
668 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); in xenvif_tx_check_gop()
678 for (j = 0; j < first_shinfo->nr_frags; j++) { in xenvif_tx_check_gop()
679 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); in xenvif_tx_check_gop()
692 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); in xenvif_tx_check_gop()
693 nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
705 int nr_frags = shinfo->nr_frags; in xenvif_fill_frags()
710 skb_frag_t *frag = shinfo->frags + i; in xenvif_fill_frags()
719 skb_shinfo(skb)->destructor_arg = in xenvif_fill_frags()
728 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_fill_frags()
730 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); in xenvif_fill_frags()
731 skb->len += txp->size; in xenvif_fill_frags()
732 skb->data_len += txp->size; in xenvif_fill_frags()
733 skb->truesize += txp->size; in xenvif_fill_frags()
736 get_page(queue->mmap_pages[pending_idx]); in xenvif_fill_frags()
746 RING_IDX cons = queue->tx.req_cons; in xenvif_get_extras()
749 if (unlikely(work_to_do-- <= 0)) { in xenvif_get_extras()
750 netdev_err(queue->vif->dev, "Missing extra info\n"); in xenvif_get_extras()
751 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
752 return -EBADR; in xenvif_get_extras()
755 RING_COPY_REQUEST(&queue->tx, cons, &extra); in xenvif_get_extras()
757 queue->tx.req_cons = ++cons; in xenvif_get_extras()
762 netdev_err(queue->vif->dev, in xenvif_get_extras()
764 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
765 return -EINVAL; in xenvif_get_extras()
768 memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); in xenvif_get_extras()
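xenvif_get_extras() stores each extra into a compact array indexed by type - 1 (type 0 is the "none" marker), which is why xenvif_tx_build_gops() below sizes its array XEN_NETIF_EXTRA_TYPE_MAX - 1 and tests extras[TYPE - 1].type to see whether a given extra was supplied. A miniature sketch of that bookkeeping with assumed enum values and a shrunken struct:

#include <string.h>

enum { EXTRA_NONE, EXTRA_GSO, EXTRA_MCAST_ADD, EXTRA_TYPE_MAX };

struct extra_info {
	unsigned char type;	/* 0 means "not supplied" */
	unsigned char data[6];
};

/* extras[] is expected to hold EXTRA_TYPE_MAX - 1 entries. */
static void store_extra(struct extra_info *extras, const struct extra_info *e)
{
	if (e->type > EXTRA_NONE && e->type < EXTRA_TYPE_MAX)
		memcpy(&extras[e->type - 1], e, sizeof(*e));
}

static int extra_present(const struct extra_info *extras, unsigned char type)
{
	return extras[type - 1].type != EXTRA_NONE;
}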
778 if (!gso->u.gso.size) { in xenvif_set_skb_gso()
779 netdev_err(vif->dev, "GSO size must not be zero.\n"); in xenvif_set_skb_gso()
781 return -EINVAL; in xenvif_set_skb_gso()
784 switch (gso->u.gso.type) { in xenvif_set_skb_gso()
786 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in xenvif_set_skb_gso()
789 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in xenvif_set_skb_gso()
792 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); in xenvif_set_skb_gso()
794 return -EINVAL; in xenvif_set_skb_gso()
797 skb_shinfo(skb)->gso_size = gso->u.gso.size; in xenvif_set_skb_gso()
812 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { in checksum_setup()
813 queue->stats.rx_gso_checksum_fixup++; in checksum_setup()
814 skb->ip_summed = CHECKSUM_PARTIAL; in checksum_setup()
818 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ in checksum_setup()
819 if (skb->ip_summed != CHECKSUM_PARTIAL) in checksum_setup()
828 u64 next_credit = queue->credit_window_start + in tx_credit_exceeded()
829 msecs_to_jiffies(queue->credit_usec / 1000); in tx_credit_exceeded()
832 if (timer_pending(&queue->credit_timeout)) { in tx_credit_exceeded()
833 queue->rate_limited = true; in tx_credit_exceeded()
839 queue->credit_window_start = now; in tx_credit_exceeded()
844 if (size > queue->remaining_credit) { in tx_credit_exceeded()
845 mod_timer(&queue->credit_timeout, in tx_credit_exceeded()
847 queue->credit_window_start = next_credit; in tx_credit_exceeded()
848 queue->rate_limited = true; in tx_credit_exceeded()
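Together with tx_add_credit(), tx_credit_exceeded() forms a simple token bucket: credit is replenished once per credit_usec window, and a request larger than the remaining credit re-arms the timer for the next window boundary and marks the queue rate-limited. A plain-C sketch of the check, with a caller-supplied millisecond clock standing in for jiffies and the kernel timer, and assumed field names:

#include <stdbool.h>

struct tx_credit {
	unsigned long window_start_ms;	/* start of the current credit window */
	unsigned long window_len_ms;	/* credit_usec / 1000 */
	unsigned long remaining_credit;	/* bytes still allowed this window */
	unsigned long credit_bytes;	/* bytes granted per window */
	bool rate_limited;
};

/* Return true when a pkt_size-byte request must wait for the next window. */
static bool credit_exceeded(struct tx_credit *c, unsigned long now_ms,
			    unsigned long pkt_size)
{
	unsigned long next_credit = c->window_start_ms + c->window_len_ms;

	/* Window boundary passed: start a new window and top the credit up. */
	if ((long)(now_ms - next_credit) >= 0) {
		c->window_start_ms = now_ms;
		c->remaining_credit += c->credit_bytes;	/* cf. tx_add_credit() */
	}

	/* Still not enough credit: defer until the next window boundary. */
	if (pkt_size > c->remaining_credit) {
		c->window_start_ms = next_credit;
		c->rate_limited = true;
		return true;
	}

	c->rate_limited = false;
	return false;
}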
865 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) { in xenvif_mcast_add()
867 netdev_err(vif->dev, in xenvif_mcast_add()
869 return -ENOSPC; in xenvif_mcast_add()
874 return -ENOMEM; in xenvif_mcast_add()
876 ether_addr_copy(mcast->addr, addr); in xenvif_mcast_add()
877 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr); in xenvif_mcast_add()
878 vif->fe_mcast_count++; in xenvif_mcast_add()
887 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_del()
888 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_del()
889 --vif->fe_mcast_count; in xenvif_mcast_del()
890 list_del_rcu(&mcast->entry); in xenvif_mcast_del()
902 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_match()
903 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_match()
915 /* No need for locking or RCU here. NAPI poll and TX queue in xenvif_mcast_addr_list_free()
918 while (!list_empty(&vif->fe_mcast_addr)) { in xenvif_mcast_addr_list_free()
921 mcast = list_first_entry(&vif->fe_mcast_addr, in xenvif_mcast_addr_list_free()
924 --vif->fe_mcast_count; in xenvif_mcast_addr_list_free()
925 list_del(&mcast->entry); in xenvif_mcast_addr_list_free()
939 while (skb_queue_len(&queue->tx_queue) < budget) { in xenvif_tx_build_gops()
942 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; in xenvif_tx_build_gops()
950 if (queue->tx.sring->req_prod - queue->tx.req_cons > in xenvif_tx_build_gops()
952 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
955 queue->tx.sring->req_prod, queue->tx.req_cons, in xenvif_tx_build_gops()
957 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
961 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_tx_build_gops()
965 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
967 RING_COPY_REQUEST(&queue->tx, idx, &txreq); in xenvif_tx_build_gops()
969 /* Credit-based scheduling. */ in xenvif_tx_build_gops()
970 if (txreq.size > queue->remaining_credit && in xenvif_tx_build_gops()
974 queue->remaining_credit -= txreq.size; in xenvif_tx_build_gops()
976 work_to_do--; in xenvif_tx_build_gops()
977 queue->tx.req_cons = ++idx; in xenvif_tx_build_gops()
985 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
990 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) { in xenvif_tx_build_gops()
993 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1]; in xenvif_tx_build_gops()
994 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
1004 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) { in xenvif_tx_build_gops()
1007 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1]; in xenvif_tx_build_gops()
1008 xenvif_mcast_del(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
1028 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1036 netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n", in xenvif_tx_build_gops()
1038 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
1042 index = pending_index(queue->pending_cons); in xenvif_tx_build_gops()
1043 pending_idx = queue->pending_ring[index]; in xenvif_tx_build_gops()
1045 if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size) in xenvif_tx_build_gops()
1050 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1056 skb_shinfo(skb)->nr_frags = ret; in xenvif_tx_build_gops()
1057 /* At this point shinfo->nr_frags is in fact the number of in xenvif_tx_build_gops()
1058 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. in xenvif_tx_build_gops()
1062 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) { in xenvif_tx_build_gops()
1063 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS; in xenvif_tx_build_gops()
1065 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; in xenvif_tx_build_gops()
1068 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
1072 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1078 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { in xenvif_tx_build_gops()
1080 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; in xenvif_tx_build_gops()
1082 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { in xenvif_tx_build_gops()
1084 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
1091 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) { in xenvif_tx_build_gops()
1095 extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1]; in xenvif_tx_build_gops()
1097 switch (extra->u.hash.type) { in xenvif_tx_build_gops()
1114 *(u32 *)extra->u.hash.value, in xenvif_tx_build_gops()
1122 __skb_queue_tail(&queue->tx_queue, skb); in xenvif_tx_build_gops()
1124 queue->tx.req_cons = idx; in xenvif_tx_build_gops()
1126 if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) || in xenvif_tx_build_gops()
1127 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) in xenvif_tx_build_gops()
1135 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
1143 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_handle_frag_list()
1145 queue->stats.tx_zerocopy_sent += 2; in xenvif_handle_frag_list()
1146 queue->stats.tx_frag_overflow++; in xenvif_handle_frag_list()
1150 skb->truesize -= skb->data_len; in xenvif_handle_frag_list()
1151 skb->len += nskb->len; in xenvif_handle_frag_list()
1152 skb->data_len += nskb->len; in xenvif_handle_frag_list()
1155 for (i = 0; offset < skb->len; i++) { in xenvif_handle_frag_list()
1163 skb->truesize += skb->data_len; in xenvif_handle_frag_list()
1166 return -ENOMEM; in xenvif_handle_frag_list()
1169 if (offset + PAGE_SIZE < skb->len) in xenvif_handle_frag_list()
1172 len = skb->len - offset; in xenvif_handle_frag_list()
1183 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in xenvif_handle_frag_list()
1185 uarg = skb_shinfo(skb)->destructor_arg; in xenvif_handle_frag_list()
1187 atomic_inc(&queue->inflight_packets); in xenvif_handle_frag_list()
1188 uarg->callback(uarg, true); in xenvif_handle_frag_list()
1189 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_handle_frag_list()
1192 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); in xenvif_handle_frag_list()
1193 skb_shinfo(skb)->nr_frags = i; in xenvif_handle_frag_list()
1194 skb->truesize += i * PAGE_SIZE; in xenvif_handle_frag_list()
1201 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; in xenvif_tx_submit()
1202 struct gnttab_copy *gop_copy = queue->tx_copy_ops; in xenvif_tx_submit()
1206 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { in xenvif_tx_submit()
1211 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_tx_submit()
1219 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_submit()
1222 skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1223 skb_shinfo(nskb)->nr_frags = 0; in xenvif_tx_submit()
1229 if (txp->flags & XEN_NETTXF_csum_blank) in xenvif_tx_submit()
1230 skb->ip_summed = CHECKSUM_PARTIAL; in xenvif_tx_submit()
1231 else if (txp->flags & XEN_NETTXF_data_validated) in xenvif_tx_submit()
1232 skb->ip_summed = CHECKSUM_UNNECESSARY; in xenvif_tx_submit()
1237 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1241 netdev_err(queue->vif->dev, in xenvif_tx_submit()
1247 /* Copied all the bits from the frag list -- free it. */ in xenvif_tx_submit()
1252 skb->dev = queue->vif->dev; in xenvif_tx_submit()
1253 skb->protocol = eth_type_trans(skb, skb->dev); in xenvif_tx_submit()
1257 netdev_dbg(queue->vif->dev, in xenvif_tx_submit()
1260 if (skb_shinfo(skb)->destructor_arg) in xenvif_tx_submit()
1282 mss = skb_shinfo(skb)->gso_size; in xenvif_tx_submit()
1283 hdrlen = skb_transport_header(skb) - in xenvif_tx_submit()
1287 skb_shinfo(skb)->gso_segs = in xenvif_tx_submit()
1288 DIV_ROUND_UP(skb->len - hdrlen, mss); in xenvif_tx_submit()
1291 queue->stats.rx_bytes += skb->len; in xenvif_tx_submit()
1292 queue->stats.rx_packets++; in xenvif_tx_submit()
1301 if (skb_shinfo(skb)->destructor_arg) { in xenvif_tx_submit()
1303 queue->stats.tx_zerocopy_sent++; in xenvif_tx_submit()
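As a worked example of the gso_segs computation a few lines above (illustrative numbers only): a 65226-byte GSO skb with 66 bytes of Ethernet, IP and TCP headers and an MSS of 1448 resegments into DIV_ROUND_UP(65226 - 66, 1448) = 45 TCP segments.

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in the kernel */

int main(void)
{
	assert(DIV_ROUND_UP(65226 - 66, 1448) == 45);
	return 0;
}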
1321 spin_lock_irqsave(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1323 u16 pending_idx = ubuf->desc; in xenvif_zerocopy_callback()
1324 ubuf = (struct ubuf_info *) ubuf->ctx; in xenvif_zerocopy_callback()
1325 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= in xenvif_zerocopy_callback()
1327 index = pending_index(queue->dealloc_prod); in xenvif_zerocopy_callback()
1328 queue->dealloc_ring[index] = pending_idx; in xenvif_zerocopy_callback()
1333 queue->dealloc_prod++; in xenvif_zerocopy_callback()
1335 spin_unlock_irqrestore(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1338 queue->stats.tx_zerocopy_success++; in xenvif_zerocopy_callback()
1340 queue->stats.tx_zerocopy_fail++; in xenvif_zerocopy_callback()
1351 dc = queue->dealloc_cons; in xenvif_tx_dealloc_action()
1352 gop = queue->tx_unmap_ops; in xenvif_tx_dealloc_action()
1356 dp = queue->dealloc_prod; in xenvif_tx_dealloc_action()
1364 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); in xenvif_tx_dealloc_action()
1366 queue->dealloc_ring[pending_index(dc++)]; in xenvif_tx_dealloc_action()
1368 pending_idx_release[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1370 queue->pages_to_unmap[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1371 queue->mmap_pages[pending_idx]; in xenvif_tx_dealloc_action()
1375 queue->grant_tx_handle[pending_idx]); in xenvif_tx_dealloc_action()
1380 } while (dp != queue->dealloc_prod); in xenvif_tx_dealloc_action()
1382 queue->dealloc_cons = dc; in xenvif_tx_dealloc_action()
1384 if (gop - queue->tx_unmap_ops > 0) { in xenvif_tx_dealloc_action()
1386 ret = gnttab_unmap_refs(queue->tx_unmap_ops, in xenvif_tx_dealloc_action()
1388 queue->pages_to_unmap, in xenvif_tx_dealloc_action()
1389 gop - queue->tx_unmap_ops); in xenvif_tx_dealloc_action()
1391 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n", in xenvif_tx_dealloc_action()
1392 gop - queue->tx_unmap_ops, ret); in xenvif_tx_dealloc_action()
1393 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { in xenvif_tx_dealloc_action()
1395 netdev_err(queue->vif->dev, in xenvif_tx_dealloc_action()
1405 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) in xenvif_tx_dealloc_action()
1425 gnttab_batch_copy(queue->tx_copy_ops, nr_cops); in xenvif_tx_action()
1427 ret = gnttab_map_refs(queue->tx_map_ops, in xenvif_tx_action()
1429 queue->pages_to_map, in xenvif_tx_action()
1434 netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", in xenvif_tx_action()
1437 WARN_ON_ONCE(queue->tx_map_ops[i].status == in xenvif_tx_action()
1454 pending_tx_info = &queue->pending_tx_info[pending_idx]; in xenvif_idx_release()
1456 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_idx_release()
1458 make_tx_response(queue, &pending_tx_info->req, in xenvif_idx_release()
1459 pending_tx_info->extra_count, status); in xenvif_idx_release()
1461 /* Release the pending index before pushing the Tx response so in xenvif_idx_release()
1462 * it's available before a new Tx request is pushed by the in xenvif_idx_release()
1465 index = pending_index(queue->pending_prod++); in xenvif_idx_release()
1466 queue->pending_ring[index] = pending_idx; in xenvif_idx_release()
1470 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_idx_release()
1479 RING_IDX i = queue->tx.rsp_prod_pvt; in make_tx_response()
1482 resp = RING_GET_RESPONSE(&queue->tx, i); in make_tx_response()
1483 resp->id = txp->id; in make_tx_response()
1484 resp->status = st; in make_tx_response()
1486 while (extra_count-- != 0) in make_tx_response()
1487 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; in make_tx_response()
1489 queue->tx.rsp_prod_pvt = ++i; in make_tx_response()
1496 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); in push_tx_responses()
1498 notify_remote_via_irq(queue->tx_irq); in push_tx_responses()
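make_tx_response() fills the response slot(s) and push_tx_responses() publishes them; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() requests an interrupt only when the frontend's rsp_event index falls inside the batch just published, so completions raise one event per batch rather than one per response. A simplified sketch of that producer-side check (the real macro in xen/interface/io/ring.h also issues the necessary memory barriers):

typedef unsigned int RING_IDX;	/* 32-bit free-running ring index */

struct shared_ring {
	RING_IDX rsp_prod;	/* responses published by the backend */
	RING_IDX rsp_event;	/* frontend wants an event once rsp_prod passes this */
};

/* Publish responses up to new_prod; return nonzero if an IRQ should be sent. */
static int push_and_check_notify(struct shared_ring *sring, RING_IDX new_prod)
{
	RING_IDX old = sring->rsp_prod;

	sring->rsp_prod = new_prod;
	/* Notify only if rsp_event lies in the half-open interval (old, new_prod]. */
	return (RING_IDX)(new_prod - sring->rsp_event) <
	       (RING_IDX)(new_prod - old);
}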
1509 queue->grant_tx_handle[pending_idx]); in xenvif_idx_unmap()
1513 &queue->mmap_pages[pending_idx], 1); in xenvif_idx_unmap()
1515 netdev_err(queue->vif->dev, in xenvif_idx_unmap()
1528 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) in tx_work_todo()
1536 return queue->dealloc_cons != queue->dealloc_prod; in tx_dealloc_work_todo()
1541 if (queue->tx.sring) in xenvif_unmap_frontend_data_rings()
1542 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1543 queue->tx.sring); in xenvif_unmap_frontend_data_rings()
1544 if (queue->rx.sring) in xenvif_unmap_frontend_data_rings()
1545 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1546 queue->rx.sring); in xenvif_unmap_frontend_data_rings()
1557 int err = -ENOMEM; in xenvif_map_frontend_data_rings()
1559 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1565 rsp_prod = READ_ONCE(txs->rsp_prod); in xenvif_map_frontend_data_rings()
1566 req_prod = READ_ONCE(txs->req_prod); in xenvif_map_frontend_data_rings()
1568 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1570 err = -EIO; in xenvif_map_frontend_data_rings()
1571 if (req_prod - rsp_prod > RING_SIZE(&queue->tx)) in xenvif_map_frontend_data_rings()
1574 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1580 rsp_prod = READ_ONCE(rxs->rsp_prod); in xenvif_map_frontend_data_rings()
1581 req_prod = READ_ONCE(rxs->req_prod); in xenvif_map_frontend_data_rings()
1583 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1585 err = -EIO; in xenvif_map_frontend_data_rings()
1586 if (req_prod - rsp_prod > RING_SIZE(&queue->rx)) in xenvif_map_frontend_data_rings()
1602 !atomic_read(&queue->inflight_packets); in xenvif_dealloc_kthread_should_stop()
1610 wait_event_interruptible(queue->dealloc_wq, in xenvif_dealloc_kthread()
1631 RING_IDX idx = vif->ctrl.rsp_prod_pvt; in make_ctrl_response()
1633 .id = req->id, in make_ctrl_response()
1634 .type = req->type, in make_ctrl_response()
1639 *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp; in make_ctrl_response()
1640 vif->ctrl.rsp_prod_pvt = ++idx; in make_ctrl_response()
1647 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify); in push_ctrl_response()
1649 notify_remote_via_irq(vif->ctrl_irq); in push_ctrl_response()
1658 switch (req->type) { in process_ctrl_request()
1660 status = xenvif_set_hash_alg(vif, req->data[0]); in process_ctrl_request()
1668 status = xenvif_set_hash_flags(vif, req->data[0]); in process_ctrl_request()
1672 status = xenvif_set_hash_key(vif, req->data[0], in process_ctrl_request()
1673 req->data[1]); in process_ctrl_request()
1683 req->data[0]); in process_ctrl_request()
1687 status = xenvif_set_hash_mapping(vif, req->data[0], in process_ctrl_request()
1688 req->data[1], in process_ctrl_request()
1689 req->data[2]); in process_ctrl_request()
1705 req_prod = vif->ctrl.sring->req_prod; in xenvif_ctrl_action()
1706 req_cons = vif->ctrl.req_cons; in xenvif_ctrl_action()
1717 RING_COPY_REQUEST(&vif->ctrl, req_cons, &req); in xenvif_ctrl_action()
1723 vif->ctrl.req_cons = req_cons; in xenvif_ctrl_action()
1724 vif->ctrl.sring->req_event = req_cons + 1; in xenvif_ctrl_action()
1730 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl))) in xenvif_ctrl_work_todo()
1756 return -ENODEV; in netback_init()
1776 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); in netback_init()
1797 MODULE_ALIAS("xen-backend:vif");