Lines Matching +full:charge +full:- +full:ctrl +full:- +full:value

2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
8 * Copyright (c) 2002-2005, K A Fraser
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
100 * for xen-netfront with the XDP_PACKET_HEADROOM offset
120 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
130 (vif->pending_tx_info[pending_idx].callback_struct)
136 u16 pending_idx = ubuf->desc; in ubuf_to_queue()
139 return container_of(temp - pending_idx, in ubuf_to_queue()
156 return i & (MAX_PENDING_REQS-1); in pending_index()
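Editor's note: pending_index() above reduces a free-running counter to a slot in the pending ring by masking with MAX_PENDING_REQS-1, which only works because the ring size is a power of two. The following is a minimal standalone sketch of that indexing scheme, not driver code; RING_SIZE is a hypothetical stand-in for MAX_PENDING_REQS.

/* Standalone model of power-of-two ring indexing as used by pending_index(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u                   /* must be a power of two */

static uint16_t ring_index(uint16_t i)
{
        return i & (RING_SIZE - 1);      /* cheap modulo for power-of-two sizes */
}

int main(void)
{
        uint16_t prod = 0, cons = 0;
        uint16_t slots[RING_SIZE];

        /* Producer and consumer counters may wrap freely; only the masked
         * value is ever used as an array index. */
        for (uint32_t n = 0; n < 3 * RING_SIZE; n++) {
                slots[ring_index(prod)] = (uint16_t)n;
                prod++;
                assert(slots[ring_index(cons)] == (uint16_t)n);
                cons++;
        }
        printf("prod=%u cons=%u in-flight=%u\n", (unsigned)prod,
               (unsigned)cons, (unsigned)(uint16_t)(prod - cons));
        return 0;
}

The same masking idiom shows up again for the dealloc ring further down.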
161 wake_up(&queue->wq); in xenvif_kick_thread()
168 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); in xenvif_napi_schedule_or_enable_events()
171 napi_schedule(&queue->napi); in xenvif_napi_schedule_or_enable_events()
173 &queue->eoi_pending) & in xenvif_napi_schedule_or_enable_events()
175 xen_irq_lateeoi(queue->tx_irq, 0); in xenvif_napi_schedule_or_enable_events()
186 max_burst = max(131072UL, queue->credit_bytes); in tx_add_credit()
189 max_credit = queue->remaining_credit + queue->credit_bytes; in tx_add_credit()
190 if (max_credit < queue->remaining_credit) in tx_add_credit()
193 queue->remaining_credit = min(max_credit, max_burst); in tx_add_credit()
194 queue->rate_limited = false; in tx_add_credit()
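Editor's note: the fragments from tx_add_credit() above implement a clamped credit top-up: the burst is never smaller than 128 KiB, the sum is checked for wrap-around, and the result is capped at the burst size. A minimal userspace sketch of that arithmetic, with names local to the sketch rather than taken from the driver:

/* Model of the credit top-up arithmetic in tx_add_credit(). */
#include <limits.h>
#include <stdio.h>

#define MIN_BURST 131072UL               /* always allow at least one 128 KiB burst */

static unsigned long add_credit(unsigned long remaining,
                                unsigned long credit_bytes)
{
        unsigned long max_burst, max_credit;

        max_burst = credit_bytes > MIN_BURST ? credit_bytes : MIN_BURST;

        max_credit = remaining + credit_bytes;
        if (max_credit < remaining)      /* wrapped: saturate */
                max_credit = ULONG_MAX;

        return max_credit < max_burst ? max_credit : max_burst;
}

int main(void)
{
        printf("%lu\n", add_credit(1000, 50000));    /* 51000 */
        printf("%lu\n", add_credit(200000, 50000));  /* clamped to the 131072 burst */
        return 0;
}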
208 RING_IDX cons = queue->tx.req_cons; in xenvif_tx_err()
212 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_tx_err()
215 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_tx_err()
218 RING_COPY_REQUEST(&queue->tx, cons++, txp); in xenvif_tx_err()
221 queue->tx.req_cons = cons; in xenvif_tx_err()
226 netdev_err(vif->dev, "fatal error; disabling device\n"); in xenvif_fatal_tx_err()
227 vif->disabled = true; in xenvif_fatal_tx_err()
229 if (vif->num_queues) in xenvif_fatal_tx_err()
230 xenvif_kick_thread(&vif->queues[0]); in xenvif_fatal_tx_err()
239 RING_IDX cons = queue->tx.req_cons; in xenvif_count_requests()
244 if (!(first->flags & XEN_NETTXF_more_data)) in xenvif_count_requests()
251 netdev_err(queue->vif->dev, in xenvif_count_requests()
254 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
255 return -ENODATA; in xenvif_count_requests()
262 netdev_err(queue->vif->dev, in xenvif_count_requests()
265 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
266 return -E2BIG; in xenvif_count_requests()
271 * the historical MAX_SKB_FRAGS value 18 to honor the in xenvif_count_requests()
278 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
281 drop_err = -E2BIG; in xenvif_count_requests()
287 RING_COPY_REQUEST(&queue->tx, cons + slots, txp); in xenvif_count_requests()
290 * first->size overflowed and following slots will in xenvif_count_requests()
298 if (!drop_err && txp->size > first->size) { in xenvif_count_requests()
300 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
302 txp->size, first->size); in xenvif_count_requests()
303 drop_err = -EIO; in xenvif_count_requests()
306 first->size -= txp->size; in xenvif_count_requests()
309 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) { in xenvif_count_requests()
310 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n", in xenvif_count_requests()
311 txp->offset, txp->size); in xenvif_count_requests()
312 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
313 return -EINVAL; in xenvif_count_requests()
316 more_data = txp->flags & XEN_NETTXF_more_data; in xenvif_count_requests()
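Editor's note: xenvif_count_requests() walks the chain of slots that the frontend linked together with XEN_NETTXF_more_data, rejecting chains that use too many slots, grow instead of shrink, or cross a page boundary. Below is a simplified standalone sketch of that walk under assumed types; struct tx_slot and MAX_SLOTS are stand-ins for xen_netif_tx_request and the historical 18-slot limit mentioned in the source comment.

/* Simplified model of the slot-counting walk in xenvif_count_requests(). */
#include <stdio.h>

#define PAGE_SZ   4096u
#define MORE_DATA 0x1u
#define MAX_SLOTS 18                     /* stand-in for the historical limit */

struct tx_slot {
        unsigned int offset;
        unsigned int size;
        unsigned int flags;
};

/* Returns the slot count on success, negative on a fatal inconsistency. */
static int count_slots(struct tx_slot *first, const struct tx_slot *ring,
                       unsigned int ring_len)
{
        unsigned int slots = 0;

        if (!(first->flags & MORE_DATA))
                return 0;

        do {
                const struct tx_slot *txp;

                if (slots >= ring_len || slots >= MAX_SLOTS)
                        return -1;                   /* too many slots */

                txp = &ring[slots++];
                if (txp->size > first->size)
                        return -2;                   /* chain grew instead of shrinking */
                first->size -= txp->size;

                if (txp->offset + txp->size > PAGE_SZ)
                        return -3;                   /* crosses a page boundary */

                if (!(txp->flags & MORE_DATA))
                        break;
        } while (1);

        return (int)slots;
}

int main(void)
{
        struct tx_slot first = { 0, 3000, MORE_DATA };
        struct tx_slot extra[2] = { { 0, 1000, MORE_DATA }, { 0, 500, 0 } };

        printf("slots=%d\n", count_slots(&first, extra, 2));   /* 2 */
        return 0;
}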
337 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
338 #define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
339 #define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
347 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; in xenvif_tx_create_map_op()
350 txp->gref, queue->vif->domid); in xenvif_tx_create_map_op()
352 memcpy(&queue->pending_tx_info[pending_idx].req, txp, in xenvif_tx_create_map_op()
354 queue->pending_tx_info[pending_idx].extra_count = extra_count; in xenvif_tx_create_map_op()
369 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_alloc_skb()
386 skb_frag_t *frags = shinfo->frags; in xenvif_get_requests()
390 struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops; in xenvif_get_requests()
391 struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; in xenvif_get_requests()
394 nr_slots = shinfo->nr_frags + 1; in xenvif_get_requests()
401 int amount = data_len > txp->size ? txp->size : data_len; in xenvif_get_requests()
403 cop->source.u.ref = txp->gref; in xenvif_get_requests()
404 cop->source.domid = queue->vif->domid; in xenvif_get_requests()
405 cop->source.offset = txp->offset; in xenvif_get_requests()
407 cop->dest.domid = DOMID_SELF; in xenvif_get_requests()
408 cop->dest.offset = (offset_in_page(skb->data + in xenvif_get_requests()
409 skb_headlen(skb) - in xenvif_get_requests()
411 cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) in xenvif_get_requests()
412 - data_len); in xenvif_get_requests()
414 cop->len = amount; in xenvif_get_requests()
415 cop->flags = GNTCOPY_source_gref; in xenvif_get_requests()
417 index = pending_index(queue->pending_cons); in xenvif_get_requests()
418 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
424 data_len -= amount; in xenvif_get_requests()
426 if (amount == txp->size) { in xenvif_get_requests()
429 memcpy(&queue->pending_tx_info[pending_idx].req, in xenvif_get_requests()
431 queue->pending_tx_info[pending_idx].extra_count = in xenvif_get_requests()
438 queue->pending_cons++; in xenvif_get_requests()
439 nr_slots--; in xenvif_get_requests()
444 txp->offset += amount; in xenvif_get_requests()
445 txp->size -= amount; in xenvif_get_requests()
449 for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; in xenvif_get_requests()
450 shinfo->nr_frags++, gop++) { in xenvif_get_requests()
451 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
452 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
455 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); in xenvif_get_requests()
466 frags = shinfo->frags; in xenvif_get_requests()
468 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; in xenvif_get_requests()
469 shinfo->nr_frags++, txp++, gop++) { in xenvif_get_requests()
470 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
471 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
474 frag_set_pending_idx(&frags[shinfo->nr_frags], in xenvif_get_requests()
478 skb_shinfo(skb)->frag_list = nskb; in xenvif_get_requests()
481 (*copy_ops) = cop - queue->tx_copy_ops; in xenvif_get_requests()
482 (*map_ops) = gop - queue->tx_map_ops; in xenvif_get_requests()
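Editor's note: in xenvif_get_requests() the first data_len bytes are gathered into the skb's linear area with grant-copy operations, consuming the leading slots either fully or partially; whatever is left over becomes mapped frags. The sketch below models only that head/frag split with local stand-in types; it is not the driver's code path.

/* Model of the head/frag split performed by xenvif_get_requests(). */
#include <stdio.h>

struct slot { unsigned int offset, size; };

/* Returns how many leading slots were fully consumed by the copies; a
 * partially consumed slot keeps its remainder for the frag path. */
static unsigned int build_head_copies(struct slot *slots, unsigned int n,
                                      unsigned int data_len)
{
        unsigned int i = 0;

        while (data_len && i < n) {
                unsigned int amount = data_len < slots[i].size ?
                                      data_len : slots[i].size;

                printf("copy %u bytes from slot %u (offset %u)\n",
                       amount, i, slots[i].offset);
                data_len -= amount;

                if (amount == slots[i].size) {
                        i++;                         /* slot fully consumed */
                } else {
                        slots[i].offset += amount;   /* partial: keep the rest */
                        slots[i].size -= amount;
                }
        }
        return i;
}

int main(void)
{
        struct slot s[3] = { { 0, 100 }, { 0, 2000 }, { 0, 800 } };
        unsigned int consumed = build_head_copies(s, 3, 128);

        printf("fully consumed: %u, next slot size: %u\n",
               consumed, s[consumed].size);
        return 0;
}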
489 if (unlikely(queue->grant_tx_handle[pending_idx] != in xenvif_grant_handle_set()
491 netdev_err(queue->vif->dev, in xenvif_grant_handle_set()
496 queue->grant_tx_handle[pending_idx] = handle; in xenvif_grant_handle_set()
502 if (unlikely(queue->grant_tx_handle[pending_idx] == in xenvif_grant_handle_reset()
504 netdev_err(queue->vif->dev, in xenvif_grant_handle_reset()
509 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; in xenvif_grant_handle_reset()
523 /* If this is non-NULL, we are currently checking the frag_list skb, and in xenvif_tx_check_gop()
527 int nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
529 frag_get_pending_idx(&shinfo->frags[0]) == in xenvif_tx_check_gop()
530 copy_pending_idx(skb, copy_count(skb) - 1); in xenvif_tx_check_gop()
539 newerr = (*gopp_copy)->status; in xenvif_tx_check_gop()
542 if (i < copy_count(skb) - 1 || !sharedslot) in xenvif_tx_check_gop()
548 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
550 (*gopp_copy)->status, in xenvif_tx_check_gop()
552 (*gopp_copy)->source.u.ref); in xenvif_tx_check_gop()
554 if (i < copy_count(skb) - 1 || !sharedslot) in xenvif_tx_check_gop()
565 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); in xenvif_tx_check_gop()
568 newerr = gop_map->status; in xenvif_tx_check_gop()
573 gop_map->handle); in xenvif_tx_check_gop()
593 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
596 gop_map->status, in xenvif_tx_check_gop()
598 gop_map->ref); in xenvif_tx_check_gop()
608 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); in xenvif_tx_check_gop()
618 for (j = 0; j < first_shinfo->nr_frags; j++) { in xenvif_tx_check_gop()
619 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); in xenvif_tx_check_gop()
632 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); in xenvif_tx_check_gop()
633 nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
645 int nr_frags = shinfo->nr_frags; in xenvif_fill_frags()
650 skb_frag_t *frag = shinfo->frags + i; in xenvif_fill_frags()
659 skb_shinfo(skb)->destructor_arg = in xenvif_fill_frags()
668 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_fill_frags()
670 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); in xenvif_fill_frags()
671 skb->len += txp->size; in xenvif_fill_frags()
672 skb->data_len += txp->size; in xenvif_fill_frags()
673 skb->truesize += txp->size; in xenvif_fill_frags()
676 get_page(queue->mmap_pages[pending_idx]); in xenvif_fill_frags()
686 RING_IDX cons = queue->tx.req_cons; in xenvif_get_extras()
689 if (unlikely(work_to_do-- <= 0)) { in xenvif_get_extras()
690 netdev_err(queue->vif->dev, "Missing extra info\n"); in xenvif_get_extras()
691 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
692 return -EBADR; in xenvif_get_extras()
695 RING_COPY_REQUEST(&queue->tx, cons, &extra); in xenvif_get_extras()
697 queue->tx.req_cons = ++cons; in xenvif_get_extras()
702 netdev_err(queue->vif->dev, in xenvif_get_extras()
704 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
705 return -EINVAL; in xenvif_get_extras()
708 memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); in xenvif_get_extras()
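Editor's note: xenvif_get_extras() files each extra-info segment into a per-type slot (extra.type - 1), so later code can test extras[TYPE - 1].type to see whether a given extra was supplied. A small standalone sketch of that bookkeeping; the enum values and struct here are assumptions, not the Xen ABI.

/* Model of per-type extra-info collection as in xenvif_get_extras(). */
#include <string.h>
#include <stdio.h>

enum { EXTRA_NONE, EXTRA_GSO, EXTRA_MCAST_ADD, EXTRA_TYPE_MAX };

struct extra_info { unsigned char type; unsigned int value; };

static int collect_extras(const struct extra_info *in, unsigned int n,
                          struct extra_info extras[EXTRA_TYPE_MAX - 1])
{
        memset(extras, 0, sizeof(struct extra_info) * (EXTRA_TYPE_MAX - 1));

        for (unsigned int i = 0; i < n; i++) {
                if (in[i].type == EXTRA_NONE || in[i].type >= EXTRA_TYPE_MAX)
                        return -1;               /* invalid extra type */
                extras[in[i].type - 1] = in[i];  /* file under type - 1 */
        }
        return 0;
}

int main(void)
{
        struct extra_info ring[1] = { { EXTRA_GSO, 1448 } };
        struct extra_info extras[EXTRA_TYPE_MAX - 1];

        if (collect_extras(ring, 1, extras) == 0 && extras[EXTRA_GSO - 1].type)
                printf("GSO extra present, size %u\n",
                       extras[EXTRA_GSO - 1].value);
        return 0;
}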
718 if (!gso->u.gso.size) { in xenvif_set_skb_gso()
719 netdev_err(vif->dev, "GSO size must not be zero.\n"); in xenvif_set_skb_gso()
721 return -EINVAL; in xenvif_set_skb_gso()
724 switch (gso->u.gso.type) { in xenvif_set_skb_gso()
726 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in xenvif_set_skb_gso()
729 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in xenvif_set_skb_gso()
732 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); in xenvif_set_skb_gso()
734 return -EINVAL; in xenvif_set_skb_gso()
737 skb_shinfo(skb)->gso_size = gso->u.gso.size; in xenvif_set_skb_gso()
752 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { in checksum_setup()
753 queue->stats.rx_gso_checksum_fixup++; in checksum_setup()
754 skb->ip_summed = CHECKSUM_PARTIAL; in checksum_setup()
758 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ in checksum_setup()
759 if (skb->ip_summed != CHECKSUM_PARTIAL) in checksum_setup()
768 u64 next_credit = queue->credit_window_start + in tx_credit_exceeded()
769 msecs_to_jiffies(queue->credit_usec / 1000); in tx_credit_exceeded()
772 if (timer_pending(&queue->credit_timeout)) { in tx_credit_exceeded()
773 queue->rate_limited = true; in tx_credit_exceeded()
779 queue->credit_window_start = now; in tx_credit_exceeded()
784 if (size > queue->remaining_credit) { in tx_credit_exceeded()
785 mod_timer(&queue->credit_timeout, in tx_credit_exceeded()
787 queue->credit_window_start = next_credit; in tx_credit_exceeded()
788 queue->rate_limited = true; in tx_credit_exceeded()
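Editor's note: tx_credit_exceeded() enforces the per-queue rate limit: once per credit window the credit may be replenished, and a packet larger than the remaining credit is deferred until the window ends. The sketch below models that decision with plain milliseconds instead of jiffies, and the refill is simplified to a straight reset rather than the clamped top-up shown earlier for tx_add_credit(); all names are local to the sketch.

/* Userspace model of the rate-limit check in tx_credit_exceeded(). */
#include <stdbool.h>
#include <stdio.h>

struct credit_state {
        unsigned long window_start_ms;
        unsigned long window_len_ms;
        unsigned long remaining;
        unsigned long credit_bytes;
};

static bool credit_exceeded(struct credit_state *c, unsigned long now_ms,
                            unsigned long pkt_size)
{
        unsigned long next_credit = c->window_start_ms + c->window_len_ms;

        /* Window elapsed: start a new one and top the credit back up. */
        if ((long)(now_ms - next_credit) >= 0) {
                c->window_start_ms = now_ms;
                c->remaining = c->credit_bytes;
        }

        if (pkt_size > c->remaining) {
                c->window_start_ms = next_credit;   /* defer to window end */
                return true;
        }

        c->remaining -= pkt_size;
        return false;
}

int main(void)
{
        struct credit_state c = { 0, 100, 1500, 10000 };

        printf("%d\n", credit_exceeded(&c, 10, 1400));   /* 0: fits in remaining credit */
        printf("%d\n", credit_exceeded(&c, 20, 1400));   /* 1: exceeded, deferred */
        printf("%d\n", credit_exceeded(&c, 250, 1400));  /* 0: window elapsed, refilled */
        return 0;
}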
805 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) { in xenvif_mcast_add()
807 netdev_err(vif->dev, in xenvif_mcast_add()
809 return -ENOSPC; in xenvif_mcast_add()
814 return -ENOMEM; in xenvif_mcast_add()
816 ether_addr_copy(mcast->addr, addr); in xenvif_mcast_add()
817 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr); in xenvif_mcast_add()
818 vif->fe_mcast_count++; in xenvif_mcast_add()
827 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_del()
828 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_del()
829 --vif->fe_mcast_count; in xenvif_mcast_del()
830 list_del_rcu(&mcast->entry); in xenvif_mcast_del()
842 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_match()
843 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_match()
858 while (!list_empty(&vif->fe_mcast_addr)) { in xenvif_mcast_addr_list_free()
861 mcast = list_first_entry(&vif->fe_mcast_addr, in xenvif_mcast_addr_list_free()
864 --vif->fe_mcast_count; in xenvif_mcast_addr_list_free()
865 list_del(&mcast->entry); in xenvif_mcast_addr_list_free()
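Editor's note: xenvif_mcast_add()/_del()/_match() keep a small, bounded list of frontend multicast addresses, capped at XEN_NETBK_MCAST_MAX so a misbehaving frontend cannot grow it without limit. The kernel code uses an RCU-protected list; the fixed array below is only a sketch of the bookkeeping, with MCAST_MAX as an assumed cap.

/* Bounded multicast filter list, modelled on xenvif_mcast_add()/_match(). */
#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define MCAST_MAX 64                     /* stand-in for XEN_NETBK_MCAST_MAX */
#define ETH_ALEN  6

struct mcast_filter {
        unsigned char addr[MCAST_MAX][ETH_ALEN];
        unsigned int count;
};

static int mcast_add(struct mcast_filter *f, const unsigned char *addr)
{
        if (f->count == MCAST_MAX)
                return -1;               /* too many addresses */
        memcpy(f->addr[f->count++], addr, ETH_ALEN);
        return 0;
}

static bool mcast_match(const struct mcast_filter *f, const unsigned char *addr)
{
        for (unsigned int i = 0; i < f->count; i++)
                if (!memcmp(f->addr[i], addr, ETH_ALEN))
                        return true;
        return false;
}

int main(void)
{
        static const unsigned char mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0, 0, 1 };
        struct mcast_filter f = { .count = 0 };

        mcast_add(&f, mac);
        printf("match: %d, count: %u\n", mcast_match(&f, mac), f.count);
        return 0;
}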
879 while (skb_queue_len(&queue->tx_queue) < budget) { in xenvif_tx_build_gops()
882 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; in xenvif_tx_build_gops()
890 if (queue->tx.sring->req_prod - queue->tx.req_cons > in xenvif_tx_build_gops()
892 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
895 queue->tx.sring->req_prod, queue->tx.req_cons, in xenvif_tx_build_gops()
897 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
901 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_tx_build_gops()
905 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
907 RING_COPY_REQUEST(&queue->tx, idx, &txreq); in xenvif_tx_build_gops()
909 /* Credit-based scheduling. */ in xenvif_tx_build_gops()
910 if (txreq.size > queue->remaining_credit && in xenvif_tx_build_gops()
914 queue->remaining_credit -= txreq.size; in xenvif_tx_build_gops()
916 work_to_do--; in xenvif_tx_build_gops()
917 queue->tx.req_cons = ++idx; in xenvif_tx_build_gops()
925 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
930 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) { in xenvif_tx_build_gops()
933 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1]; in xenvif_tx_build_gops()
934 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
944 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) { in xenvif_tx_build_gops()
947 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1]; in xenvif_tx_build_gops()
948 xenvif_mcast_del(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
968 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
976 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
980 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
984 index = pending_index(queue->pending_cons); in xenvif_tx_build_gops()
985 pending_idx = queue->pending_ring[index]; in xenvif_tx_build_gops()
987 if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size) in xenvif_tx_build_gops()
992 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
998 skb_shinfo(skb)->nr_frags = ret; in xenvif_tx_build_gops()
999 /* At this point shinfo->nr_frags is in fact the number of in xenvif_tx_build_gops()
1004 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) { in xenvif_tx_build_gops()
1005 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS; in xenvif_tx_build_gops()
1007 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; in xenvif_tx_build_gops()
1010 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
1014 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1020 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { in xenvif_tx_build_gops()
1022 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; in xenvif_tx_build_gops()
1024 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { in xenvif_tx_build_gops()
1026 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
1033 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) { in xenvif_tx_build_gops()
1037 extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1]; in xenvif_tx_build_gops()
1039 switch (extra->u.hash.type) { in xenvif_tx_build_gops()
1056 *(u32 *)extra->u.hash.value, in xenvif_tx_build_gops()
1064 __skb_queue_tail(&queue->tx_queue, skb); in xenvif_tx_build_gops()
1066 queue->tx.req_cons = idx; in xenvif_tx_build_gops()
1068 if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) || in xenvif_tx_build_gops()
1069 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) in xenvif_tx_build_gops()
1077 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1085 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_handle_frag_list()
1087 queue->stats.tx_zerocopy_sent += 2; in xenvif_handle_frag_list()
1088 queue->stats.tx_frag_overflow++; in xenvif_handle_frag_list()
1092 skb->truesize -= skb->data_len; in xenvif_handle_frag_list()
1093 skb->len += nskb->len; in xenvif_handle_frag_list()
1094 skb->data_len += nskb->len; in xenvif_handle_frag_list()
1097 for (i = 0; offset < skb->len; i++) { in xenvif_handle_frag_list()
1105 skb->truesize += skb->data_len; in xenvif_handle_frag_list()
1108 return -ENOMEM; in xenvif_handle_frag_list()
1111 if (offset + PAGE_SIZE < skb->len) in xenvif_handle_frag_list()
1114 len = skb->len - offset; in xenvif_handle_frag_list()
1125 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in xenvif_handle_frag_list()
1127 uarg = skb_shinfo(skb)->destructor_arg; in xenvif_handle_frag_list()
1129 atomic_inc(&queue->inflight_packets); in xenvif_handle_frag_list()
1130 uarg->callback(uarg, true); in xenvif_handle_frag_list()
1131 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_handle_frag_list()
1134 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); in xenvif_handle_frag_list()
1135 skb_shinfo(skb)->nr_frags = i; in xenvif_handle_frag_list()
1136 skb->truesize += i * PAGE_SIZE; in xenvif_handle_frag_list()
1143 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; in xenvif_tx_submit()
1144 struct gnttab_copy *gop_copy = queue->tx_copy_ops; in xenvif_tx_submit()
1148 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { in xenvif_tx_submit()
1153 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_tx_submit()
1161 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_submit()
1164 skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1165 skb_shinfo(nskb)->nr_frags = 0; in xenvif_tx_submit()
1171 if (txp->flags & XEN_NETTXF_csum_blank) in xenvif_tx_submit()
1172 skb->ip_summed = CHECKSUM_PARTIAL; in xenvif_tx_submit()
1173 else if (txp->flags & XEN_NETTXF_data_validated) in xenvif_tx_submit()
1174 skb->ip_summed = CHECKSUM_UNNECESSARY; in xenvif_tx_submit()
1179 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1183 netdev_err(queue->vif->dev, in xenvif_tx_submit()
1189 /* Copied all the bits from the frag list -- free it. */ in xenvif_tx_submit()
1194 skb->dev = queue->vif->dev; in xenvif_tx_submit()
1195 skb->protocol = eth_type_trans(skb, skb->dev); in xenvif_tx_submit()
1199 netdev_dbg(queue->vif->dev, in xenvif_tx_submit()
1202 if (skb_shinfo(skb)->destructor_arg) in xenvif_tx_submit()
1224 mss = skb_shinfo(skb)->gso_size; in xenvif_tx_submit()
1225 hdrlen = skb_transport_header(skb) - in xenvif_tx_submit()
1229 skb_shinfo(skb)->gso_segs = in xenvif_tx_submit()
1230 DIV_ROUND_UP(skb->len - hdrlen, mss); in xenvif_tx_submit()
1233 queue->stats.rx_bytes += skb->len; in xenvif_tx_submit()
1234 queue->stats.rx_packets++; in xenvif_tx_submit()
1243 if (skb_shinfo(skb)->destructor_arg) { in xenvif_tx_submit()
1245 queue->stats.tx_zerocopy_sent++; in xenvif_tx_submit()
1263 spin_lock_irqsave(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1265 u16 pending_idx = ubuf->desc; in xenvif_zerocopy_callback()
1266 ubuf = (struct ubuf_info *) ubuf->ctx; in xenvif_zerocopy_callback()
1267 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= in xenvif_zerocopy_callback()
1269 index = pending_index(queue->dealloc_prod); in xenvif_zerocopy_callback()
1270 queue->dealloc_ring[index] = pending_idx; in xenvif_zerocopy_callback()
1275 queue->dealloc_prod++; in xenvif_zerocopy_callback()
1277 spin_unlock_irqrestore(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1280 queue->stats.tx_zerocopy_success++; in xenvif_zerocopy_callback()
1282 queue->stats.tx_zerocopy_fail++; in xenvif_zerocopy_callback()
1293 dc = queue->dealloc_cons; in xenvif_tx_dealloc_action()
1294 gop = queue->tx_unmap_ops; in xenvif_tx_dealloc_action()
1298 dp = queue->dealloc_prod; in xenvif_tx_dealloc_action()
1306 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); in xenvif_tx_dealloc_action()
1308 queue->dealloc_ring[pending_index(dc++)]; in xenvif_tx_dealloc_action()
1310 pending_idx_release[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1312 queue->pages_to_unmap[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1313 queue->mmap_pages[pending_idx]; in xenvif_tx_dealloc_action()
1317 queue->grant_tx_handle[pending_idx]); in xenvif_tx_dealloc_action()
1322 } while (dp != queue->dealloc_prod); in xenvif_tx_dealloc_action()
1324 queue->dealloc_cons = dc; in xenvif_tx_dealloc_action()
1326 if (gop - queue->tx_unmap_ops > 0) { in xenvif_tx_dealloc_action()
1328 ret = gnttab_unmap_refs(queue->tx_unmap_ops, in xenvif_tx_dealloc_action()
1330 queue->pages_to_unmap, in xenvif_tx_dealloc_action()
1331 gop - queue->tx_unmap_ops); in xenvif_tx_dealloc_action()
1333 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n", in xenvif_tx_dealloc_action()
1334 gop - queue->tx_unmap_ops, ret); in xenvif_tx_dealloc_action()
1335 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { in xenvif_tx_dealloc_action()
1337 netdev_err(queue->vif->dev, in xenvif_tx_dealloc_action()
1347 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) in xenvif_tx_dealloc_action()
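Editor's note: the zerocopy completion path is a producer/consumer pair: xenvif_zerocopy_callback() pushes pending indices into the dealloc ring (under callback_lock in the driver) and xenvif_tx_dealloc_action() drains everything produced so far and unmaps the corresponding grants in one batch. The sketch below keeps only that shape; the batched gnttab unmap is replaced by a printf and no locking is shown.

/* Minimal model of the dealloc-ring producer/consumer pair. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u                   /* power of two, as for pending_index() */

static uint16_t dealloc_ring[RING_SIZE];
static uint16_t dealloc_prod, dealloc_cons;

static void zerocopy_done(uint16_t pending_idx)
{
        /* Producer side: record the now-idle pending index. */
        dealloc_ring[dealloc_prod & (RING_SIZE - 1)] = pending_idx;
        dealloc_prod++;
}

static void dealloc_action(void)
{
        uint16_t batch[RING_SIZE];
        unsigned int n = 0;

        /* Consumer side: gather everything produced so far, then issue
         * one batched "unmap" for the whole run. */
        while (dealloc_cons != dealloc_prod)
                batch[n++] = dealloc_ring[(dealloc_cons++) & (RING_SIZE - 1)];

        if (n)
                printf("unmapping %u grants in one batch (first idx %u)\n",
                       n, (unsigned)batch[0]);
}

int main(void)
{
        zerocopy_done(3);
        zerocopy_done(7);
        zerocopy_done(11);
        dealloc_action();
        return 0;
}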
1367 gnttab_batch_copy(queue->tx_copy_ops, nr_cops); in xenvif_tx_action()
1369 ret = gnttab_map_refs(queue->tx_map_ops, in xenvif_tx_action()
1371 queue->pages_to_map, in xenvif_tx_action()
1376 netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", in xenvif_tx_action()
1379 WARN_ON_ONCE(queue->tx_map_ops[i].status == in xenvif_tx_action()
1396 pending_tx_info = &queue->pending_tx_info[pending_idx]; in xenvif_idx_release()
1398 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_idx_release()
1400 make_tx_response(queue, &pending_tx_info->req, in xenvif_idx_release()
1401 pending_tx_info->extra_count, status); in xenvif_idx_release()
1407 index = pending_index(queue->pending_prod++); in xenvif_idx_release()
1408 queue->pending_ring[index] = pending_idx; in xenvif_idx_release()
1412 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_idx_release()
1421 RING_IDX i = queue->tx.rsp_prod_pvt; in make_tx_response()
1424 resp = RING_GET_RESPONSE(&queue->tx, i); in make_tx_response()
1425 resp->id = txp->id; in make_tx_response()
1426 resp->status = st; in make_tx_response()
1428 while (extra_count-- != 0) in make_tx_response()
1429 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; in make_tx_response()
1431 queue->tx.rsp_prod_pvt = ++i; in make_tx_response()
1438 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); in push_tx_responses()
1440 notify_remote_via_irq(queue->tx_irq); in push_tx_responses()
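Editor's note: make_tx_response() writes the status at the private producer index, answers any slots that carried extra-info segments with a NULL status, and push_tx_responses() then publishes the producer and notifies the frontend if needed. The sketch below mirrors that sequence with a local ring structure; the event-channel notify is just a printf.

/* Model of the response path in make_tx_response()/push_tx_responses(). */
#include <stdio.h>

#define RSP_OKAY 0
#define RSP_NULL 1

struct tx_response { unsigned short id; char status; };

struct tx_ring {
        struct tx_response ring[8];
        unsigned int rsp_prod_pvt;           /* private, not yet visible */
        unsigned int rsp_prod;               /* what the frontend sees   */
};

static void make_tx_response(struct tx_ring *tx, unsigned short id,
                             unsigned int extra_count, char status)
{
        unsigned int i = tx->rsp_prod_pvt;

        tx->ring[i % 8] = (struct tx_response){ .id = id, .status = status };
        while (extra_count--)                /* answer the extra slots too */
                tx->ring[++i % 8].status = RSP_NULL;

        tx->rsp_prod_pvt = i + 1;
}

static void push_tx_responses(struct tx_ring *tx)
{
        if (tx->rsp_prod != tx->rsp_prod_pvt) {
                tx->rsp_prod = tx->rsp_prod_pvt;    /* publish */
                printf("notify frontend: rsp_prod now %u\n", tx->rsp_prod);
        }
}

int main(void)
{
        struct tx_ring tx = { .rsp_prod_pvt = 0, .rsp_prod = 0 };

        make_tx_response(&tx, 42, 1, RSP_OKAY);
        push_tx_responses(&tx);
        return 0;
}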
1451 queue->grant_tx_handle[pending_idx]); in xenvif_idx_unmap()
1455 &queue->mmap_pages[pending_idx], 1); in xenvif_idx_unmap()
1457 netdev_err(queue->vif->dev, in xenvif_idx_unmap()
1470 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) in tx_work_todo()
1478 return queue->dealloc_cons != queue->dealloc_prod; in tx_dealloc_work_todo()
1483 if (queue->tx.sring) in xenvif_unmap_frontend_data_rings()
1484 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1485 queue->tx.sring); in xenvif_unmap_frontend_data_rings()
1486 if (queue->rx.sring) in xenvif_unmap_frontend_data_rings()
1487 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1488 queue->rx.sring); in xenvif_unmap_frontend_data_rings()
1499 int err = -ENOMEM; in xenvif_map_frontend_data_rings()
1501 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1507 rsp_prod = READ_ONCE(txs->rsp_prod); in xenvif_map_frontend_data_rings()
1508 req_prod = READ_ONCE(txs->req_prod); in xenvif_map_frontend_data_rings()
1510 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1512 err = -EIO; in xenvif_map_frontend_data_rings()
1513 if (req_prod - rsp_prod > RING_SIZE(&queue->tx)) in xenvif_map_frontend_data_rings()
1516 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1522 rsp_prod = READ_ONCE(rxs->rsp_prod); in xenvif_map_frontend_data_rings()
1523 req_prod = READ_ONCE(rxs->req_prod); in xenvif_map_frontend_data_rings()
1525 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1527 err = -EIO; in xenvif_map_frontend_data_rings()
1528 if (req_prod - rsp_prod > RING_SIZE(&queue->rx)) in xenvif_map_frontend_data_rings()
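Editor's note: after attaching to the frontend's shared rings, xenvif_map_frontend_data_rings() checks that the snapshot of req_prod does not run ahead of rsp_prod by more than the ring size; anything larger indicates a corrupt or hostile ring. A one-function sketch of that check, with RING_ENTRIES as an assumed ring size:

/* Sanity check on a shared-ring snapshot, as in xenvif_map_frontend_data_rings(). */
#include <stdbool.h>
#include <stdio.h>

#define RING_ENTRIES 256u

static bool ring_snapshot_sane(unsigned int req_prod, unsigned int rsp_prod)
{
        /* Unsigned subtraction handles index wrap-around. */
        return (req_prod - rsp_prod) <= RING_ENTRIES;
}

int main(void)
{
        printf("%d\n", ring_snapshot_sane(300, 100));    /* 1: 200 requests in flight */
        printf("%d\n", ring_snapshot_sane(1000, 100));   /* 0: impossible, reject */
        return 0;
}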
1544 !atomic_read(&queue->inflight_packets); in xenvif_dealloc_kthread_should_stop()
1552 wait_event_interruptible(queue->dealloc_wq, in xenvif_dealloc_kthread()
1573 RING_IDX idx = vif->ctrl.rsp_prod_pvt; in make_ctrl_response()
1575 .id = req->id, in make_ctrl_response()
1576 .type = req->type, in make_ctrl_response()
1581 *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp; in make_ctrl_response()
1582 vif->ctrl.rsp_prod_pvt = ++idx; in make_ctrl_response()
1589 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify); in push_ctrl_response()
1591 notify_remote_via_irq(vif->ctrl_irq); in push_ctrl_response()
1600 switch (req->type) { in process_ctrl_request()
1602 status = xenvif_set_hash_alg(vif, req->data[0]); in process_ctrl_request()
1610 status = xenvif_set_hash_flags(vif, req->data[0]); in process_ctrl_request()
1614 status = xenvif_set_hash_key(vif, req->data[0], in process_ctrl_request()
1615 req->data[1]); in process_ctrl_request()
1625 req->data[0]); in process_ctrl_request()
1629 status = xenvif_set_hash_mapping(vif, req->data[0], in process_ctrl_request()
1630 req->data[1], in process_ctrl_request()
1631 req->data[2]); in process_ctrl_request()
1647 req_prod = vif->ctrl.sring->req_prod; in xenvif_ctrl_action()
1648 req_cons = vif->ctrl.req_cons; in xenvif_ctrl_action()
1659 RING_COPY_REQUEST(&vif->ctrl, req_cons, &req); in xenvif_ctrl_action()
1665 vif->ctrl.req_cons = req_cons; in xenvif_ctrl_action()
1666 vif->ctrl.sring->req_event = req_cons + 1; in xenvif_ctrl_action()
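Editor's note: xenvif_ctrl_action() copies each control request out of the shared ring, dispatches on its type in process_ctrl_request(), answers it, and advances req_cons and req_event so the frontend knows when to raise the next interrupt. The sketch below models only that loop shape; the request/response layout and the enum values are stand-ins, and the real hash handlers are elided.

/* Model of the control-ring service loop in xenvif_ctrl_action(). */
#include <stdio.h>

enum { CTRL_SET_HASH_ALG, CTRL_SET_HASH_KEY, CTRL_UNKNOWN };
enum { STATUS_SUCCESS, STATUS_NOT_SUPPORTED };

struct ctrl_req { unsigned short id; unsigned short type; };
struct ctrl_rsp { unsigned short id; unsigned short type; unsigned int status; };

static struct ctrl_rsp process_ctrl_request(const struct ctrl_req *req)
{
        unsigned int status = STATUS_NOT_SUPPORTED;

        switch (req->type) {
        case CTRL_SET_HASH_ALG:
        case CTRL_SET_HASH_KEY:
                status = STATUS_SUCCESS;     /* real handler elided */
                break;
        default:
                break;
        }
        return (struct ctrl_rsp){ req->id, req->type, status };
}

int main(void)
{
        struct ctrl_req ring[2] = { { 1, CTRL_SET_HASH_ALG },
                                    { 2, CTRL_UNKNOWN } };
        unsigned int req_prod = 2, req_cons = 0;

        while (req_cons < req_prod) {
                struct ctrl_req req = ring[req_cons++];   /* copy out, then use */
                struct ctrl_rsp rsp = process_ctrl_request(&req);

                printf("req %u -> status %u\n", rsp.id, rsp.status);
        }
        /* req_event would now be set to req_cons + 1 to re-arm the kick. */
        return 0;
}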
1672 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl))) in xenvif_ctrl_work_todo()
1698 return -ENODEV; in netback_init()
1701 * specified a value. in netback_init()
1718 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); in netback_init()
1739 MODULE_ALIAS("xen-backend:vif");