1 /* A network driver using virtio.
2  *
3  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 //#define DEBUG
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/module.h>
23 #include <linux/virtio.h>
24 #include <linux/virtio_net.h>
25 #include <linux/scatterlist.h>
26 #include <linux/if_vlan.h>
27 #include <linux/slab.h>
28 #include <linux/cpu.h>
29 #include <linux/average.h>
30 #include <net/busy_poll.h>
31 
32 static int napi_weight = NAPI_POLL_WEIGHT;
33 module_param(napi_weight, int, 0444);
34 
35 static bool csum = true, gso = true;
36 module_param(csum, bool, 0444);
37 module_param(gso, bool, 0444);
38 
39 /* FIXME: MTU in config. */
40 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
41 #define GOOD_COPY_LEN	128
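/* For illustration, assuming a typical configuration: ETH_HLEN = 14,
 * VLAN_HLEN = 4 and ETH_DATA_LEN = 1500, so GOOD_PACKET_LEN works out to
 * 1518 bytes.  page_to_skb() below copies up to roughly GOOD_COPY_LEN bytes
 * of a received packet into the skb linear area and attaches the remainder
 * as page fragments.
 */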
42 
43 /* RX packet size EWMA. The average packet size is used to determine the packet
44  * buffer size when refilling RX rings. As the entire RX ring may be refilled
45  * at once, the weight is chosen so that the EWMA will be insensitive to short-
46  * term, transient changes in packet size.
47  */
48 DECLARE_EWMA(pkt_len, 1, 64)
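/* Rough sketch of the behaviour, assuming the usual EWMA recurrence: with a
 * weight of 64 the estimate moves as
 *   avg <- avg + (sample - avg) / 64,
 * so a sudden change in packet size shifts the estimate by only ~1/64th per
 * packet and needs on the order of 64 samples before it dominates the average.
 */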
49 
50 /* With mergeable buffers we align buffer address and use the low bits to
51  * encode its true size. Buffer size is up to 1 page so we need to align to
52  * square root of page size to ensure we reserve enough bits to encode the true
53  * size.
54  */
55 #define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
56 
57 /* Minimum alignment for mergeable packet buffers. */
58 #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
59 				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
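/* Worked example, assuming 4 KiB pages and 64-byte cache lines:
 * PAGE_SHIFT = 12, so MERGEABLE_BUFFER_MIN_ALIGN_SHIFT = (12 + 1) / 2 = 6 and
 * MERGEABLE_BUFFER_ALIGN = max(64, 1 << 6) = 64.  A 64-byte aligned buffer
 * address has its low 6 bits free, enough to encode truesize / 64 - 1 for any
 * truesize up to 64 * 64 = 4096 bytes (one page); see mergeable_buf_to_ctx()
 * and mergeable_ctx_to_buf_truesize() below.
 */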
60 
61 #define VIRTNET_DRIVER_VERSION "1.0.0"
62 
63 struct virtnet_stats {
64 	struct u64_stats_sync tx_syncp;
65 	struct u64_stats_sync rx_syncp;
66 	u64 tx_bytes;
67 	u64 tx_packets;
68 
69 	u64 rx_bytes;
70 	u64 rx_packets;
71 };
72 
73 /* Internal representation of a send virtqueue */
74 struct send_queue {
75 	/* Virtqueue associated with this send_queue */
76 	struct virtqueue *vq;
77 
78 	/* TX: fragments + linear part + virtio header */
79 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
80 
81 	/* Name of the send queue: output.$index */
82 	char name[40];
83 };
84 
85 /* Internal representation of a receive virtqueue */
86 struct receive_queue {
87 	/* Virtqueue associated with this receive_queue */
88 	struct virtqueue *vq;
89 
90 	struct napi_struct napi;
91 
92 	/* Chain pages by the private ptr. */
93 	struct page *pages;
94 
95 	/* Average packet length for mergeable receive buffers. */
96 	struct ewma_pkt_len mrg_avg_pkt_len;
97 
98 	/* Page frag for packet buffer allocation. */
99 	struct page_frag alloc_frag;
100 
101 	/* RX: fragments + linear part + virtio header */
102 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
103 
104 	/* Name of this receive queue: input.$index */
105 	char name[40];
106 };
107 
108 struct virtnet_info {
109 	struct virtio_device *vdev;
110 	struct virtqueue *cvq;
111 	struct net_device *dev;
112 	struct send_queue *sq;
113 	struct receive_queue *rq;
114 	unsigned int status;
115 
116 	/* Max # of queue pairs supported by the device */
117 	u16 max_queue_pairs;
118 
119 	/* # of queue pairs currently used by the driver */
120 	u16 curr_queue_pairs;
121 
122 	/* I like... big packets and I cannot lie! */
123 	bool big_packets;
124 
125 	/* Host will merge rx buffers for big packets (shake it! shake it!) */
126 	bool mergeable_rx_bufs;
127 
128 	/* Has control virtqueue */
129 	bool has_cvq;
130 
131 	/* Host can handle any s/g split between our header and packet data */
132 	bool any_header_sg;
133 
134 	/* Packet virtio header size */
135 	u8 hdr_len;
136 
137 	/* Active statistics */
138 	struct virtnet_stats __percpu *stats;
139 
140 	/* Work struct for refilling if we run low on memory. */
141 	struct delayed_work refill;
142 
143 	/* Work struct for config space updates */
144 	struct work_struct config_work;
145 
146 	/* Does the affinity hint is set for virtqueues? */
147 	bool affinity_hint_set;
148 
149 	/* CPU hot plug notifier */
150 	struct notifier_block nb;
151 
152 	/* Control VQ buffers: protected by the rtnl lock */
153 	struct virtio_net_ctrl_hdr ctrl_hdr;
154 	virtio_net_ctrl_ack ctrl_status;
155 	u8 ctrl_promisc;
156 	u8 ctrl_allmulti;
157 };
158 
159 struct padded_vnet_hdr {
160 	struct virtio_net_hdr_mrg_rxbuf hdr;
161 	/*
162 	 * hdr is in a separate sg buffer, and data sg buffer shares same page
163 	 * with this header sg. This padding makes next sg 16 byte aligned
164 	 * after the header.
165 	 */
166 	char padding[4];
167 };
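/* For reference, on common builds sizeof(struct virtio_net_hdr_mrg_rxbuf) is
 * 12 bytes, so the 4 bytes of padding bring the header to 16 bytes and leave
 * the data sg entry that follows it 16-byte aligned.
 */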
168 
169 /* Converting between virtqueue no. and kernel tx/rx queue no.
170  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
171  */
172 static int vq2txq(struct virtqueue *vq)
173 {
174 	return (vq->index - 1) / 2;
175 }
176 
177 static int txq2vq(int txq)
178 {
179 	return txq * 2 + 1;
180 }
181 
182 static int vq2rxq(struct virtqueue *vq)
183 {
184 	return vq->index / 2;
185 }
186 
187 static int rxq2vq(int rxq)
188 {
189 	return rxq * 2;
190 }
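/* Example of the mapping above: virtqueue 0 is rx0, virtqueue 1 is tx0, and
 * virtqueue 5 is tx2 (vq2txq: (5 - 1) / 2 = 2, txq2vq: 2 * 2 + 1 = 5).  The
 * control virtqueue, if present, always comes last.
 */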
191 
192 static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
193 {
194 	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
195 }
196 
197 /*
198  * page->private is used to chain pages for big packets; put the whole
199  * most recently used list at the beginning for reuse
200  */
201 static void give_pages(struct receive_queue *rq, struct page *page)
202 {
203 	struct page *end;
204 
205 	/* Find end of list, sew whole thing into vi->rq.pages. */
206 	for (end = page; end->private; end = (struct page *)end->private);
207 	end->private = (unsigned long)rq->pages;
208 	rq->pages = page;
209 }
210 
211 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
212 {
213 	struct page *p = rq->pages;
214 
215 	if (p) {
216 		rq->pages = (struct page *)p->private;
217 		/* clear private here, it is used to chain pages */
218 		p->private = 0;
219 	} else
220 		p = alloc_page(gfp_mask);
221 	return p;
222 }
223 
224 static void skb_xmit_done(struct virtqueue *vq)
225 {
226 	struct virtnet_info *vi = vq->vdev->priv;
227 
228 	/* Suppress further interrupts. */
229 	virtqueue_disable_cb(vq);
230 
231 	/* We were probably waiting for more output buffers. */
232 	netif_wake_subqueue(vi->dev, vq2txq(vq));
233 }
234 
235 static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
236 {
237 	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
238 	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
239 }
240 
241 static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
242 {
243 	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
244 
245 }
246 
247 static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
248 {
249 	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
250 	return (unsigned long)buf | (size - 1);
251 }
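/* Round-trip example, assuming MERGEABLE_BUFFER_ALIGN is 64: for a buffer
 * with truesize 1536, mergeable_buf_to_ctx() stores 1536 / 64 - 1 = 23 in the
 * low bits of the 64-byte aligned address, and
 * mergeable_ctx_to_buf_truesize() recovers (23 + 1) * 64 = 1536.
 */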
252 
253 /* Called from bottom half context */
254 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
255 				   struct receive_queue *rq,
256 				   struct page *page, unsigned int offset,
257 				   unsigned int len, unsigned int truesize)
258 {
259 	struct sk_buff *skb;
260 	struct virtio_net_hdr_mrg_rxbuf *hdr;
261 	unsigned int copy, hdr_len, hdr_padded_len;
262 	char *p;
263 
264 	p = page_address(page) + offset;
265 
266 	/* copy small packet so we can reuse these pages for small data */
267 	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
268 	if (unlikely(!skb))
269 		return NULL;
270 
271 	hdr = skb_vnet_hdr(skb);
272 
273 	hdr_len = vi->hdr_len;
274 	if (vi->mergeable_rx_bufs)
275 		hdr_padded_len = sizeof *hdr;
276 	else
277 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
278 
279 	memcpy(hdr, p, hdr_len);
280 
281 	len -= hdr_len;
282 	offset += hdr_padded_len;
283 	p += hdr_padded_len;
284 
285 	copy = len;
286 	if (copy > skb_tailroom(skb))
287 		copy = skb_tailroom(skb);
288 	memcpy(skb_put(skb, copy), p, copy);
289 
290 	len -= copy;
291 	offset += copy;
292 
293 	if (vi->mergeable_rx_bufs) {
294 		if (len)
295 			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
296 		else
297 			put_page(page);
298 		return skb;
299 	}
300 
301 	/*
302 	 * Verify that we can indeed put this data into a skb.
303 	 * This is here to handle cases when the device erroneously
304 	 * tries to receive more than is possible. This is usually
305 	 * the case of a broken device.
306 	 */
307 	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
308 		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
309 		dev_kfree_skb(skb);
310 		return NULL;
311 	}
312 	BUG_ON(offset >= PAGE_SIZE);
313 	while (len) {
314 		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
315 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
316 				frag_size, truesize);
317 		len -= frag_size;
318 		page = (struct page *)page->private;
319 		offset = 0;
320 	}
321 
322 	if (page)
323 		give_pages(rq, page);
324 
325 	return skb;
326 }
327 
328 static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
329 {
330 	struct sk_buff * skb = buf;
331 
332 	len -= vi->hdr_len;
333 	skb_trim(skb, len);
334 
335 	return skb;
336 }
337 
338 static struct sk_buff *receive_big(struct net_device *dev,
339 				   struct virtnet_info *vi,
340 				   struct receive_queue *rq,
341 				   void *buf,
342 				   unsigned int len)
343 {
344 	struct page *page = buf;
345 	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
346 
347 	if (unlikely(!skb))
348 		goto err;
349 
350 	return skb;
351 
352 err:
353 	dev->stats.rx_dropped++;
354 	give_pages(rq, page);
355 	return NULL;
356 }
357 
358 static struct sk_buff *receive_mergeable(struct net_device *dev,
359 					 struct virtnet_info *vi,
360 					 struct receive_queue *rq,
361 					 unsigned long ctx,
362 					 unsigned int len)
363 {
364 	void *buf = mergeable_ctx_to_buf_address(ctx);
365 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
366 	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
367 	struct page *page = virt_to_head_page(buf);
368 	int offset = buf - page_address(page);
369 	unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
370 
371 	struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
372 					       truesize);
373 	struct sk_buff *curr_skb = head_skb;
374 
375 	if (unlikely(!curr_skb))
376 		goto err_skb;
377 	while (--num_buf) {
378 		int num_skb_frags;
379 
380 		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
381 		if (unlikely(!ctx)) {
382 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
383 				 dev->name, num_buf,
384 				 virtio16_to_cpu(vi->vdev,
385 						 hdr->num_buffers));
386 			dev->stats.rx_length_errors++;
387 			goto err_buf;
388 		}
389 
390 		buf = mergeable_ctx_to_buf_address(ctx);
391 		page = virt_to_head_page(buf);
392 
393 		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
394 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
395 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
396 
397 			if (unlikely(!nskb))
398 				goto err_skb;
399 			if (curr_skb == head_skb)
400 				skb_shinfo(curr_skb)->frag_list = nskb;
401 			else
402 				curr_skb->next = nskb;
403 			curr_skb = nskb;
404 			head_skb->truesize += nskb->truesize;
405 			num_skb_frags = 0;
406 		}
407 		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
408 		if (curr_skb != head_skb) {
409 			head_skb->data_len += len;
410 			head_skb->len += len;
411 			head_skb->truesize += truesize;
412 		}
413 		offset = buf - page_address(page);
414 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
415 			put_page(page);
416 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
417 					     len, truesize);
418 		} else {
419 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
420 					offset, len, truesize);
421 		}
422 	}
423 
424 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
425 	return head_skb;
426 
427 err_skb:
428 	put_page(page);
429 	while (--num_buf) {
430 		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
431 		if (unlikely(!ctx)) {
432 			pr_debug("%s: rx error: %d buffers missing\n",
433 				 dev->name, num_buf);
434 			dev->stats.rx_length_errors++;
435 			break;
436 		}
437 		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
438 		put_page(page);
439 	}
440 err_buf:
441 	dev->stats.rx_dropped++;
442 	dev_kfree_skb(head_skb);
443 	return NULL;
444 }
445 
446 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
447 			void *buf, unsigned int len)
448 {
449 	struct net_device *dev = vi->dev;
450 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
451 	struct sk_buff *skb;
452 	struct virtio_net_hdr_mrg_rxbuf *hdr;
453 
454 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
455 		pr_debug("%s: short packet %i\n", dev->name, len);
456 		dev->stats.rx_length_errors++;
457 		if (vi->mergeable_rx_bufs) {
458 			unsigned long ctx = (unsigned long)buf;
459 			void *base = mergeable_ctx_to_buf_address(ctx);
460 			put_page(virt_to_head_page(base));
461 		} else if (vi->big_packets) {
462 			give_pages(rq, buf);
463 		} else {
464 			dev_kfree_skb(buf);
465 		}
466 		return;
467 	}
468 
469 	if (vi->mergeable_rx_bufs)
470 		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
471 	else if (vi->big_packets)
472 		skb = receive_big(dev, vi, rq, buf, len);
473 	else
474 		skb = receive_small(vi, buf, len);
475 
476 	if (unlikely(!skb))
477 		return;
478 
479 	hdr = skb_vnet_hdr(skb);
480 
481 	u64_stats_update_begin(&stats->rx_syncp);
482 	stats->rx_bytes += skb->len;
483 	stats->rx_packets++;
484 	u64_stats_update_end(&stats->rx_syncp);
485 
486 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
487 		pr_debug("Needs csum!\n");
488 		if (!skb_partial_csum_set(skb,
489 			  virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start),
490 			  virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset)))
491 			goto frame_err;
492 	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
493 		skb->ip_summed = CHECKSUM_UNNECESSARY;
494 	}
495 
496 	skb->protocol = eth_type_trans(skb, dev);
497 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
498 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
499 
500 	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
501 		pr_debug("GSO!\n");
502 		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
503 		case VIRTIO_NET_HDR_GSO_TCPV4:
504 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
505 			break;
506 		case VIRTIO_NET_HDR_GSO_UDP:
507 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
508 			break;
509 		case VIRTIO_NET_HDR_GSO_TCPV6:
510 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
511 			break;
512 		default:
513 			net_warn_ratelimited("%s: bad gso type %u.\n",
514 					     dev->name, hdr->hdr.gso_type);
515 			goto frame_err;
516 		}
517 
518 		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
519 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
520 
521 		skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev,
522 							    hdr->hdr.gso_size);
523 		if (skb_shinfo(skb)->gso_size == 0) {
524 			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
525 			goto frame_err;
526 		}
527 
528 		/* Header must be checked, and gso_segs computed. */
529 		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
530 		skb_shinfo(skb)->gso_segs = 0;
531 	}
532 
533 	skb_mark_napi_id(skb, &rq->napi);
534 
535 	napi_gro_receive(&rq->napi, skb);
536 	return;
537 
538 frame_err:
539 	dev->stats.rx_frame_errors++;
540 	dev_kfree_skb(skb);
541 }
542 
543 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
544 			     gfp_t gfp)
545 {
546 	struct sk_buff *skb;
547 	struct virtio_net_hdr_mrg_rxbuf *hdr;
548 	int err;
549 
550 	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
551 	if (unlikely(!skb))
552 		return -ENOMEM;
553 
554 	skb_put(skb, GOOD_PACKET_LEN);
555 
556 	hdr = skb_vnet_hdr(skb);
557 	sg_init_table(rq->sg, 2);
558 	sg_set_buf(rq->sg, hdr, vi->hdr_len);
559 
560 	err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
561 	if (unlikely(err < 0)) {
562 		dev_kfree_skb(skb);
563 		return err;
564 	}
565 
566 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
567 	if (err < 0)
568 		dev_kfree_skb(skb);
569 
570 	return err;
571 }
572 
573 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
574 			   gfp_t gfp)
575 {
576 	struct page *first, *list = NULL;
577 	char *p;
578 	int i, err, offset;
579 
580 	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
581 
582 	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
583 	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
584 		first = get_a_page(rq, gfp);
585 		if (!first) {
586 			if (list)
587 				give_pages(rq, list);
588 			return -ENOMEM;
589 		}
590 		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
591 
592 		/* chain new page in list head to match sg */
593 		first->private = (unsigned long)list;
594 		list = first;
595 	}
596 
597 	first = get_a_page(rq, gfp);
598 	if (!first) {
599 		give_pages(rq, list);
600 		return -ENOMEM;
601 	}
602 	p = page_address(first);
603 
604 	/* rq->sg[0], rq->sg[1] share the same page */
605 	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
606 	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
607 
608 	/* rq->sg[1] for data packet, from offset */
609 	offset = sizeof(struct padded_vnet_hdr);
610 	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
611 
612 	/* chain first in list head */
613 	first->private = (unsigned long)list;
614 	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
615 				  first, gfp);
616 	if (err < 0)
617 		give_pages(rq, first);
618 
619 	return err;
620 }
621 
622 static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
623 {
624 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
625 	unsigned int len;
626 
627 	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
628 			GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
629 	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
630 }
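/* Worked example, assuming 4 KiB pages and 64-byte alignment: with an EWMA
 * average of 1500 bytes the clamp raises it to GOOD_PACKET_LEN (1518), the
 * 12-byte header brings it to 1530, and ALIGN() rounds the allocation up to
 * 1536 bytes.
 */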
631 
632 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
633 {
634 	struct page_frag *alloc_frag = &rq->alloc_frag;
635 	char *buf;
636 	unsigned long ctx;
637 	int err;
638 	unsigned int len, hole;
639 
640 	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
641 	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
642 		return -ENOMEM;
643 
644 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
645 	ctx = mergeable_buf_to_ctx(buf, len);
646 	get_page(alloc_frag->page);
647 	alloc_frag->offset += len;
648 	hole = alloc_frag->size - alloc_frag->offset;
649 	if (hole < len) {
650 		/* To avoid internal fragmentation, if there is very likely not
651 		 * enough space for another buffer, add the remaining space to
652 		 * the current buffer. This extra space is not included in
653 		 * the truesize stored in ctx.
654 		 */
655 		len += hole;
656 		alloc_frag->offset += hole;
657 	}
658 
659 	sg_init_one(rq->sg, buf, len);
660 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
661 	if (err < 0)
662 		put_page(virt_to_head_page(buf));
663 
664 	return err;
665 }
666 
667 /*
668  * Returns false if we couldn't fill entirely (OOM).
669  *
670  * Normally run in the receive path, but can also be run from ndo_open
671  * before we're receiving packets, or from refill_work which is
672  * careful to disable receiving (using napi_disable).
673  */
674 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
675 			  gfp_t gfp)
676 {
677 	int err;
678 	bool oom;
679 
680 	gfp |= __GFP_COLD;
681 	do {
682 		if (vi->mergeable_rx_bufs)
683 			err = add_recvbuf_mergeable(rq, gfp);
684 		else if (vi->big_packets)
685 			err = add_recvbuf_big(vi, rq, gfp);
686 		else
687 			err = add_recvbuf_small(vi, rq, gfp);
688 
689 		oom = err == -ENOMEM;
690 		if (err)
691 			break;
692 	} while (rq->vq->num_free);
693 	virtqueue_kick(rq->vq);
694 	return !oom;
695 }
696 
697 static void skb_recv_done(struct virtqueue *rvq)
698 {
699 	struct virtnet_info *vi = rvq->vdev->priv;
700 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
701 
702 	/* Schedule NAPI; suppress further interrupts if successful. */
703 	if (napi_schedule_prep(&rq->napi)) {
704 		virtqueue_disable_cb(rvq);
705 		__napi_schedule(&rq->napi);
706 	}
707 }
708 
709 static void virtnet_napi_enable(struct receive_queue *rq)
710 {
711 	napi_enable(&rq->napi);
712 
713 	/* If all buffers were filled by the other side before we napi_enabled, we
714 	 * won't get another interrupt, so process any outstanding packets
715 	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
716 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
717 	if (napi_schedule_prep(&rq->napi)) {
718 		virtqueue_disable_cb(rq->vq);
719 		local_bh_disable();
720 		__napi_schedule(&rq->napi);
721 		local_bh_enable();
722 	}
723 }
724 
725 static void refill_work(struct work_struct *work)
726 {
727 	struct virtnet_info *vi =
728 		container_of(work, struct virtnet_info, refill.work);
729 	bool still_empty;
730 	int i;
731 
732 	for (i = 0; i < vi->curr_queue_pairs; i++) {
733 		struct receive_queue *rq = &vi->rq[i];
734 
735 		napi_disable(&rq->napi);
736 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
737 		virtnet_napi_enable(rq);
738 
739 		/* In theory, this can happen: if we don't get any buffers in
740 		 * we will *never* try to fill again.
741 		 */
742 		if (still_empty)
743 			schedule_delayed_work(&vi->refill, HZ/2);
744 	}
745 }
746 
747 static int virtnet_receive(struct receive_queue *rq, int budget)
748 {
749 	struct virtnet_info *vi = rq->vq->vdev->priv;
750 	unsigned int len, received = 0;
751 	void *buf;
752 
753 	while (received < budget &&
754 	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
755 		receive_buf(vi, rq, buf, len);
756 		received++;
757 	}
758 
759 	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
760 		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
761 			schedule_delayed_work(&vi->refill, 0);
762 	}
763 
764 	return received;
765 }
766 
767 static int virtnet_poll(struct napi_struct *napi, int budget)
768 {
769 	struct receive_queue *rq =
770 		container_of(napi, struct receive_queue, napi);
771 	unsigned int r, received;
772 
773 	received = virtnet_receive(rq, budget);
774 
775 	/* Out of packets? */
776 	if (received < budget) {
777 		r = virtqueue_enable_cb_prepare(rq->vq);
778 		napi_complete_done(napi, received);
779 		if (unlikely(virtqueue_poll(rq->vq, r)) &&
780 		    napi_schedule_prep(napi)) {
781 			virtqueue_disable_cb(rq->vq);
782 			__napi_schedule(napi);
783 		}
784 	}
785 
786 	return received;
787 }
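/* Note on the re-check above: virtqueue_enable_cb_prepare() re-arms the
 * callback and virtqueue_poll() then reports whether buffers arrived in the
 * window between the last virtqueue_get_buf() and re-arming; if so, NAPI is
 * rescheduled instead of risking a missed interrupt.
 */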
788 
789 #ifdef CONFIG_NET_RX_BUSY_POLL
790 /* must be called with local_bh_disable()d */
791 static int virtnet_busy_poll(struct napi_struct *napi)
792 {
793 	struct receive_queue *rq =
794 		container_of(napi, struct receive_queue, napi);
795 	struct virtnet_info *vi = rq->vq->vdev->priv;
796 	int r, received = 0, budget = 4;
797 
798 	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
799 		return LL_FLUSH_FAILED;
800 
801 	if (!napi_schedule_prep(napi))
802 		return LL_FLUSH_BUSY;
803 
804 	virtqueue_disable_cb(rq->vq);
805 
806 again:
807 	received += virtnet_receive(rq, budget);
808 
809 	r = virtqueue_enable_cb_prepare(rq->vq);
810 	clear_bit(NAPI_STATE_SCHED, &napi->state);
811 	if (unlikely(virtqueue_poll(rq->vq, r)) &&
812 	    napi_schedule_prep(napi)) {
813 		virtqueue_disable_cb(rq->vq);
814 		if (received < budget) {
815 			budget -= received;
816 			goto again;
817 		} else {
818 			__napi_schedule(napi);
819 		}
820 	}
821 
822 	return received;
823 }
824 #endif	/* CONFIG_NET_RX_BUSY_POLL */
825 
826 static int virtnet_open(struct net_device *dev)
827 {
828 	struct virtnet_info *vi = netdev_priv(dev);
829 	int i;
830 
831 	for (i = 0; i < vi->max_queue_pairs; i++) {
832 		if (i < vi->curr_queue_pairs)
833 			/* Make sure we have some buffers: if oom use wq. */
834 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
835 				schedule_delayed_work(&vi->refill, 0);
836 		virtnet_napi_enable(&vi->rq[i]);
837 	}
838 
839 	return 0;
840 }
841 
842 static void free_old_xmit_skbs(struct send_queue *sq)
843 {
844 	struct sk_buff *skb;
845 	unsigned int len;
846 	struct virtnet_info *vi = sq->vq->vdev->priv;
847 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
848 
849 	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
850 		pr_debug("Sent skb %p\n", skb);
851 
852 		u64_stats_update_begin(&stats->tx_syncp);
853 		stats->tx_bytes += skb->len;
854 		stats->tx_packets++;
855 		u64_stats_update_end(&stats->tx_syncp);
856 
857 		dev_kfree_skb_any(skb);
858 	}
859 }
860 
861 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
862 {
863 	struct virtio_net_hdr_mrg_rxbuf *hdr;
864 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
865 	struct virtnet_info *vi = sq->vq->vdev->priv;
866 	int num_sg;
867 	unsigned hdr_len = vi->hdr_len;
868 	bool can_push;
869 
870 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
871 
872 	can_push = vi->any_header_sg &&
873 		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
874 		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
875 	/* Even if we can, don't push here yet as this would skew
876 	 * csum_start offset below. */
877 	if (can_push)
878 		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
879 	else
880 		hdr = skb_vnet_hdr(skb);
881 
882 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
883 		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
884 		hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev,
885 						skb_checksum_start_offset(skb));
886 		hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev,
887 							 skb->csum_offset);
888 	} else {
889 		hdr->hdr.flags = 0;
890 		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
891 	}
892 
893 	if (skb_is_gso(skb)) {
894 		hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb));
895 		hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev,
896 						    skb_shinfo(skb)->gso_size);
897 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
898 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
899 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
900 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
901 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
902 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
903 		else
904 			BUG();
905 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
906 			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
907 	} else {
908 		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
909 		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
910 	}
911 
912 	if (vi->mergeable_rx_bufs)
913 		hdr->num_buffers = 0;
914 
915 	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
916 	if (can_push) {
917 		__skb_push(skb, hdr_len);
918 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
919 		if (unlikely(num_sg < 0))
920 			return num_sg;
921 		/* Pull header back to avoid skew in tx bytes calculations. */
922 		__skb_pull(skb, hdr_len);
923 	} else {
924 		sg_set_buf(sq->sg, hdr, hdr_len);
925 		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
926 		if (unlikely(num_sg < 0))
927 			return num_sg;
928 		num_sg++;
929 	}
930 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
931 }
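/* Descriptor layout produced above: when can_push holds, the virtio header is
 * written into the skb headroom and skb_to_sgvec() maps header plus data in
 * one pass; otherwise the header gets its own sg entry in front of the packet
 * data, hence the extra num_sg++.
 */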
932 
933 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
934 {
935 	struct virtnet_info *vi = netdev_priv(dev);
936 	int qnum = skb_get_queue_mapping(skb);
937 	struct send_queue *sq = &vi->sq[qnum];
938 	int err;
939 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
940 	bool kick = !skb->xmit_more;
941 
942 	/* Free up any pending old buffers before queueing new ones. */
943 	free_old_xmit_skbs(sq);
944 
945 	/* timestamp packet in software */
946 	skb_tx_timestamp(skb);
947 
948 	/* Try to transmit */
949 	err = xmit_skb(sq, skb);
950 
951 	/* This should not happen! */
952 	if (unlikely(err)) {
953 		dev->stats.tx_fifo_errors++;
954 		if (net_ratelimit())
955 			dev_warn(&dev->dev,
956 				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
957 		dev->stats.tx_dropped++;
958 		dev_kfree_skb_any(skb);
959 		return NETDEV_TX_OK;
960 	}
961 
962 	/* Don't wait up for transmitted skbs to be freed. */
963 	skb_orphan(skb);
964 	nf_reset(skb);
965 
966 	/* If running out of space, stop queue to avoid getting packets that we
967 	 * are then unable to transmit.
968 	 * An alternative would be to force queuing layer to requeue the skb by
969 	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
970 	 * returned in a normal path of operation: it means that driver is not
971 	 * maintaining the TX queue stop/start state properly, and causes
972 	 * the stack to do a non-trivial amount of useless work.
973 	 * Since most packets only take 1 or 2 ring slots, stopping the queue
974 	 * early means 16 slots are typically wasted.
975 	 */
976 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
977 		netif_stop_subqueue(dev, qnum);
978 		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
979 			/* More just got used, free them then recheck. */
980 			free_old_xmit_skbs(sq);
981 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
982 				netif_start_subqueue(dev, qnum);
983 				virtqueue_disable_cb(sq->vq);
984 			}
985 		}
986 	}
987 
988 	if (kick || netif_xmit_stopped(txq))
989 		virtqueue_kick(sq->vq);
990 
991 	return NETDEV_TX_OK;
992 }
993 
994 /*
995  * Send command via the control virtqueue and check status.  Commands
996  * supported by the hypervisor, as indicated by feature bits, should
997  * never fail unless improperly formatted.
998  */
999 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1000 				 struct scatterlist *out)
1001 {
1002 	struct scatterlist *sgs[4], hdr, stat;
1003 	unsigned out_num = 0, tmp;
1004 
1005 	/* Caller should know better */
1006 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1007 
1008 	vi->ctrl_status = ~0;
1009 	vi->ctrl_hdr.class = class;
1010 	vi->ctrl_hdr.cmd = cmd;
1011 	/* Add header */
1012 	sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
1013 	sgs[out_num++] = &hdr;
1014 
1015 	if (out)
1016 		sgs[out_num++] = out;
1017 
1018 	/* Add return status. */
1019 	sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
1020 	sgs[out_num] = &stat;
1021 
1022 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1023 	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1024 
1025 	if (unlikely(!virtqueue_kick(vi->cvq)))
1026 		return vi->ctrl_status == VIRTIO_NET_OK;
1027 
1028 	/* Spin for a response, the kick causes an ioport write, trapping
1029 	 * into the hypervisor, so the request should be handled immediately.
1030 	 */
1031 	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1032 	       !virtqueue_is_broken(vi->cvq))
1033 		cpu_relax();
1034 
1035 	return vi->ctrl_status == VIRTIO_NET_OK;
1036 }
1037 
1038 static int virtnet_set_mac_address(struct net_device *dev, void *p)
1039 {
1040 	struct virtnet_info *vi = netdev_priv(dev);
1041 	struct virtio_device *vdev = vi->vdev;
1042 	int ret;
1043 	struct sockaddr *addr = p;
1044 	struct scatterlist sg;
1045 
1046 	ret = eth_prepare_mac_addr_change(dev, p);
1047 	if (ret)
1048 		return ret;
1049 
1050 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1051 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
1052 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1053 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
1054 			dev_warn(&vdev->dev,
1055 				 "Failed to set mac address by vq command.\n");
1056 			return -EINVAL;
1057 		}
1058 	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1059 		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1060 		unsigned int i;
1061 
1062 		/* Naturally, this has an atomicity problem. */
1063 		for (i = 0; i < dev->addr_len; i++)
1064 			virtio_cwrite8(vdev,
1065 				       offsetof(struct virtio_net_config, mac) +
1066 				       i, addr->sa_data[i]);
1067 	}
1068 
1069 	eth_commit_mac_addr_change(dev, p);
1070 
1071 	return 0;
1072 }
1073 
1074 static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
1075 					       struct rtnl_link_stats64 *tot)
1076 {
1077 	struct virtnet_info *vi = netdev_priv(dev);
1078 	int cpu;
1079 	unsigned int start;
1080 
1081 	for_each_possible_cpu(cpu) {
1082 		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
1083 		u64 tpackets, tbytes, rpackets, rbytes;
1084 
1085 		do {
1086 			start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
1087 			tpackets = stats->tx_packets;
1088 			tbytes   = stats->tx_bytes;
1089 		} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
1090 
1091 		do {
1092 			start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
1093 			rpackets = stats->rx_packets;
1094 			rbytes   = stats->rx_bytes;
1095 		} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
1096 
1097 		tot->rx_packets += rpackets;
1098 		tot->tx_packets += tpackets;
1099 		tot->rx_bytes   += rbytes;
1100 		tot->tx_bytes   += tbytes;
1101 	}
1102 
1103 	tot->tx_dropped = dev->stats.tx_dropped;
1104 	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
1105 	tot->rx_dropped = dev->stats.rx_dropped;
1106 	tot->rx_length_errors = dev->stats.rx_length_errors;
1107 	tot->rx_frame_errors = dev->stats.rx_frame_errors;
1108 
1109 	return tot;
1110 }
1111 
1112 #ifdef CONFIG_NET_POLL_CONTROLLER
1113 static void virtnet_netpoll(struct net_device *dev)
1114 {
1115 	struct virtnet_info *vi = netdev_priv(dev);
1116 	int i;
1117 
1118 	for (i = 0; i < vi->curr_queue_pairs; i++)
1119 		napi_schedule(&vi->rq[i].napi);
1120 }
1121 #endif
1122 
1123 static void virtnet_ack_link_announce(struct virtnet_info *vi)
1124 {
1125 	rtnl_lock();
1126 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1127 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1128 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1129 	rtnl_unlock();
1130 }
1131 
1132 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1133 {
1134 	struct scatterlist sg;
1135 	struct virtio_net_ctrl_mq s;
1136 	struct net_device *dev = vi->dev;
1137 
1138 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1139 		return 0;
1140 
1141 	s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1142 	sg_init_one(&sg, &s, sizeof(s));
1143 
1144 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1145 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
1146 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
1147 			 queue_pairs);
1148 		return -EINVAL;
1149 	} else {
1150 		vi->curr_queue_pairs = queue_pairs;
1151 		/* virtnet_open() will refill when the device is brought up. */
1152 		if (dev->flags & IFF_UP)
1153 			schedule_delayed_work(&vi->refill, 0);
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 static int virtnet_close(struct net_device *dev)
1160 {
1161 	struct virtnet_info *vi = netdev_priv(dev);
1162 	int i;
1163 
1164 	/* Make sure refill_work doesn't re-enable napi! */
1165 	cancel_delayed_work_sync(&vi->refill);
1166 
1167 	for (i = 0; i < vi->max_queue_pairs; i++)
1168 		napi_disable(&vi->rq[i].napi);
1169 
1170 	return 0;
1171 }
1172 
1173 static void virtnet_set_rx_mode(struct net_device *dev)
1174 {
1175 	struct virtnet_info *vi = netdev_priv(dev);
1176 	struct scatterlist sg[2];
1177 	struct virtio_net_ctrl_mac *mac_data;
1178 	struct netdev_hw_addr *ha;
1179 	int uc_count;
1180 	int mc_count;
1181 	void *buf;
1182 	int i;
1183 
1184 	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
1185 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1186 		return;
1187 
1188 	vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
1189 	vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1190 
1191 	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
1192 
1193 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1194 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
1195 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1196 			 vi->ctrl_promisc ? "en" : "dis");
1197 
1198 	sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
1199 
1200 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1201 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1202 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1203 			 vi->ctrl_allmulti ? "en" : "dis");
1204 
1205 	uc_count = netdev_uc_count(dev);
1206 	mc_count = netdev_mc_count(dev);
1207 	/* MAC filter - use one buffer for both lists */
1208 	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1209 		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1210 	mac_data = buf;
1211 	if (!buf)
1212 		return;
1213 
1214 	sg_init_table(sg, 2);
1215 
1216 	/* Store the unicast list and count in the front of the buffer */
1217 	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
1218 	i = 0;
1219 	netdev_for_each_uc_addr(ha, dev)
1220 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1221 
1222 	sg_set_buf(&sg[0], mac_data,
1223 		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
1224 
1225 	/* multicast list and count fill the end */
1226 	mac_data = (void *)&mac_data->macs[uc_count][0];
1227 
1228 	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
1229 	i = 0;
1230 	netdev_for_each_mc_addr(ha, dev)
1231 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1232 
1233 	sg_set_buf(&sg[1], mac_data,
1234 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
1235 
1236 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1237 				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
1238 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1239 
1240 	kfree(buf);
1241 }
1242 
1243 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1244 				   __be16 proto, u16 vid)
1245 {
1246 	struct virtnet_info *vi = netdev_priv(dev);
1247 	struct scatterlist sg;
1248 
1249 	sg_init_one(&sg, &vid, sizeof(vid));
1250 
1251 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1252 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
1253 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
1254 	return 0;
1255 }
1256 
1257 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1258 				    __be16 proto, u16 vid)
1259 {
1260 	struct virtnet_info *vi = netdev_priv(dev);
1261 	struct scatterlist sg;
1262 
1263 	sg_init_one(&sg, &vid, sizeof(vid));
1264 
1265 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1266 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
1267 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
1268 	return 0;
1269 }
1270 
1271 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1272 {
1273 	int i;
1274 
1275 	if (vi->affinity_hint_set) {
1276 		for (i = 0; i < vi->max_queue_pairs; i++) {
1277 			virtqueue_set_affinity(vi->rq[i].vq, -1);
1278 			virtqueue_set_affinity(vi->sq[i].vq, -1);
1279 		}
1280 
1281 		vi->affinity_hint_set = false;
1282 	}
1283 }
1284 
1285 static void virtnet_set_affinity(struct virtnet_info *vi)
1286 {
1287 	int i;
1288 	int cpu;
1289 
1290 	/* In multiqueue mode, when the number of CPUs equals the number of
1291 	 * queue pairs, we let each queue pair be private to one CPU by
1292 	 * setting the affinity hint, eliminating the contention.
1293 	 */
1294 	if (vi->curr_queue_pairs == 1 ||
1295 	    vi->max_queue_pairs != num_online_cpus()) {
1296 		virtnet_clean_affinity(vi, -1);
1297 		return;
1298 	}
1299 
1300 	i = 0;
1301 	for_each_online_cpu(cpu) {
1302 		virtqueue_set_affinity(vi->rq[i].vq, cpu);
1303 		virtqueue_set_affinity(vi->sq[i].vq, cpu);
1304 		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
1305 		i++;
1306 	}
1307 
1308 	vi->affinity_hint_set = true;
1309 }
1310 
1311 static int virtnet_cpu_callback(struct notifier_block *nfb,
1312 			        unsigned long action, void *hcpu)
1313 {
1314 	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
1315 
1316 	switch(action & ~CPU_TASKS_FROZEN) {
1317 	case CPU_ONLINE:
1318 	case CPU_DOWN_FAILED:
1319 	case CPU_DEAD:
1320 		virtnet_set_affinity(vi);
1321 		break;
1322 	case CPU_DOWN_PREPARE:
1323 		virtnet_clean_affinity(vi, (long)hcpu);
1324 		break;
1325 	default:
1326 		break;
1327 	}
1328 
1329 	return NOTIFY_OK;
1330 }
1331 
1332 static void virtnet_get_ringparam(struct net_device *dev,
1333 				struct ethtool_ringparam *ring)
1334 {
1335 	struct virtnet_info *vi = netdev_priv(dev);
1336 
1337 	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1338 	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
1339 	ring->rx_pending = ring->rx_max_pending;
1340 	ring->tx_pending = ring->tx_max_pending;
1341 }
1342 
1343 
1344 static void virtnet_get_drvinfo(struct net_device *dev,
1345 				struct ethtool_drvinfo *info)
1346 {
1347 	struct virtnet_info *vi = netdev_priv(dev);
1348 	struct virtio_device *vdev = vi->vdev;
1349 
1350 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1351 	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
1352 	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
1353 
1354 }
1355 
1356 /* TODO: Eliminate OOO packets during switching */
1357 static int virtnet_set_channels(struct net_device *dev,
1358 				struct ethtool_channels *channels)
1359 {
1360 	struct virtnet_info *vi = netdev_priv(dev);
1361 	u16 queue_pairs = channels->combined_count;
1362 	int err;
1363 
1364 	/* We don't support separate rx/tx channels.
1365 	 * We don't allow setting 'other' channels.
1366 	 */
1367 	if (channels->rx_count || channels->tx_count || channels->other_count)
1368 		return -EINVAL;
1369 
1370 	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
1371 		return -EINVAL;
1372 
1373 	get_online_cpus();
1374 	err = virtnet_set_queues(vi, queue_pairs);
1375 	if (err) {
1376 		put_online_cpus();
1377 		goto err;
1378 	}
1379 	virtnet_set_affinity(vi);
1380 	put_online_cpus();
1381 
1382 	netif_set_real_num_tx_queues(dev, queue_pairs);
1383 	netif_set_real_num_rx_queues(dev, queue_pairs);
1384 err:
1385 	return err;
1386 }
1387 
1388 static void virtnet_get_channels(struct net_device *dev,
1389 				 struct ethtool_channels *channels)
1390 {
1391 	struct virtnet_info *vi = netdev_priv(dev);
1392 
1393 	channels->combined_count = vi->curr_queue_pairs;
1394 	channels->max_combined = vi->max_queue_pairs;
1395 	channels->max_other = 0;
1396 	channels->rx_count = 0;
1397 	channels->tx_count = 0;
1398 	channels->other_count = 0;
1399 }
1400 
1401 static const struct ethtool_ops virtnet_ethtool_ops = {
1402 	.get_drvinfo = virtnet_get_drvinfo,
1403 	.get_link = ethtool_op_get_link,
1404 	.get_ringparam = virtnet_get_ringparam,
1405 	.set_channels = virtnet_set_channels,
1406 	.get_channels = virtnet_get_channels,
1407 	.get_ts_info = ethtool_op_get_ts_info,
1408 };
1409 
1410 #define MIN_MTU 68
1411 #define MAX_MTU 65535
1412 
1413 static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
1414 {
1415 	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
1416 		return -EINVAL;
1417 	dev->mtu = new_mtu;
1418 	return 0;
1419 }
1420 
1421 static const struct net_device_ops virtnet_netdev = {
1422 	.ndo_open            = virtnet_open,
1423 	.ndo_stop   	     = virtnet_close,
1424 	.ndo_start_xmit      = start_xmit,
1425 	.ndo_validate_addr   = eth_validate_addr,
1426 	.ndo_set_mac_address = virtnet_set_mac_address,
1427 	.ndo_set_rx_mode     = virtnet_set_rx_mode,
1428 	.ndo_change_mtu	     = virtnet_change_mtu,
1429 	.ndo_get_stats64     = virtnet_stats,
1430 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
1431 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
1432 #ifdef CONFIG_NET_POLL_CONTROLLER
1433 	.ndo_poll_controller = virtnet_netpoll,
1434 #endif
1435 #ifdef CONFIG_NET_RX_BUSY_POLL
1436 	.ndo_busy_poll		= virtnet_busy_poll,
1437 #endif
1438 	.ndo_features_check	= passthru_features_check,
1439 };
1440 
1441 static void virtnet_config_changed_work(struct work_struct *work)
1442 {
1443 	struct virtnet_info *vi =
1444 		container_of(work, struct virtnet_info, config_work);
1445 	u16 v;
1446 
1447 	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
1448 				 struct virtio_net_config, status, &v) < 0)
1449 		return;
1450 
1451 	if (v & VIRTIO_NET_S_ANNOUNCE) {
1452 		netdev_notify_peers(vi->dev);
1453 		virtnet_ack_link_announce(vi);
1454 	}
1455 
1456 	/* Ignore unknown (future) status bits */
1457 	v &= VIRTIO_NET_S_LINK_UP;
1458 
1459 	if (vi->status == v)
1460 		return;
1461 
1462 	vi->status = v;
1463 
1464 	if (vi->status & VIRTIO_NET_S_LINK_UP) {
1465 		netif_carrier_on(vi->dev);
1466 		netif_tx_wake_all_queues(vi->dev);
1467 	} else {
1468 		netif_carrier_off(vi->dev);
1469 		netif_tx_stop_all_queues(vi->dev);
1470 	}
1471 }
1472 
1473 static void virtnet_config_changed(struct virtio_device *vdev)
1474 {
1475 	struct virtnet_info *vi = vdev->priv;
1476 
1477 	schedule_work(&vi->config_work);
1478 }
1479 
1480 static void virtnet_free_queues(struct virtnet_info *vi)
1481 {
1482 	int i;
1483 
1484 	for (i = 0; i < vi->max_queue_pairs; i++) {
1485 		napi_hash_del(&vi->rq[i].napi);
1486 		netif_napi_del(&vi->rq[i].napi);
1487 	}
1488 
1489 	/* We called napi_hash_del() before netif_napi_del(), so
1490 	 * we need to respect an RCU grace period before freeing vi->rq.
1491 	 */
1492 	synchronize_net();
1493 
1494 	kfree(vi->rq);
1495 	kfree(vi->sq);
1496 }
1497 
1498 static void free_receive_bufs(struct virtnet_info *vi)
1499 {
1500 	int i;
1501 
1502 	for (i = 0; i < vi->max_queue_pairs; i++) {
1503 		while (vi->rq[i].pages)
1504 			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1505 	}
1506 }
1507 
1508 static void free_receive_page_frags(struct virtnet_info *vi)
1509 {
1510 	int i;
1511 	for (i = 0; i < vi->max_queue_pairs; i++)
1512 		if (vi->rq[i].alloc_frag.page)
1513 			put_page(vi->rq[i].alloc_frag.page);
1514 }
1515 
1516 static void free_unused_bufs(struct virtnet_info *vi)
1517 {
1518 	void *buf;
1519 	int i;
1520 
1521 	for (i = 0; i < vi->max_queue_pairs; i++) {
1522 		struct virtqueue *vq = vi->sq[i].vq;
1523 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
1524 			dev_kfree_skb(buf);
1525 	}
1526 
1527 	for (i = 0; i < vi->max_queue_pairs; i++) {
1528 		struct virtqueue *vq = vi->rq[i].vq;
1529 
1530 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1531 			if (vi->mergeable_rx_bufs) {
1532 				unsigned long ctx = (unsigned long)buf;
1533 				void *base = mergeable_ctx_to_buf_address(ctx);
1534 				put_page(virt_to_head_page(base));
1535 			} else if (vi->big_packets) {
1536 				give_pages(&vi->rq[i], buf);
1537 			} else {
1538 				dev_kfree_skb(buf);
1539 			}
1540 		}
1541 	}
1542 }
1543 
1544 static void virtnet_del_vqs(struct virtnet_info *vi)
1545 {
1546 	struct virtio_device *vdev = vi->vdev;
1547 
1548 	virtnet_clean_affinity(vi, -1);
1549 
1550 	vdev->config->del_vqs(vdev);
1551 
1552 	virtnet_free_queues(vi);
1553 }
1554 
1555 static int virtnet_find_vqs(struct virtnet_info *vi)
1556 {
1557 	vq_callback_t **callbacks;
1558 	struct virtqueue **vqs;
1559 	int ret = -ENOMEM;
1560 	int i, total_vqs;
1561 	const char **names;
1562 
1563 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
1564 	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
1565 	 * possible control vq.
1566 	 */
1567 	total_vqs = vi->max_queue_pairs * 2 +
1568 		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
1569 
1570 	/* Allocate space for find_vqs parameters */
1571 	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
1572 	if (!vqs)
1573 		goto err_vq;
1574 	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
1575 	if (!callbacks)
1576 		goto err_callback;
1577 	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
1578 	if (!names)
1579 		goto err_names;
1580 
1581 	/* Parameters for control virtqueue, if any */
1582 	if (vi->has_cvq) {
1583 		callbacks[total_vqs - 1] = NULL;
1584 		names[total_vqs - 1] = "control";
1585 	}
1586 
1587 	/* Allocate/initialize parameters for send/receive virtqueues */
1588 	for (i = 0; i < vi->max_queue_pairs; i++) {
1589 		callbacks[rxq2vq(i)] = skb_recv_done;
1590 		callbacks[txq2vq(i)] = skb_xmit_done;
1591 		sprintf(vi->rq[i].name, "input.%d", i);
1592 		sprintf(vi->sq[i].name, "output.%d", i);
1593 		names[rxq2vq(i)] = vi->rq[i].name;
1594 		names[txq2vq(i)] = vi->sq[i].name;
1595 	}
1596 
1597 	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
1598 					 names);
1599 	if (ret)
1600 		goto err_find;
1601 
1602 	if (vi->has_cvq) {
1603 		vi->cvq = vqs[total_vqs - 1];
1604 		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
1605 			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1606 	}
1607 
1608 	for (i = 0; i < vi->max_queue_pairs; i++) {
1609 		vi->rq[i].vq = vqs[rxq2vq(i)];
1610 		vi->sq[i].vq = vqs[txq2vq(i)];
1611 	}
1612 
1613 	kfree(names);
1614 	kfree(callbacks);
1615 	kfree(vqs);
1616 
1617 	return 0;
1618 
1619 err_find:
1620 	kfree(names);
1621 err_names:
1622 	kfree(callbacks);
1623 err_callback:
1624 	kfree(vqs);
1625 err_vq:
1626 	return ret;
1627 }
1628 
1629 static int virtnet_alloc_queues(struct virtnet_info *vi)
1630 {
1631 	int i;
1632 
1633 	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
1634 	if (!vi->sq)
1635 		goto err_sq;
1636 	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
1637 	if (!vi->rq)
1638 		goto err_rq;
1639 
1640 	INIT_DELAYED_WORK(&vi->refill, refill_work);
1641 	for (i = 0; i < vi->max_queue_pairs; i++) {
1642 		vi->rq[i].pages = NULL;
1643 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
1644 			       napi_weight);
1645 		napi_hash_add(&vi->rq[i].napi);
1646 
1647 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
1648 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
1649 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
1650 	}
1651 
1652 	return 0;
1653 
1654 err_rq:
1655 	kfree(vi->sq);
1656 err_sq:
1657 	return -ENOMEM;
1658 }
1659 
1660 static int init_vqs(struct virtnet_info *vi)
1661 {
1662 	int ret;
1663 
1664 	/* Allocate send & receive queues */
1665 	ret = virtnet_alloc_queues(vi);
1666 	if (ret)
1667 		goto err;
1668 
1669 	ret = virtnet_find_vqs(vi);
1670 	if (ret)
1671 		goto err_free;
1672 
1673 	get_online_cpus();
1674 	virtnet_set_affinity(vi);
1675 	put_online_cpus();
1676 
1677 	return 0;
1678 
1679 err_free:
1680 	virtnet_free_queues(vi);
1681 err:
1682 	return ret;
1683 }
1684 
1685 #ifdef CONFIG_SYSFS
1686 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
1687 		struct rx_queue_attribute *attribute, char *buf)
1688 {
1689 	struct virtnet_info *vi = netdev_priv(queue->dev);
1690 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
1691 	struct ewma_pkt_len *avg;
1692 
1693 	BUG_ON(queue_index >= vi->max_queue_pairs);
1694 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
1695 	return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
1696 }
1697 
1698 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
1699 	__ATTR_RO(mergeable_rx_buffer_size);
1700 
1701 static struct attribute *virtio_net_mrg_rx_attrs[] = {
1702 	&mergeable_rx_buffer_size_attribute.attr,
1703 	NULL
1704 };
1705 
1706 static const struct attribute_group virtio_net_mrg_rx_group = {
1707 	.name = "virtio_net",
1708 	.attrs = virtio_net_mrg_rx_attrs
1709 };
1710 #endif
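/* With CONFIG_SYSFS the attribute group above is exposed per receive queue;
 * on a typical system (path illustrative) reading
 *   /sys/class/net/<iface>/queues/rx-<n>/virtio_net/mergeable_rx_buffer_size
 * reports the current buffer size estimate derived from the per-queue EWMA
 * via get_mergeable_buf_len().
 */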
1711 
1712 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
1713 				    unsigned int fbit,
1714 				    const char *fname, const char *dname)
1715 {
1716 	if (!virtio_has_feature(vdev, fbit))
1717 		return false;
1718 
1719 	dev_err(&vdev->dev, "device advertises feature %s but not %s",
1720 		fname, dname);
1721 
1722 	return true;
1723 }
1724 
1725 #define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
1726 	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
1727 
1728 static bool virtnet_validate_features(struct virtio_device *vdev)
1729 {
1730 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
1731 	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
1732 			     "VIRTIO_NET_F_CTRL_VQ") ||
1733 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
1734 			     "VIRTIO_NET_F_CTRL_VQ") ||
1735 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
1736 			     "VIRTIO_NET_F_CTRL_VQ") ||
1737 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
1738 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
1739 			     "VIRTIO_NET_F_CTRL_VQ"))) {
1740 		return false;
1741 	}
1742 
1743 	return true;
1744 }
1745 
1746 static int virtnet_probe(struct virtio_device *vdev)
1747 {
1748 	int i, err;
1749 	struct net_device *dev;
1750 	struct virtnet_info *vi;
1751 	u16 max_queue_pairs;
1752 
1753 	if (!vdev->config->get) {
1754 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
1755 			__func__);
1756 		return -EINVAL;
1757 	}
1758 
1759 	if (!virtnet_validate_features(vdev))
1760 		return -EINVAL;
1761 
1762 	/* Find if host supports multiqueue virtio_net device */
1763 	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
1764 				   struct virtio_net_config,
1765 				   max_virtqueue_pairs, &max_queue_pairs);
1766 
1767 	/* We need at least 2 queues */
1768 	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1769 	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1770 	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1771 		max_queue_pairs = 1;
1772 
1773 	/* Allocate ourselves a network device with room for our info */
1774 	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
1775 	if (!dev)
1776 		return -ENOMEM;
1777 
1778 	/* Set up network device as normal. */
1779 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
1780 	dev->netdev_ops = &virtnet_netdev;
1781 	dev->features = NETIF_F_HIGHDMA;
1782 
1783 	dev->ethtool_ops = &virtnet_ethtool_ops;
1784 	SET_NETDEV_DEV(dev, &vdev->dev);
1785 
1786 	/* Do we support "hardware" checksums? */
1787 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1788 		/* This opens up the world of extra features. */
1789 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1790 		if (csum)
1791 			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1792 
1793 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1794 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
1795 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
1796 		}
1797 		/* Individual feature bits: what can host handle? */
1798 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
1799 			dev->hw_features |= NETIF_F_TSO;
1800 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
1801 			dev->hw_features |= NETIF_F_TSO6;
1802 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1803 			dev->hw_features |= NETIF_F_TSO_ECN;
1804 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1805 			dev->hw_features |= NETIF_F_UFO;
1806 
1807 		dev->features |= NETIF_F_GSO_ROBUST;
1808 
1809 		if (gso)
1810 			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1811 		/* (!csum && gso) case will be fixed by register_netdev() */
1812 	}
1813 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
1814 		dev->features |= NETIF_F_RXCSUM;
1815 
1816 	dev->vlan_features = dev->features;
1817 
1818 	/* Configuration may specify what MAC to use.  Otherwise random. */
1819 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
1820 		virtio_cread_bytes(vdev,
1821 				   offsetof(struct virtio_net_config, mac),
1822 				   dev->dev_addr, dev->addr_len);
1823 	else
1824 		eth_hw_addr_random(dev);
1825 
1826 	/* Set up our device-specific information */
1827 	vi = netdev_priv(dev);
1828 	vi->dev = dev;
1829 	vi->vdev = vdev;
1830 	vdev->priv = vi;
1831 	vi->stats = alloc_percpu(struct virtnet_stats);
1832 	err = -ENOMEM;
1833 	if (vi->stats == NULL)
1834 		goto free;
1835 
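	/* Initialize the per-CPU u64 stats synchronization structures. */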
1836 	for_each_possible_cpu(i) {
1837 		struct virtnet_stats *virtnet_stats;
1838 		virtnet_stats = per_cpu_ptr(vi->stats, i);
1839 		u64_stats_init(&virtnet_stats->tx_syncp);
1840 		u64_stats_init(&virtnet_stats->rx_syncp);
1841 	}
1842 
1843 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
1844 
1845 	/* If we can receive ANY GSO packets, we must allocate large ones. */
1846 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1847 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1848 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
1849 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
1850 		vi->big_packets = true;
1851 
1852 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
1853 		vi->mergeable_rx_bufs = true;
1854 
1855 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
1856 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
1857 		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1858 	else
1859 		vi->hdr_len = sizeof(struct virtio_net_hdr);
1860 
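	/* With ANY_LAYOUT or a modern (VERSION_1) device the virtio header
	 * does not need its own descriptor, so it can be prepended in the
	 * skb headroom (see needed_headroom below).
	 */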
1861 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
1862 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
1863 		vi->any_header_sg = true;
1864 
1865 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1866 		vi->has_cvq = true;
1867 
1868 	if (vi->any_header_sg)
1869 		dev->needed_headroom = vi->hdr_len;
1870 
1871 	/* Use single tx/rx queue pair as default */
1872 	vi->curr_queue_pairs = 1;
1873 	vi->max_queue_pairs = max_queue_pairs;
1874 
1875 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
1876 	err = init_vqs(vi);
1877 	if (err)
1878 		goto free_stats;
1879 
1880 #ifdef CONFIG_SYSFS
1881 	if (vi->mergeable_rx_bufs)
1882 		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
1883 #endif
1884 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
1885 	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
1886 
1887 	err = register_netdev(dev);
1888 	if (err) {
1889 		pr_debug("virtio_net: registering device failed\n");
1890 		goto free_vqs;
1891 	}
1892 
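	/* Set DRIVER_OK before any receive buffers are posted below. */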
1893 	virtio_device_ready(vdev);
1894 
1895 	/* Last of all, set up some receive buffers. */
1896 	for (i = 0; i < vi->curr_queue_pairs; i++) {
1897 		try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
1898 
1899 		/* If we didn't even get one input buffer, we're useless. */
1900 		if (vi->rq[i].vq->num_free ==
1901 		    virtqueue_get_vring_size(vi->rq[i].vq)) {
1902 			free_unused_bufs(vi);
1903 			err = -ENOMEM;
1904 			goto free_recv_bufs;
1905 		}
1906 	}
1907 
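	/* Re-spread virtqueue affinity when CPUs are hotplugged. */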
1908 	vi->nb.notifier_call = &virtnet_cpu_callback;
1909 	err = register_hotcpu_notifier(&vi->nb);
1910 	if (err) {
1911 		pr_debug("virtio_net: registering cpu notifier failed\n");
1912 		goto free_recv_bufs;
1913 	}
1914 
1915 	/* Assume link up if device can't report link status,
1916 	 * otherwise get link status from config. */
1917 	netif_carrier_off(dev);
1918 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1919 		schedule_work(&vi->config_work);
1920 	} else {
1921 		vi->status = VIRTIO_NET_S_LINK_UP;
1922 		netif_carrier_on(dev);
1923 	}
1924 
1925 	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
1926 		 dev->name, max_queue_pairs);
1927 
1928 	return 0;
1929 
1930 free_recv_bufs:
1931 	vi->vdev->config->reset(vdev);
1932 
1933 	free_receive_bufs(vi);
1934 	unregister_netdev(dev);
1935 free_vqs:
1936 	cancel_delayed_work_sync(&vi->refill);
1937 	free_receive_page_frags(vi);
1938 	virtnet_del_vqs(vi);
1939 free_stats:
1940 	free_percpu(vi->stats);
1941 free:
1942 	free_netdev(dev);
1943 	return err;
1944 }
1945 
1946 static void remove_vq_common(struct virtnet_info *vi)
1947 {
1948 	vi->vdev->config->reset(vi->vdev);
1949 
1950 	/* Free unused buffers in both send and recv, if any. */
1951 	free_unused_bufs(vi);
1952 
1953 	free_receive_bufs(vi);
1954 
1955 	free_receive_page_frags(vi);
1956 
1957 	virtnet_del_vqs(vi);
1958 }
1959 
1960 static void virtnet_remove(struct virtio_device *vdev)
1961 {
1962 	struct virtnet_info *vi = vdev->priv;
1963 
1964 	unregister_hotcpu_notifier(&vi->nb);
1965 
1966 	/* Make sure no work handler is accessing the device. */
1967 	flush_work(&vi->config_work);
1968 
1969 	unregister_netdev(vi->dev);
1970 
1971 	remove_vq_common(vi);
1972 
1973 	free_percpu(vi->stats);
1974 	free_netdev(vi->dev);
1975 }
1976 
1977 #ifdef CONFIG_PM_SLEEP
1978 static int virtnet_freeze(struct virtio_device *vdev)
1979 {
1980 	struct virtnet_info *vi = vdev->priv;
1981 	int i;
1982 
1983 	unregister_hotcpu_notifier(&vi->nb);
1984 
1985 	/* Make sure no work handler is accessing the device */
1986 	flush_work(&vi->config_work);
1987 
1988 	netif_device_detach(vi->dev);
1989 	cancel_delayed_work_sync(&vi->refill);
1990 
1991 	if (netif_running(vi->dev)) {
1992 		for (i = 0; i < vi->max_queue_pairs; i++)
1993 			napi_disable(&vi->rq[i].napi);
1994 	}
1995 
1996 	remove_vq_common(vi);
1997 
1998 	return 0;
1999 }
2000 
2001 static int virtnet_restore(struct virtio_device *vdev)
2002 {
2003 	struct virtnet_info *vi = vdev->priv;
2004 	int err, i;
2005 
2006 	err = init_vqs(vi);
2007 	if (err)
2008 		return err;
2009 
2010 	virtio_device_ready(vdev);
2011 
2012 	if (netif_running(vi->dev)) {
2013 		for (i = 0; i < vi->curr_queue_pairs; i++)
2014 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2015 				schedule_delayed_work(&vi->refill, 0);
2016 
2017 		for (i = 0; i < vi->max_queue_pairs; i++)
2018 			virtnet_napi_enable(&vi->rq[i]);
2019 	}
2020 
2021 	netif_device_attach(vi->dev);
2022 
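	/* Re-program the active number of queue pairs; needs RTNL. */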
2023 	rtnl_lock();
2024 	virtnet_set_queues(vi, vi->curr_queue_pairs);
2025 	rtnl_unlock();
2026 
2027 	err = register_hotcpu_notifier(&vi->nb);
2028 	if (err)
2029 		return err;
2030 
2031 	return 0;
2032 }
2033 #endif
2034 
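/* Bind to any virtio device that identifies itself as a network device. */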
2035 static struct virtio_device_id id_table[] = {
2036 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
2037 	{ 0 },
2038 };
2039 
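/* Feature bits the driver is willing to negotiate with the device. */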
2040 static unsigned int features[] = {
2041 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
2042 	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
2043 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
2044 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
2045 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
2046 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
2047 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
2048 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
2049 	VIRTIO_NET_F_CTRL_MAC_ADDR,
2050 	VIRTIO_F_ANY_LAYOUT,
2051 };
2052 
2053 static struct virtio_driver virtio_net_driver = {
2054 	.feature_table = features,
2055 	.feature_table_size = ARRAY_SIZE(features),
2056 	.driver.name =	KBUILD_MODNAME,
2057 	.driver.owner =	THIS_MODULE,
2058 	.id_table =	id_table,
2059 	.probe =	virtnet_probe,
2060 	.remove =	virtnet_remove,
2061 	.config_changed = virtnet_config_changed,
2062 #ifdef CONFIG_PM_SLEEP
2063 	.freeze =	virtnet_freeze,
2064 	.restore =	virtnet_restore,
2065 #endif
2066 };
2067 
2068 module_virtio_driver(virtio_net_driver);
2069 
2070 MODULE_DEVICE_TABLE(virtio, id_table);
2071 MODULE_DESCRIPTION("Virtio network driver");
2072 MODULE_LICENSE("GPL");
2073