// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"

static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
				       " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			 (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			 (1ULL << VIRTIO_F_RING_RESET)
};

enum {
	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
	struct rcu_head rcu;
};

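/* RX buffers are pulled from the backend's ptr_ring in batches of
 * VHOST_NET_BATCH and cached in a vhost_net_buf; head and tail index
 * into the cached queue array.
 */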
#define VHOST_NET_BATCH 64
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* Number of XDP frames batched */
	int batched_xdp;
	/* an array of userspace buffers info */
	struct ubuf_info_msgzc *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
	/* Batched XDP buffs */
	struct xdp_buff *xdp;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
	/* Private page frag cache */
	struct page_frag_cache pf_cache;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);
	++rxq->head;
	return ret;
}

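/* Refill the RX buffer cache by consuming up to VHOST_NET_BATCH
 * pointers from the backend ptr_ring. Returns the number of entries
 * obtained.
 */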
static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					      VHOST_NET_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}

static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

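/* Drop one reference. Waiters are woken once the refcount reaches
 * zero; the remaining count is returned so callers can decide whether
 * to kick the poll thread.
 */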
static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r;

	rcu_read_lock();
	r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	rcu_read_unlock();
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree_rcu(ubufs, rcu);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info =
			kmalloc_array(UIO_MAXIOV,
				      sizeof(*n->vqs[i].ubuf_info),
				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

static bool vhost_sock_xdp(struct socket *sock)
{
	return sock_flag(sock->sk, SOCK_XDP);
}

/* The lower device driver may complete DMAs out of order. upend_idx
 * tracks the end of the used idx, done_idx tracks the head. Once the
 * lower device has completed DMA contiguously, we signal the used idx
 * to the KVM guest.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

static void vhost_zerocopy_complete(struct sk_buff *skb,
				    struct ubuf_info *ubuf_base, bool success)
{
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this descriptor's buffers as DMA done */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

static const struct ubuf_info_ops vhost_ubuf_ops = {
	.complete = vhost_zerocopy_complete,
};

static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vhost_vq_get_backend(vq))
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vhost_vq_get_backend(vq);
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}

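/* Flush the batched XDP frames to the backend with one sendmsg() call
 * carrying a TUN_MSG_PTR control block, then signal the used ring. If
 * the send fails, the frames' pages are freed and the batch dropped.
 */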
static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int i, err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	msghdr->msg_controllen = sizeof(ctl);
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		vq_err(&nvq->vq, "Fail to batch sending packets\n");

		/* free pages owned by XDP; since this is an unlikely error path,
		 * keep it simple and avoid more complex bulk update for the
		 * used pages
		 */
		for (i = 0; i < nvq->batched_xdp; ++i)
			put_page(virt_to_head_page(nvq->xdp[i].data));
		nvq->batched_xdp = 0;
		nvq->done_idx = 0;
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}

static int sock_has_rx_data(struct socket *sock)
{
	if (unlikely(!sock))
		return 0;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return skb_queue_empty(&sock->sk->sk_receive_queue);
}

static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		vhost_poll_queue(&vq->poll);
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
}

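/* Busy poll the paired virtqueue: when called from the RX handler
 * (poll_rx == true) this spins on the TX vq, and vice versa. Polling
 * stops when there is other vq work pending, data becomes available,
 * or the busyloop timeout expires.
 */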
static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{
	unsigned long busyloop_timeout;
	unsigned long endtime;
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	/* Try to hold the vq mutex of the paired virtqueue. We can't
	 * use mutex_lock() here since we could not guarantee a
	 * consistent lock ordering.
	 */
	if (!mutex_trylock(&vq->mutex))
		return;

	vhost_disable_notify(&net->dev, vq);
	sock = vhost_vq_get_backend(rvq);

	busyloop_timeout = poll_rx ? rvq->busyloop_timeout :
				     tvq->busyloop_timeout;

	preempt_disable();
	endtime = busy_clock() + busyloop_timeout;

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_vq_has_work(vq)) {
			*busyloop_intr = true;
			break;
		}

		if ((sock_has_rx_data(sock) &&
		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
		    !vhost_vq_avail_empty(&net->dev, tvq))
			break;

		cpu_relax();
	}

	preempt_enable();

	if (poll_rx || sock_has_rx_data(sock))
		vhost_net_busy_poll_try_queue(net, vq);
	else if (!poll_rx) /* On tx here, sock has no rx data. */
		vhost_enable_notify(&net->dev, rvq);

	mutex_unlock(&vq->mutex);
}

static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;

	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Flush batched packets first */
		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
			vhost_tx_batch(net, tnvq,
				       vhost_vq_get_backend(tvq),
				       msghdr);

		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

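/* Limit outstanding zerocopy DMAs to VHOST_MAX_PEND or a quarter of
 * the ring size, whichever is smaller.
 */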
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}

static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{
	/* Skip header. TODO: support TSO. */
	size_t len = iov_length(vq->iov, out);

	iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
	iov_iter_advance(iter, hdr_size);

	return iov_iter_count(iter);
}

static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);

	if (ret < 0 || ret == vq->num)
		return ret;

	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
			*out, *in);
		return -EFAULT;
	}

	/* Sanity check */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
			*len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return ret;
}

static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
	return total_len < VHOST_NET_WEIGHT &&
	       !vhost_vq_avail_empty(vq->dev, vq);
}

#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

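/* Copy one TX descriptor into a page-fragment buffer and stage it as
 * an xdp_buff in nvq->xdp[] for batched submission. Returns -ENOSPC
 * when the packet does not fit in a single page, which tells the
 * caller to fall back to the regular sendmsg() path.
 */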
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
					     dev);
	struct socket *sock = vhost_vq_get_backend(vq);
	struct virtio_net_hdr *gso;
	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
	struct tun_xdp_hdr *hdr;
	size_t len = iov_iter_count(from);
	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
	int sock_hlen = nvq->sock_hlen;
	void *buf;
	int copied;
	int ret;

	if (unlikely(len < nvq->sock_hlen))
		return -EFAULT;

	if (SKB_DATA_ALIGN(len + pad) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return -ENOSPC;

	buflen += SKB_DATA_ALIGN(len + pad);
	buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
				    SMP_CACHE_BYTES);
	if (unlikely(!buf))
		return -ENOMEM;

	copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
				sock_hlen, from);
	if (copied != sock_hlen) {
		ret = -EFAULT;
		goto err;
	}

	hdr = buf;
	gso = &hdr->gso;

	if (!sock_hlen)
		memset(buf, 0, pad);

	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    vhost16_to_cpu(vq, gso->csum_start) +
	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
	    vhost16_to_cpu(vq, gso->hdr_len)) {
		gso->hdr_len = cpu_to_vhost16(vq,
			       vhost16_to_cpu(vq, gso->csum_start) +
			       vhost16_to_cpu(vq, gso->csum_offset) + 2);

		if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
			ret = -EINVAL;
			goto err;
		}
	}

	len -= sock_hlen;
	copied = copy_from_iter(buf + pad, len, from);
	if (copied != len) {
		ret = -EFAULT;
		goto err;
	}

	xdp_init_buff(xdp, buflen, NULL);
	xdp_prepare_buff(xdp, buf, pad, len, true);
	hdr->buflen = buflen;

	++nvq->batched_xdp;

	return 0;

err:
	page_frag_free(buf);
	return ret;
}

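/* Non-zerocopy TX path: descriptors are staged as XDP buffers and
 * flushed in batches of VHOST_NET_BATCH when the socket's sndbuf is
 * unlimited, otherwise each packet is sent with its own sendmsg().
 */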
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	int sent_pkts = 0;
	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);

	do {
		bool busyloop_intr = false;

		if (nvq->done_idx == VHOST_NET_BATCH)
			vhost_tx_batch(net, nvq, sock, &msg);

		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev,
								vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		total_len += len;

		/* For simplicity, TX batching is only enabled if
		 * sndbuf is unlimited.
		 */
		if (sock_can_batch) {
			err = vhost_net_build_xdp(nvq, &msg.msg_iter);
			if (!err) {
				goto done;
			} else if (unlikely(err != -ENOSPC)) {
				vhost_tx_batch(net, nvq, sock, &msg);
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}

			/* We can't build XDP buff, go for single
			 * packet path but let's flush batched
			 * packets.
			 */
			vhost_tx_batch(net, nvq, sock, &msg);
			msg.msg_control = NULL;
		} else {
			if (tx_can_batch(vq, total_len))
				msg.msg_flags |= MSG_MORE;
			else
				msg.msg_flags &= ~MSG_MORE;
		}

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Fail to send packet: err %d", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
done:
		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
		vq->heads[nvq->done_idx].len = 0;
		++nvq->done_idx;
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));

	vhost_tx_batch(net, nvq, sock, &msg);
}

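/* Zerocopy TX path: buffers of at least VHOST_GOODCOPY_LEN are handed
 * to the lower device via a TUN_MSG_UBUF control block and completed
 * asynchronously by vhost_zerocopy_complete(); shorter packets, or a
 * backlog of pending DMAs, fall back to copying.
 */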
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct tun_msg_ctl ctl;
	size_t len, total_len = 0;
	int err;
	struct vhost_net_ubuf_ref *ubufs;
	struct ubuf_info_msgzc *ubuf;
	bool zcopy_used;
	int sent_pkts = 0;

	do {
		bool busyloop_intr;

		/* Release DMA-done buffers first */
		vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		zcopy_used = len >= VHOST_GOODCOPY_LEN
			     && !vhost_exceeds_maxpend(net)
			     && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			ubuf = nvq->ubuf_info + nvq->upend_idx;
			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			ubuf->ubuf.ops = &vhost_ubuf_ops;
			ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
			refcount_set(&ubuf->ubuf.refcnt, 1);
			msg.msg_control = &ctl;
			ctl.type = TUN_MSG_UBUF;
			ctl.ptr = &ubuf->ubuf;
			msg.msg_controllen = sizeof(ctl);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		total_len += len;
		if (tx_can_batch(vq, total_len) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;

			if (zcopy_used) {
				if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
					vhost_net_ubuf_put(ubufs);
				if (retry)
					nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
						% UIO_MAXIOV;
				else
					vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
			}
			if (retry) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Fail to send packet: err %d", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);

out:
	mutex_unlock(&vq->mutex);
}

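/* Peek at the length of the next RX packet: from the cached ptr_ring
 * batch when a tap backend is attached, otherwise from the socket's
 * receive queue.
 */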
static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int len = peek_head_len(rnvq, sk);

	if (!len && rvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_net_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 len;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned in, log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	bool busyloop_intr = false;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;
	int recv_pkts = 0;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	do {
		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
						      &busyloop_intr);
		if (!sock_len)
			break;
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		busyloop_intr = false;
		if (nvq->rx_ring)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely(vhost_hlen)) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: len %d, expected %zd\n",
				 err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
				       "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_NET_BATCH)
			vhost_net_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len,
					vq->iov, in);
		total_len += vhost_len;
	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));

	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);
	else if (!sock_len)
		vhost_net_enable_vq(net, vq);
out:
	vhost_net_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	struct xdp_buff *xdp;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
	if (!xdp) {
		kfree(vqs);
		kvfree(n);
		kfree(queue);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].batched_xdp = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
		       UIO_MAXIOV + VHOST_NET_BATCH,
		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
		       NULL);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev,
			vqs[VHOST_NET_VQ_TX]);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev,
			vqs[VHOST_NET_VQ_RX]);

	f->private_data = n;
	n->pf_cache.va = NULL;

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vhost_vq_get_backend(vq);
	vhost_net_disable_vq(n, vq);
	vhost_vq_set_backend(vq, NULL);
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_dev_flush(&n->dev);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
	kfree(n->dev.vqs);
	page_frag_cache_drain(&n->pf_cache);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	if (sock->sk->sk_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
	struct ptr_ring *ring;

	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

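/* VHOST_NET_SET_BACKEND: attach a tap or raw packet socket, given by
 * fd, to a virtqueue, or detach the backend with fd == -1. For the RX
 * queue the tap ptr_ring is also cached for batched consumption.
 */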
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	if (fd == -1)
		vhost_clear_msg(&n->dev);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vhost_vq_get_backend(vq);
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vhost_vq_set_backend(vq, sock);
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;
		if (index == VHOST_NET_VQ_RX) {
			if (sock)
				nvq->rx_ring = get_tap_ptr_ring(sock->file);
			else
				nvq->rx_ring = NULL;
		}

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_dev_flush(&n->dev);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vhost_vq_set_backend(vq, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	if (sock)
		sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}

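/* Feature negotiation decides who supplies the vnet header - vhost or
 * the backend socket - and records the resulting header lengths on
 * both virtqueues.
 */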
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_NET_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_NET_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&n->dev, features);
		return 0;
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int __init vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void __exit vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");