// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/pfn_t.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* Used to help calculate the FUSE connection's max_pages limit for a request's
 * size. Parts of the struct fuse_req are sliced into scatter-gather lists in
 * addition to the pages used, so this can help account for that overhead.
 */
#define FUSE_HEADER_OVERHEAD    4

/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion between the device removal and mount paths.
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

enum {
	VQ_HIPRIO,
	VQ_REQUEST
};

#define VQ_NAME_LEN	24

/* Per-virtqueue state */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq;     /* protected by ->lock */
	struct work_struct done_work;
	struct list_head queued_reqs;
	struct list_head end_reqs;	/* End these requests */
	struct delayed_work dispatch_work;
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* No inflight requests */
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
	struct kref refcount;
	struct list_head list;    /* on virtio_fs_instances */
	char *tag;
	struct virtio_fs_vq *vqs;
	unsigned int nvqs;               /* number of virtqueues */
	unsigned int num_request_queues; /* number of request queues */
	struct dax_device *dax_dev;

	/* DAX memory window where file contents are mapped */
	void *window_kaddr;
	phys_addr_t window_phys_addr;
	size_t window_len;
};

struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	/* This request can be temporarily queued on the virtqueue */
	struct list_head list;
	struct virtio_fs_forget_req req;
};

struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight);

enum {
	OPT_DAX,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
	fsparam_flag("dax", OPT_DAX),
	{}
};

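/* Parse a virtiofs mount option; "dax" is the only one recognized */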
static int virtio_fs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fc->fs_private;
	int opt;

	opt = fs_parse(fc, virtio_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_DAX:
		ctx->dax = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

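/* Free the virtiofs-specific part of the filesystem context */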
static void virtio_fs_free_fc(struct fs_context *fc)
{
	struct fuse_fs_context *ctx = fc->fs_private;

	kfree(ctx);
}

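/* Map a virtqueue to the driver's per-queue state (indexed by vq->index) */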
static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
	struct virtio_fs *fs = vq->vdev->priv;

	return &fs->vqs[vq->index];
}

static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
{
	return &vq_to_fsvq(vq)->fud->pq;
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
	fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight <= 0);
	fsvq->in_flight--;
	if (!fsvq->in_flight)
		complete(&fsvq->in_flight_zero);
}

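/* kref release callback: the last reference to this instance is gone */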
static void release_virtio_fs_obj(struct kref *ref)
{
	struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);

	kfree(vfs->vqs);
	kfree(vfs);
}

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put(struct virtio_fs *fs)
{
	kref_put(&fs->refcount, release_virtio_fs_obj);
}

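/* Called when the fuse connection's input queue is released */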
static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
	struct virtio_fs *vfs = fiq->priv;

	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(vfs);
	mutex_unlock(&virtio_fs_mutex);
}

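/* Wait for in-flight requests on a queue to complete, then flush its work */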
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in flight requests to finish. */
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		/* We are holding virtio_fs_mutex. There should not be any
		 * waiters waiting for completion.
		 */
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}

	flush_work(&fsvq->done_work);
	flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		virtio_fs_drain_queue(fsvq);
	}
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between ->remove and ->kill_sb
	 * paths. We don't want both of these draining queues at the
	 * same time. Current completion logic reinits completion
	 * and that means there should not be any other thread
	 * doing reinit or waiting for completion already.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

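/* Mark all queues as connected so that requests can be queued again */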
static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = true;
		spin_unlock(&fsvq->lock);
	}
}

/* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_fs *fs)
{
	struct virtio_fs *fs2;
	bool duplicate = false;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs2, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, fs2->tag) == 0)
			duplicate = true;
	}

	if (!duplicate)
		list_add_tail(&fs->list, &virtio_fs_instances);

	mutex_unlock(&virtio_fs_mutex);

	if (duplicate)
		return -EEXIST;
	return 0;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
	struct virtio_fs *fs;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, tag) == 0) {
			kref_get(&fs->refcount);
			goto found;
		}
	}

	fs = NULL; /* not found */

found:
	mutex_unlock(&virtio_fs_mutex);

	return fs;
}

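/* Free the fuse_dev allocated for each virtqueue */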
static void virtio_fs_free_devs(struct virtio_fs *fs)
{
	unsigned int i;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		if (!fsvq->fud)
			continue;

		fuse_dev_free(fsvq->fud);
		fsvq->fud = NULL;
	}
}

/* Read filesystem name from virtio config into fs->tag (devm-managed, so no
 * explicit kfree() is needed).
 */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
	char *end;
	size_t len;

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));
	end = memchr(tag_buf, '\0', sizeof(tag_buf));
	if (end == tag_buf)
		return -EINVAL; /* empty tag */
	if (!end)
		end = &tag_buf[sizeof(tag_buf)];

	len = end - tag_buf;
	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
	if (!fs->tag)
		return -ENOMEM;
	memcpy(fs->tag, tag_buf, len);
	fs->tag[len] = '\0';
	return 0;
}

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct virtqueue *vq = fsvq->vq;

	/* Free completed FUSE_FORGET requests */
	spin_lock(&fsvq->lock);
	do {
		unsigned int len;
		void *req;

		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			kfree(req);
			dec_in_flight_req(fsvq);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);
}

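/* Work function: end failed requests, then retry dispatching queued ones */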
static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	int ret;

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
					       list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			break;
		}

		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);
		fuse_request_end(req);
	}

	/* Dispatch pending requests */
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->queued_reqs,
					       struct fuse_req, list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);

		ret = virtio_fs_enqueue_req(fsvq, req, true);
		if (ret < 0) {
			if (ret == -ENOMEM || ret == -ENOSPC) {
				spin_lock(&fsvq->lock);
				list_add_tail(&req->list, &fsvq->queued_reqs);
				schedule_delayed_work(&fsvq->dispatch_work,
						      msecs_to_jiffies(1));
				spin_unlock(&fsvq->lock);
				return;
			}
			req->out.h.error = ret;
			spin_lock(&fsvq->lock);
			dec_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
			       ret);
			fuse_request_end(req);
		}
	}
}

/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{
	struct scatterlist sg;
	struct virtqueue *vq;
	int ret = 0;
	bool notify;
	struct virtio_fs_forget_req *req = &forget->req;

	spin_lock(&fsvq->lock);
	if (!fsvq->connected) {
		if (in_flight)
			dec_in_flight_req(fsvq);
		kfree(forget);
		goto out;
	}

	sg_init_one(&sg, req, sizeof(*req));
	vq = fsvq->vq;
	dev_dbg(&vq->vdev->dev, "%s\n", __func__);

	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
				 ret);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			if (!in_flight)
				inc_in_flight_req(fsvq);
			/* Queue is full */
			ret = 1;
		} else {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
				 ret);
			kfree(forget);
			if (in_flight)
				dec_in_flight_req(fsvq);
		}
		goto out;
	}

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);
	return ret;
out:
	spin_unlock(&fsvq->lock);
	return ret;
}

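/* Work function: retry FORGET requests that didn't fit in the virtqueue */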
static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
					struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}

		list_del(&forget->list);
		spin_unlock(&fsvq->lock);
		if (send_forget_request(fsvq, forget, true))
			return;
	}
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	unsigned int offset = 0;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int len;
	unsigned int i;

	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
	      fuse_len_args(num_out, args->out_args);

	req->argbuf = kmalloc(len, GFP_ATOMIC);
	if (!req->argbuf)
		return -ENOMEM;

	for (i = 0; i < num_in; i++) {
		memcpy(req->argbuf + offset,
		       args->in_args[i].value,
		       args->in_args[i].size);
		offset += args->in_args[i].size;
	}

	return 0;
}

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
	unsigned int remaining;
	unsigned int offset;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int i;

	remaining = req->out.h.len - sizeof(req->out.h);
	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

	for (i = 0; i < num_out; i++) {
		unsigned int argsize = args->out_args[i].size;

		if (args->out_argvar &&
		    i == args->out_numargs - 1 &&
		    argsize > remaining) {
			argsize = remaining;
		}

		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
		offset += argsize;

		if (i != args->out_numargs - 1)
			remaining -= argsize;
	}

	/* Store the actual size of the variable-length arg */
	if (args->out_argvar)
		args->out_args[args->out_numargs - 1].size = remaining;

	kfree(req->argbuf);
	req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
				       struct virtio_fs_vq *fsvq)
{
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct fuse_args *args;
	struct fuse_args_pages *ap;
	unsigned int len, i, thislen;
	struct page *page;

	/*
	 * TODO verify that server properly follows FUSE protocol
	 * (oh.uniq, oh.len)
	 */
	args = req->args;
	copy_args_from_argbuf(args, req);

	if (args->out_pages && args->page_zeroing) {
		len = args->out_args[args->out_numargs - 1].size;
		ap = container_of(args, typeof(*ap), args);
		for (i = 0; i < ap->num_pages; i++) {
			thislen = ap->descs[i].length;
			if (len < thislen) {
				WARN_ON(ap->descs[i].offset);
				page = ap->pages[i];
				zero_user_segment(page, len, thislen);
				len = 0;
			} else {
				len -= thislen;
			}
		}
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
	spin_lock(&fsvq->lock);
	dec_in_flight_req(fsvq);
	spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
	struct virtio_fs_req_work *w =
		container_of(work, typeof(*w), done_work);

	virtio_fs_request_complete(w->req, w->fsvq);
	kfree(w);
}

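/* Work function: reap completed requests off the virtqueue and end them */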
static void virtio_fs_requests_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct virtqueue *vq = fsvq->vq;
	struct fuse_req *req;
	struct fuse_req *next;
	unsigned int len;
	LIST_HEAD(reqs);

	/* Collect completed requests off the virtqueue */
	spin_lock(&fsvq->lock);
	do {
		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_lock(&fpq->lock);
			list_move_tail(&req->list, &reqs);
			spin_unlock(&fpq->lock);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);

	/* End requests */
	list_for_each_entry_safe(req, next, &reqs, list) {
		list_del_init(&req->list);

		/* blocking async request completes in a worker context */
		if (req->args->may_block) {
			struct virtio_fs_req_work *w;

			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
			w->fsvq = fsvq;
			w->req = req;
			schedule_work(&w->done_work);
		} else {
			virtio_fs_request_complete(req, fsvq);
		}
	}
}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

	schedule_work(&fsvq->done_work);
}

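/* Initialize per-virtqueue state and select work functions by queue type */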
static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
			      int vq_type)
{
	strncpy(fsvq->name, name, VQ_NAME_LEN);
	spin_lock_init(&fsvq->lock);
	INIT_LIST_HEAD(&fsvq->queued_reqs);
	INIT_LIST_HEAD(&fsvq->end_reqs);
	init_completion(&fsvq->in_flight_zero);

	if (vq_type == VQ_REQUEST) {
		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_request_dispatch_work);
	} else {
		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_hiprio_dispatch_work);
	}
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
			       struct virtio_fs *fs)
{
	struct virtqueue **vqs;
	vq_callback_t **callbacks;
	const char **names;
	unsigned int i;
	int ret = 0;

	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
			&fs->num_request_queues);
	if (fs->num_request_queues == 0)
		return -EINVAL;

	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
	if (!fs->vqs)
		return -ENOMEM;

	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
	callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
					GFP_KERNEL);
	names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
	if (!vqs || !callbacks || !names) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the hiprio/forget request virtqueue */
	callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
	names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;

	/* Initialize the requests virtqueues */
	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
		char vq_name[VQ_NAME_LEN];

		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
		callbacks[i] = virtio_fs_vq_done;
		names[i] = fs->vqs[i].name;
	}

	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
	if (ret < 0)
		goto out;

	for (i = 0; i < fs->nvqs; i++)
		fs->vqs[i].vq = vqs[i];

	virtio_fs_start_all_queues(fs);
out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (ret)
		kfree(fs->vqs);
	return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
				  struct virtio_fs *fs)
{
	vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number.  The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct virtio_fs *fs = dax_get_private(dax_dev);
	phys_addr_t offset = PFN_PHYS(pgoff);
	size_t max_nr_pages = fs->window_len/PAGE_SIZE - pgoff;

	if (kaddr)
		*kaddr = fs->window_kaddr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
					PFN_DEV | PFN_MAP);
	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
				       pgoff_t pgoff, void *addr,
				       size_t bytes, struct iov_iter *i)
{
	return copy_from_iter(addr, bytes, i);
}

static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
				       pgoff_t pgoff, void *addr,
				       size_t bytes, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
	if (rc < 0)
		return rc;
	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
	.direct_access = virtio_fs_direct_access,
	.copy_from_iter = virtio_fs_copy_from_iter,
	.copy_to_iter = virtio_fs_copy_to_iter,
	.zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
	struct dax_device *dax_dev = data;

	kill_dax(dax_dev);
	put_dax(dax_dev);
}

static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
	struct virtio_shm_region cache_reg;
	struct dev_pagemap *pgmap;
	bool have_cache;

	if (!IS_ENABLED(CONFIG_FUSE_DAX))
		return 0;

	/* Get cache region */
	have_cache = virtio_get_shm_region(vdev, &cache_reg,
					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
	if (!have_cache) {
		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
		return 0;
	}

	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
				     dev_name(&vdev->dev))) {
		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
			 cache_reg.addr, cache_reg.len);
		return -EBUSY;
	}

	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
		   cache_reg.addr);

	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* Ideally we would directly use the PCI BAR resource but
	 * devm_memremap_pages() wants its own copy in pgmap.  So
	 * initialize a struct range from scratch (only the start
	 * and end fields will be used).
	 */
	pgmap->range = (struct range) {
		.start = (phys_addr_t) cache_reg.addr,
		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
	};
	pgmap->nr_range = 1;

	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
	if (IS_ERR(fs->window_kaddr))
		return PTR_ERR(fs->window_kaddr);

	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
	fs->window_len = (phys_addr_t) cache_reg.len;

	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

	fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
	if (IS_ERR(fs->dax_dev))
		return PTR_ERR(fs->dax_dev);

	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
					fs->dax_dev);
}

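/* Device probe: read the tag, set up virtqueues and DAX, register the instance */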
static int virtio_fs_probe(struct virtio_device *vdev)
{
	struct virtio_fs *fs;
	int ret;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return -ENOMEM;
	kref_init(&fs->refcount);
	vdev->priv = fs;

	ret = virtio_fs_read_tag(vdev, fs);
	if (ret < 0)
		goto out;

	ret = virtio_fs_setup_vqs(vdev, fs);
	if (ret < 0)
		goto out;

	/* TODO vq affinity */

	ret = virtio_fs_setup_dax(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	/* Bring the device online in case the filesystem is mounted and
	 * requests need to be sent before we return.
	 */
	virtio_device_ready(vdev);

	ret = virtio_fs_add_instance(fs);
	if (ret < 0)
		goto out_vqs;

	return 0;

out_vqs:
	vdev->config->reset(vdev);
	virtio_fs_cleanup_vqs(vdev, fs);
	kfree(fs->vqs);

out:
	vdev->priv = NULL;
	kfree(fs);
	return ret;
}

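/* Mark all queues as disconnected so that no new requests are accepted */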
static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = false;
		spin_unlock(&fsvq->lock);
	}
}

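/* Device removal: stop and drain all queues, then drop the device's reference */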
static void virtio_fs_remove(struct virtio_device *vdev)
{
	struct virtio_fs *fs = vdev->priv;

	mutex_lock(&virtio_fs_mutex);
	/* This device is going away. No one should get a new reference */
	list_del_init(&fs->list);
	virtio_fs_stop_all_queues(fs);
	virtio_fs_drain_all_queues_locked(fs);
	vdev->config->reset(vdev);
	virtio_fs_cleanup_vqs(vdev, fs);

	vdev->priv = NULL;
	/* Put device reference on virtio_fs object */
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
	/* TODO need to save state here */
	pr_warn("virtio-fs: suspend/resume not yet supported\n");
	return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
	 /* TODO need to restore state here */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
	{},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= feature_table,
	.feature_table_size	= ARRAY_SIZE(feature_table),
	.probe			= virtio_fs_probe,
	.remove			= virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze			= virtio_fs_freeze,
	.restore		= virtio_fs_restore,
#endif
};

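/* Dequeue a FORGET from the fuse input queue and send it on the hiprio queue */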
static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq, bool sync)
__releases(fiq->lock)
{
	struct fuse_forget_link *link;
	struct virtio_fs_forget *forget;
	struct virtio_fs_forget_req *req;
	struct virtio_fs *fs;
	struct virtio_fs_vq *fsvq;
	u64 unique;

	link = fuse_dequeue_forget(fiq, 1, NULL);
	unique = fuse_get_unique(fiq);

	fs = fiq->priv;
	fsvq = &fs->vqs[VQ_HIPRIO];
	spin_unlock(&fiq->lock);

	/* Allocate a buffer for the request */
	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
	req = &forget->req;

	req->ih = (struct fuse_in_header){
		.opcode = FUSE_FORGET,
		.nodeid = link->forget_one.nodeid,
		.unique = unique,
		.len = sizeof(*req),
	};
	req->arg = (struct fuse_forget_in){
		.nlookup = link->forget_one.nlookup,
	};

	send_forget_request(fsvq, forget, false);
	kfree(link);
}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq,
						bool sync)
__releases(fiq->lock)
{
	/*
	 * TODO interrupts.
	 *
	 * Normal fs operations on a local filesystem aren't interruptible.
	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
	 * with shared lock between host and guest.
	 */
	spin_unlock(&fiq->lock);
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
				       unsigned int num_pages,
				       unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		this_len =  min(page_descs[i].length, total_len);
		total_len -= this_len;
	}

	return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
	unsigned int size, total_sgs = 1 /* fuse_in_header */;

	if (args->in_numargs - args->in_pages)
		total_sgs += 1;

	if (args->in_pages) {
		size = args->in_args[args->in_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	if (!test_bit(FR_ISREPLY, &req->flags))
		return total_sgs;

	total_sgs += 1 /* fuse_out_header */;

	if (args->out_numargs - args->out_pages)
		total_sgs += 1;

	if (args->out_pages) {
		size = args->out_args[args->out_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	return total_sgs;
}

/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
				       struct page **pages,
				       struct fuse_page_desc *page_descs,
				       unsigned int num_pages,
				       unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		sg_init_table(&sg[i], 1);
		this_len =  min(page_descs[i].length, total_len);
		sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
		total_len -= this_len;
	}

	return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
				      struct fuse_req *req,
				      struct fuse_arg *args,
				      unsigned int numargs,
				      bool argpages,
				      void *argbuf,
				      unsigned int *len_used)
{
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
	unsigned int total_sgs = 0;
	unsigned int len;

	len = fuse_len_args(numargs - argpages, args);
	if (len)
		sg_init_one(&sg[total_sgs++], argbuf, len);

	if (argpages)
		total_sgs += sg_init_fuse_pages(&sg[total_sgs],
						ap->pages, ap->descs,
						ap->num_pages,
						args[numargs - 1].size);

	if (len_used)
		*len_used = len;

	return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight)
{
	/* requests need at least 4 elements */
	struct scatterlist *stack_sgs[6];
	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
	struct scatterlist **sgs = stack_sgs;
	struct scatterlist *sg = stack_sg;
	struct virtqueue *vq;
	struct fuse_args *args = req->args;
	unsigned int argbuf_used = 0;
	unsigned int out_sgs = 0;
	unsigned int in_sgs = 0;
	unsigned int total_sgs;
	unsigned int i;
	int ret;
	bool notify;
	struct fuse_pqueue *fpq;

	/* Does the sglist fit on the stack? */
	total_sgs = sg_count_fuse_req(req);
	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
		sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
		if (!sgs || !sg) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Use a bounce buffer since stack args cannot be mapped */
	ret = copy_args_to_argbuf(req);
	if (ret < 0)
		goto out;

	/* Request elements */
	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
				     (struct fuse_arg *)args->in_args,
				     args->in_numargs, args->in_pages,
				     req->argbuf, &argbuf_used);

	/* Reply elements */
	if (test_bit(FR_ISREPLY, &req->flags)) {
		sg_init_one(&sg[out_sgs + in_sgs++],
			    &req->out.h, sizeof(req->out.h));
		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
					    args->out_args, args->out_numargs,
					    args->out_pages,
					    req->argbuf + argbuf_used, NULL);
	}

	WARN_ON(out_sgs + in_sgs != total_sgs);

	for (i = 0; i < total_sgs; i++)
		sgs[i] = &sg[i];

	spin_lock(&fsvq->lock);

	if (!fsvq->connected) {
		spin_unlock(&fsvq->lock);
		ret = -ENOTCONN;
		goto out;
	}

	vq = fsvq->vq;
	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock(&fsvq->lock);
		goto out;
	}

	/* Request successfully sent. */
	fpq = &fsvq->fud->pq;
	spin_lock(&fpq->lock);
	list_add_tail(&req->list, fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);

out:
	if (ret < 0 && req->argbuf) {
		kfree(req->argbuf);
		req->argbuf = NULL;
	}
	if (sgs != stack_sgs) {
		kfree(sgs);
		kfree(sg);
	}

	return ret;
}

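/* Send a request from the fuse input queue on a request virtqueue */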
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq,
					      bool sync)
__releases(fiq->lock)
{
	unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
	struct virtio_fs *fs;
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	int ret;

	WARN_ON(list_empty(&fiq->pending));
	req = list_last_entry(&fiq->pending, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	WARN_ON(!list_empty(&fiq->pending));
	spin_unlock(&fiq->lock);

	fs = fiq->priv;

	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
		  __func__, req->in.h.opcode, req->in.h.unique,
		 req->in.h.nodeid, req->in.h.len,
		 fuse_len_args(req->args->out_numargs, req->args->out_args));

	fsvq = &fs->vqs[queue_id];
	ret = virtio_fs_enqueue_req(fsvq, req, false);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			/*
			 * Virtqueue full. Retry submission from worker
			 * context as we might be holding fc->bg_lock.
			 */
			spin_lock(&fsvq->lock);
			list_add_tail(&req->list, &fsvq->queued_reqs);
			inc_in_flight_req(fsvq);
			schedule_delayed_work(&fsvq->dispatch_work,
						msecs_to_jiffies(1));
			spin_unlock(&fsvq->lock);
			return;
		}
		req->out.h.error = ret;
		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

		/* Can't end request in submission context. Use a worker */
		spin_lock(&fsvq->lock);
		list_add_tail(&req->list, &fsvq->end_reqs);
		schedule_delayed_work(&fsvq->dispatch_work, 0);
		spin_unlock(&fsvq->lock);
		return;
	}
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
	.wake_forget_and_unlock		= virtio_fs_wake_forget_and_unlock,
	.wake_interrupt_and_unlock	= virtio_fs_wake_interrupt_and_unlock,
	.wake_pending_and_unlock	= virtio_fs_wake_pending_and_unlock,
	.release			= virtio_fs_fiq_release,
};

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
	ctx->rootmode = S_IFDIR;
	ctx->default_permissions = 1;
	ctx->allow_other = 1;
	ctx->max_read = UINT_MAX;
	ctx->blksize = 512;
	ctx->destroy = true;
	ctx->no_control = true;
	ctx->no_force_umount = true;
}

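/* Fill in the superblock: allocate fuse devices and kick off FUSE_INIT */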
static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *fs = fc->iq.priv;
	struct fuse_fs_context *ctx = fsc->fs_private;
	unsigned int i;
	int err;

	virtio_fs_ctx_set_defaults(ctx);
	mutex_lock(&virtio_fs_mutex);

	/* After holding mutex, make sure virtiofs device is still there.
	 * Though we are holding a reference to it, driver ->remove might
	 * still have cleaned up virtual queues. In that case bail out.
	 */
	err = -EINVAL;
	if (list_empty(&fs->list)) {
		pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
		goto err;
	}

	err = -ENOMEM;
	/* Allocate fuse_dev for hiprio and request queues */
	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fsvq->fud = fuse_dev_alloc();
		if (!fsvq->fud)
			goto err_free_fuse_devs;
	}

	/* virtiofs allocates and installs its own fuse devices */
	ctx->fudptr = NULL;
	if (ctx->dax) {
		if (!fs->dax_dev) {
			err = -EINVAL;
			pr_err("virtio-fs: dax can't be enabled as filesystem"
			       " device does not support it.\n");
			goto err_free_fuse_devs;
		}
		ctx->dax_dev = fs->dax_dev;
	}
	err = fuse_fill_super_common(sb, ctx);
	if (err < 0)
		goto err_free_fuse_devs;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fuse_dev_install(fsvq->fud, fc);
	}

	/* Previous unmount will stop all queues. Start these again */
	virtio_fs_start_all_queues(fs);
	fuse_send_init(fm);
	mutex_unlock(&virtio_fs_mutex);
	return 0;

err_free_fuse_devs:
	virtio_fs_free_devs(fs);
err:
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

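/* Tear down the connection when the last mount of this instance goes away */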
static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *vfs = fc->iq.priv;
	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop dax worker. Soon evict_inodes() will be called which
	 * will free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop forget queue. Soon destroy will be sent */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;
	spin_unlock(&fsvq->lock);
	virtio_fs_drain_all_queues(vfs);

	fuse_conn_destroy(fm);

	/* fuse_conn_destroy() must have sent destroy. Stop all queues
	 * and drain one more time and free fuse devices. Freeing fuse
	 * devices will drop their reference on fuse_conn and that in
	 * turn will drop its reference on virtio_fs object.
	 */
	virtio_fs_stop_all_queues(vfs);
	virtio_fs_drain_all_queues(vfs);
	virtio_fs_free_devs(vfs);
}

static void virtio_kill_sb(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	/* If mount failed, we can still be called without any fc */
	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			virtio_fs_conn_destroy(fm);
	}
	kill_anon_super(sb);
}

static int virtio_fs_test_super(struct super_block *sb,
				struct fs_context *fsc)
{
	struct fuse_mount *fsc_fm = fsc->s_fs_info;
	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

static int virtio_fs_set_super(struct super_block *sb,
			       struct fs_context *fsc)
{
	int err;

	err = get_anon_bdev(&sb->s_dev);
	if (!err)
		fuse_mount_get(fsc->s_fs_info);

	return err;
}

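/* Look up the virtiofs instance by tag and create or reuse its superblock */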
static int virtio_fs_get_tree(struct fs_context *fsc)
{
	struct virtio_fs *fs;
	struct super_block *sb;
	struct fuse_conn *fc = NULL;
	struct fuse_mount *fm;
	unsigned int virtqueue_size;
	int err = -EIO;

	/* This gets a reference on virtio_fs object. This ptr gets installed
	 * in fc->iq.priv. Once fuse_conn is going away, it calls ->put()
	 * to drop the reference to this object.
	 */
	fs = virtio_fs_find_instance(fsc->source);
	if (!fs) {
		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
		return -EINVAL;
	}

	virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
	if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
		goto out_err;

	err = -ENOMEM;
	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
	if (!fc)
		goto out_err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		goto out_err;

	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
	fc->release = fuse_free_conn;
	fc->delete_stale = true;
	fc->auto_submounts = true;

	/* Tell FUSE to split requests that exceed the virtqueue's size */
	fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
				    virtqueue_size - FUSE_HEADER_OVERHEAD);

	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, virtio_fs_test_super, virtio_fs_set_super);
	fuse_mount_put(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = virtio_fs_fill_super(sb, fsc);
		if (err) {
			fuse_mount_put(fm);
			sb->s_fs_info = NULL;
			deactivate_locked_super(sb);
			return err;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	WARN_ON(fsc->root);
	fsc->root = dget(sb->s_root);
	return 0;

out_err:
	kfree(fc);
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

static const struct fs_context_operations virtio_fs_context_ops = {
	.free		= virtio_fs_free_fc,
	.parse_param	= virtio_fs_parse_param,
	.get_tree	= virtio_fs_get_tree,
};

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	fsc->fs_private = ctx;
	fsc->ops = &virtio_fs_context_ops;
	return 0;
}

static struct file_system_type virtio_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "virtiofs",
	.init_fs_context = virtio_fs_init_fs_context,
	.kill_sb	= virtio_kill_sb,
};

static int __init virtio_fs_init(void)
{
	int ret;

	ret = register_virtio_driver(&virtio_fs_driver);
	if (ret < 0)
		return ret;

	ret = register_filesystem(&virtio_fs_type);
	if (ret < 0) {
		unregister_virtio_driver(&virtio_fs_driver);
		return ret;
	}

	return 0;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
	unregister_filesystem(&virtio_fs_type);
	unregister_virtio_driver(&virtio_fs_driver);
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);