1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/sched/signal.h>
15 #include <linux/uio.h>
16 #include <linux/miscdevice.h>
17 #include <linux/namei.h>
18 #include <linux/pagemap.h>
19 #include <linux/file.h>
20 #include <linux/slab.h>
21 #include <linux/pipe_fs_i.h>
22 #include <linux/swap.h>
23 #include <linux/splice.h>
24 #include <linux/sched.h>
25
26 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
27 MODULE_ALIAS("devname:fuse");
28
29 /* Ordinary requests have even IDs, while interrupt IDs are odd */
30 #define FUSE_INT_REQ_BIT (1ULL << 0)
31 #define FUSE_REQ_ID_STEP (1ULL << 1)
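/*
 * Worked example (illustrative annotation): fuse_get_unique() advances
 * fiq->reqctr by FUSE_REQ_ID_STEP, so ordinary requests get unique IDs
 * 2, 4, 6, ...  An INTERRUPT aimed at request 4 travels as unique
 * 4 | FUSE_INT_REQ_BIT == 5, and its reply is matched back to request 4
 * by masking the bit off again (see fuse_dev_do_write()).
 */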
32
33 static struct kmem_cache *fuse_req_cachep;
34
35 static struct fuse_dev *fuse_get_dev(struct file *file)
36 {
37 /*
38 * Lockless access is OK, because file->private_data is set
39 * once during mount and is valid until the file is released.
40 */
41 return READ_ONCE(file->private_data);
42 }
43
44 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
45 {
46 INIT_LIST_HEAD(&req->list);
47 INIT_LIST_HEAD(&req->intr_entry);
48 init_waitqueue_head(&req->waitq);
49 refcount_set(&req->count, 1);
50 __set_bit(FR_PENDING, &req->flags);
51 req->fm = fm;
52 }
53
54 static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
55 {
56 struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
57 if (req)
58 fuse_request_init(fm, req);
59
60 return req;
61 }
62
63 static void fuse_request_free(struct fuse_req *req)
64 {
65 kmem_cache_free(fuse_req_cachep, req);
66 }
67
68 static void __fuse_get_request(struct fuse_req *req)
69 {
70 refcount_inc(&req->count);
71 }
72
73 /* Must be called with > 1 refcount */
74 static void __fuse_put_request(struct fuse_req *req)
75 {
76 refcount_dec(&req->count);
77 }
78
79 void fuse_set_initialized(struct fuse_conn *fc)
80 {
81 /* Make sure stores before this are seen on another CPU */
82 smp_wmb();
83 fc->initialized = 1;
84 }
85
86 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
87 {
88 return !fc->initialized || (for_background && fc->blocked);
89 }
90
91 static void fuse_drop_waiting(struct fuse_conn *fc)
92 {
93 /*
94 * lockless check of fc->connected is okay, because atomic_dec_and_test()
95 * provides a memory barrier matched with the one in fuse_wait_aborted()
96 * to ensure no wake-up is missed.
97 */
98 if (atomic_dec_and_test(&fc->num_waiting) &&
99 !READ_ONCE(fc->connected)) {
100 /* wake up aborters */
101 wake_up_all(&fc->blocked_waitq);
102 }
103 }
104
105 static void fuse_put_request(struct fuse_req *req);
106
107 static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
108 {
109 struct fuse_conn *fc = fm->fc;
110 struct fuse_req *req;
111 int err;
112 atomic_inc(&fc->num_waiting);
113
114 if (fuse_block_alloc(fc, for_background)) {
115 err = -EINTR;
116 if (wait_event_killable_exclusive(fc->blocked_waitq,
117 !fuse_block_alloc(fc, for_background)))
118 goto out;
119 }
120 /* Matches smp_wmb() in fuse_set_initialized() */
121 smp_rmb();
122
123 err = -ENOTCONN;
124 if (!fc->connected)
125 goto out;
126
127 err = -ECONNREFUSED;
128 if (fc->conn_error)
129 goto out;
130
131 req = fuse_request_alloc(fm, GFP_KERNEL);
132 err = -ENOMEM;
133 if (!req) {
134 if (for_background)
135 wake_up(&fc->blocked_waitq);
136 goto out;
137 }
138
139 req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
140 req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
141 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
142
143 __set_bit(FR_WAITING, &req->flags);
144 if (for_background)
145 __set_bit(FR_BACKGROUND, &req->flags);
146
147 if (unlikely(req->in.h.uid == ((uid_t)-1) ||
148 req->in.h.gid == ((gid_t)-1))) {
149 fuse_put_request(req);
150 return ERR_PTR(-EOVERFLOW);
151 }
152 return req;
153
154 out:
155 fuse_drop_waiting(fc);
156 return ERR_PTR(err);
157 }
158
159 static void fuse_put_request(struct fuse_req *req)
160 {
161 struct fuse_conn *fc = req->fm->fc;
162
163 if (refcount_dec_and_test(&req->count)) {
164 if (test_bit(FR_BACKGROUND, &req->flags)) {
165 /*
166 * We get here in the unlikely case that a background
167 * request was allocated but not sent
168 */
169 spin_lock(&fc->bg_lock);
170 if (!fc->blocked)
171 wake_up(&fc->blocked_waitq);
172 spin_unlock(&fc->bg_lock);
173 }
174
175 if (test_bit(FR_WAITING, &req->flags)) {
176 __clear_bit(FR_WAITING, &req->flags);
177 fuse_drop_waiting(fc);
178 }
179
180 fuse_request_free(req);
181 }
182 }
183
184 unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
185 {
186 unsigned nbytes = 0;
187 unsigned i;
188
189 for (i = 0; i < numargs; i++)
190 nbytes += args[i].size;
191
192 return nbytes;
193 }
194 EXPORT_SYMBOL_GPL(fuse_len_args);
195
196 u64 fuse_get_unique(struct fuse_iqueue *fiq)
197 {
198 fiq->reqctr += FUSE_REQ_ID_STEP;
199 return fiq->reqctr;
200 }
201 EXPORT_SYMBOL_GPL(fuse_get_unique);
202
203 static unsigned int fuse_req_hash(u64 unique)
204 {
205 return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
206 }
207
208 /*
209 * A new request is available; wake fiq->waitq
210 */
211 static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq, bool sync)
212 __releases(fiq->lock)
213 {
214 if (sync)
215 wake_up_sync(&fiq->waitq);
216 else
217 wake_up(&fiq->waitq);
218 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
219 spin_unlock(&fiq->lock);
220 }
221
222 const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
223 .wake_forget_and_unlock = fuse_dev_wake_and_unlock,
224 .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
225 .wake_pending_and_unlock = fuse_dev_wake_and_unlock,
226 };
227 EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
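/*
 * Illustrative sketch (hypothetical "example_" names): a transport other
 * than /dev/fuse can supply its own fuse_iqueue_ops and hand queued entries
 * to the device instead of waking fiq->waitq; virtiofs does exactly this in
 * fs/fuse/virtio_fs.c.  The stub below only shows the shape of such an
 * implementation.
 */
static void example_wake_and_unlock(struct fuse_iqueue *fiq, bool sync)
__releases(fiq->lock)
{
	/* dispatch the newly queued entry to the transport here */
	spin_unlock(&fiq->lock);
}

static const struct fuse_iqueue_ops example_fiq_ops = {
	.wake_forget_and_unlock		= example_wake_and_unlock,
	.wake_interrupt_and_unlock	= example_wake_and_unlock,
	.wake_pending_and_unlock	= example_wake_and_unlock,
};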
228
229 static void queue_request_and_unlock(struct fuse_iqueue *fiq,
230 struct fuse_req *req, bool sync)
231 __releases(fiq->lock)
232 {
233 req->in.h.len = sizeof(struct fuse_in_header) +
234 fuse_len_args(req->args->in_numargs,
235 (struct fuse_arg *) req->args->in_args);
236 list_add_tail(&req->list, &fiq->pending);
237 fiq->ops->wake_pending_and_unlock(fiq, sync);
238 }
239
240 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
241 u64 nodeid, u64 nlookup)
242 {
243 struct fuse_iqueue *fiq = &fc->iq;
244
245 forget->forget_one.nodeid = nodeid;
246 forget->forget_one.nlookup = nlookup;
247
248 spin_lock(&fiq->lock);
249 if (fiq->connected) {
250 fiq->forget_list_tail->next = forget;
251 fiq->forget_list_tail = forget;
252 fiq->ops->wake_forget_and_unlock(fiq, false);
253 } else {
254 kfree(forget);
255 spin_unlock(&fiq->lock);
256 }
257 }
258
259 static void flush_bg_queue(struct fuse_conn *fc)
260 {
261 struct fuse_iqueue *fiq = &fc->iq;
262
263 while (fc->active_background < fc->max_background &&
264 !list_empty(&fc->bg_queue)) {
265 struct fuse_req *req;
266
267 req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
268 list_del(&req->list);
269 fc->active_background++;
270 spin_lock(&fiq->lock);
271 req->in.h.unique = fuse_get_unique(fiq);
272 queue_request_and_unlock(fiq, req, false);
273 }
274 }
275
276 /*
277 * This function is called when a request is finished. Either a reply
278 * has arrived or it was aborted (and not yet sent) or some error
279 * occurred during communication with userspace, or the device file
280 * was closed. The requester thread is woken up (if still waiting),
281 * the 'end' callback is called if given, else the reference to the
282 * request is released
283 */
284 void fuse_request_end(struct fuse_req *req)
285 {
286 struct fuse_mount *fm = req->fm;
287 struct fuse_conn *fc = fm->fc;
288 struct fuse_iqueue *fiq = &fc->iq;
289
290 if (test_and_set_bit(FR_FINISHED, &req->flags))
291 goto put_request;
292
293 /*
294 * test_and_set_bit() implies smp_mb() between bit
295 * changing and below FR_INTERRUPTED check. Pairs with
296 * smp_mb() from queue_interrupt().
297 */
298 if (test_bit(FR_INTERRUPTED, &req->flags)) {
299 spin_lock(&fiq->lock);
300 list_del_init(&req->intr_entry);
301 spin_unlock(&fiq->lock);
302 }
303 WARN_ON(test_bit(FR_PENDING, &req->flags));
304 WARN_ON(test_bit(FR_SENT, &req->flags));
305 if (test_bit(FR_BACKGROUND, &req->flags)) {
306 spin_lock(&fc->bg_lock);
307 clear_bit(FR_BACKGROUND, &req->flags);
308 if (fc->num_background == fc->max_background) {
309 fc->blocked = 0;
310 wake_up(&fc->blocked_waitq);
311 } else if (!fc->blocked) {
312 /*
313 * Wake up next waiter, if any. It's okay to use
314 * waitqueue_active(), as we've already synced up
315 * fc->blocked with waiters with the wake_up() call
316 * above.
317 */
318 if (waitqueue_active(&fc->blocked_waitq))
319 wake_up(&fc->blocked_waitq);
320 }
321
322 if (fc->num_background == fc->congestion_threshold && fm->sb) {
323 clear_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
324 clear_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
325 }
326 fc->num_background--;
327 fc->active_background--;
328 flush_bg_queue(fc);
329 spin_unlock(&fc->bg_lock);
330 } else {
331 /* Wake up waiter sleeping in request_wait_answer() */
332 wake_up(&req->waitq);
333 }
334
335 if (test_bit(FR_ASYNC, &req->flags))
336 req->args->end(fm, req->args, req->out.h.error);
337 put_request:
338 fuse_put_request(req);
339 }
340 EXPORT_SYMBOL_GPL(fuse_request_end);
341
342 static int queue_interrupt(struct fuse_req *req)
343 {
344 struct fuse_iqueue *fiq = &req->fm->fc->iq;
345
346 spin_lock(&fiq->lock);
347 /* Check that we have actually sent a request to interrupt this req */
348 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
349 spin_unlock(&fiq->lock);
350 return -EINVAL;
351 }
352
353 if (list_empty(&req->intr_entry)) {
354 list_add_tail(&req->intr_entry, &fiq->interrupts);
355 /*
356 * Pairs with smp_mb() implied by test_and_set_bit()
357 * from fuse_request_end().
358 */
359 smp_mb();
360 if (test_bit(FR_FINISHED, &req->flags)) {
361 list_del_init(&req->intr_entry);
362 spin_unlock(&fiq->lock);
363 return 0;
364 }
365 fiq->ops->wake_interrupt_and_unlock(fiq, false);
366 } else {
367 spin_unlock(&fiq->lock);
368 }
369 return 0;
370 }
371
372 static void request_wait_answer(struct fuse_req *req)
373 {
374 struct fuse_conn *fc = req->fm->fc;
375 struct fuse_iqueue *fiq = &fc->iq;
376 int err;
377
378 if (!fc->no_interrupt) {
379 /* Any signal may interrupt this */
380 err = wait_event_interruptible(req->waitq,
381 test_bit(FR_FINISHED, &req->flags));
382 if (!err)
383 return;
384
385 set_bit(FR_INTERRUPTED, &req->flags);
386 /* matches barrier in fuse_dev_do_read() */
387 smp_mb__after_atomic();
388 if (test_bit(FR_SENT, &req->flags))
389 queue_interrupt(req);
390 }
391
392 if (!test_bit(FR_FORCE, &req->flags)) {
393 /* Only fatal signals may interrupt this */
394 err = wait_event_killable(req->waitq,
395 test_bit(FR_FINISHED, &req->flags));
396 if (!err)
397 return;
398
399 spin_lock(&fiq->lock);
400 /* Request is not yet in userspace, bail out */
401 if (test_bit(FR_PENDING, &req->flags)) {
402 list_del(&req->list);
403 spin_unlock(&fiq->lock);
404 __fuse_put_request(req);
405 req->out.h.error = -EINTR;
406 return;
407 }
408 spin_unlock(&fiq->lock);
409 }
410
411 /*
412 * Either the request is already in userspace, or it was forced.
413 * Wait it out.
414 */
415 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
416 }
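/*
 * Summary of the above (annotation): request_wait_answer() degrades through
 * three waits.  First any signal may interrupt (skipped once the daemon has
 * answered INTERRUPT with -ENOSYS, setting fc->no_interrupt), then only
 * fatal signals may (skipped for FR_FORCE requests), and finally the wait
 * is uninterruptible once the request is in userspace and can no longer be
 * torn out of the queues safely.
 */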
417
418 static void __fuse_request_send(struct fuse_req *req)
419 {
420 struct fuse_iqueue *fiq = &req->fm->fc->iq;
421
422 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
423 spin_lock(&fiq->lock);
424 if (!fiq->connected) {
425 spin_unlock(&fiq->lock);
426 req->out.h.error = -ENOTCONN;
427 } else {
428 req->in.h.unique = fuse_get_unique(fiq);
429 /* acquire extra reference, since request is still needed
430 after fuse_request_end() */
431 __fuse_get_request(req);
432 queue_request_and_unlock(fiq, req, true);
433
434 request_wait_answer(req);
435 /* Pairs with smp_wmb() in fuse_request_end() */
436 smp_rmb();
437 }
438 }
439
440 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
441 {
442 if (fc->minor < 4 && args->opcode == FUSE_STATFS)
443 args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;
444
445 if (fc->minor < 9) {
446 switch (args->opcode) {
447 case FUSE_LOOKUP:
448 case FUSE_CREATE:
449 case FUSE_MKNOD:
450 case FUSE_MKDIR:
451 case FUSE_SYMLINK:
452 case FUSE_LINK:
453 args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
454 break;
455 case FUSE_GETATTR:
456 case FUSE_SETATTR:
457 args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
458 break;
459 }
460 }
461 if (fc->minor < 12) {
462 switch (args->opcode) {
463 case FUSE_CREATE:
464 args->in_args[0].size = sizeof(struct fuse_open_in);
465 break;
466 case FUSE_MKNOD:
467 args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
468 break;
469 }
470 }
471 }
472
473 static void fuse_force_creds(struct fuse_req *req)
474 {
475 struct fuse_conn *fc = req->fm->fc;
476
477 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
478 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
479 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
480 }
481
482 static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
483 {
484 req->in.h.opcode = args->opcode;
485 req->in.h.nodeid = args->nodeid;
486 req->args = args;
487 if (args->end)
488 __set_bit(FR_ASYNC, &req->flags);
489 }
490
491 ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
492 {
493 struct fuse_conn *fc = fm->fc;
494 struct fuse_req *req;
495 ssize_t ret;
496
497 if (args->force) {
498 atomic_inc(&fc->num_waiting);
499 req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);
500
501 if (!args->nocreds)
502 fuse_force_creds(req);
503
504 __set_bit(FR_WAITING, &req->flags);
505 __set_bit(FR_FORCE, &req->flags);
506 } else {
507 WARN_ON(args->nocreds);
508 req = fuse_get_req(fm, false);
509 if (IS_ERR(req))
510 return PTR_ERR(req);
511 }
512
513 /* Needs to be done after fuse_get_req() so that fc->minor is valid */
514 fuse_adjust_compat(fc, args);
515 fuse_args_to_req(req, args);
516
517 if (!args->noreply)
518 __set_bit(FR_ISREPLY, &req->flags);
519 __fuse_request_send(req);
520 ret = req->out.h.error;
521 if (!ret && args->out_argvar) {
522 BUG_ON(args->out_numargs == 0);
523 ret = args->out_args[args->out_numargs - 1].size;
524 }
525 fuse_put_request(req);
526
527 return ret;
528 }
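/*
 * Illustrative caller (hypothetical, loosely modeled on fs/fuse/dir.c):
 * fill a stack-allocated fuse_args, point the in/out slots at local
 * structures, and let fuse_simple_request() do the whole
 * send/wait/copy-back cycle.
 */
static ssize_t example_getattr(struct fuse_mount *fm, u64 nodeid,
			       struct fuse_getattr_in *inarg,
			       struct fuse_attr_out *outarg)
{
	FUSE_ARGS(args);

	args.opcode = FUSE_GETATTR;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(*inarg);
	args.in_args[0].value = inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outarg);
	args.out_args[0].value = outarg;
	return fuse_simple_request(fm, &args);
}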
529
530 static bool fuse_request_queue_background(struct fuse_req *req)
531 {
532 struct fuse_mount *fm = req->fm;
533 struct fuse_conn *fc = fm->fc;
534 bool queued = false;
535
536 WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
537 if (!test_bit(FR_WAITING, &req->flags)) {
538 __set_bit(FR_WAITING, &req->flags);
539 atomic_inc(&fc->num_waiting);
540 }
541 __set_bit(FR_ISREPLY, &req->flags);
542 spin_lock(&fc->bg_lock);
543 if (likely(fc->connected)) {
544 fc->num_background++;
545 if (fc->num_background == fc->max_background)
546 fc->blocked = 1;
547 if (fc->num_background == fc->congestion_threshold && fm->sb) {
548 set_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
549 set_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
550 }
551 list_add_tail(&req->list, &fc->bg_queue);
552 flush_bg_queue(fc);
553 queued = true;
554 }
555 spin_unlock(&fc->bg_lock);
556
557 return queued;
558 }
559
560 int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
561 gfp_t gfp_flags)
562 {
563 struct fuse_req *req;
564
565 if (args->force) {
566 WARN_ON(!args->nocreds);
567 req = fuse_request_alloc(fm, gfp_flags);
568 if (!req)
569 return -ENOMEM;
570 __set_bit(FR_BACKGROUND, &req->flags);
571 } else {
572 WARN_ON(args->nocreds);
573 req = fuse_get_req(fm, true);
574 if (IS_ERR(req))
575 return PTR_ERR(req);
576 }
577
578 fuse_args_to_req(req, args);
579
580 if (!fuse_request_queue_background(req)) {
581 fuse_put_request(req);
582 return -ENOTCONN;
583 }
584
585 return 0;
586 }
587 EXPORT_SYMBOL_GPL(fuse_simple_background);
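/*
 * Illustrative pattern (hypothetical "example_" names): background callers
 * set an ->end callback so completion runs asynchronously from
 * fuse_request_end(); if the request cannot be queued they must finish it
 * by hand, as fuse_retrieve() below does with fuse_retrieve_end().
 */
static void example_end(struct fuse_mount *fm, struct fuse_args *args,
			int error)
{
	/* complete the operation and free the container of 'args' here */
}

static int example_send_in_background(struct fuse_mount *fm,
				      struct fuse_args *args)
{
	int err;

	args->end = example_end;	/* marks the request FR_ASYNC */
	err = fuse_simple_background(fm, args, GFP_KERNEL);
	if (err)
		example_end(fm, args, err);
	return err;
}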
588
589 static int fuse_simple_notify_reply(struct fuse_mount *fm,
590 struct fuse_args *args, u64 unique)
591 {
592 struct fuse_req *req;
593 struct fuse_iqueue *fiq = &fm->fc->iq;
594 int err = 0;
595
596 req = fuse_get_req(fm, false);
597 if (IS_ERR(req))
598 return PTR_ERR(req);
599
600 __clear_bit(FR_ISREPLY, &req->flags);
601 req->in.h.unique = unique;
602
603 fuse_args_to_req(req, args);
604
605 spin_lock(&fiq->lock);
606 if (fiq->connected) {
607 queue_request_and_unlock(fiq, req, false);
608 } else {
609 err = -ENODEV;
610 spin_unlock(&fiq->lock);
611 fuse_put_request(req);
612 }
613
614 return err;
615 }
616
617 /*
618 * Lock the request. Up to the next unlock_request() there mustn't be
619 * anything that could cause a page-fault. If the request was already
620 * aborted, bail out.
621 */
622 static int lock_request(struct fuse_req *req)
623 {
624 int err = 0;
625 if (req) {
626 spin_lock(&req->waitq.lock);
627 if (test_bit(FR_ABORTED, &req->flags))
628 err = -ENOENT;
629 else
630 set_bit(FR_LOCKED, &req->flags);
631 spin_unlock(&req->waitq.lock);
632 }
633 return err;
634 }
635
636 /*
637 * Unlock request. If it was aborted while locked, caller is responsible
638 * for unlocking and ending the request.
639 */
640 static int unlock_request(struct fuse_req *req)
641 {
642 int err = 0;
643 if (req) {
644 spin_lock(&req->waitq.lock);
645 if (test_bit(FR_ABORTED, &req->flags))
646 err = -ENOENT;
647 else
648 clear_bit(FR_LOCKED, &req->flags);
649 spin_unlock(&req->waitq.lock);
650 }
651 return err;
652 }
653
654 struct fuse_copy_state {
655 int write;
656 struct fuse_req *req;
657 struct iov_iter *iter;
658 struct pipe_buffer *pipebufs;
659 struct pipe_buffer *currbuf;
660 struct pipe_inode_info *pipe;
661 unsigned long nr_segs;
662 struct page *pg;
663 unsigned len;
664 unsigned offset;
665 unsigned move_pages:1;
666 };
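/*
 * Annotation: a fuse_copy_state runs in one of two modes.  With cs->iter
 * set (plain read/write on /dev/fuse), fuse_copy_fill() grabs user pages
 * via iov_iter_get_pages(); with cs->pipebufs set (the splice paths), it
 * walks or allocates pipe buffers instead.  cs->pg/offset/len always
 * describe the current window being copied.
 */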
667
668 static void fuse_copy_init(struct fuse_copy_state *cs, int write,
669 struct iov_iter *iter)
670 {
671 memset(cs, 0, sizeof(*cs));
672 cs->write = write;
673 cs->iter = iter;
674 }
675
676 /* Unmap and put previous page of userspace buffer */
677 static void fuse_copy_finish(struct fuse_copy_state *cs)
678 {
679 if (cs->currbuf) {
680 struct pipe_buffer *buf = cs->currbuf;
681
682 if (cs->write)
683 buf->len = PAGE_SIZE - cs->len;
684 cs->currbuf = NULL;
685 } else if (cs->pg) {
686 if (cs->write) {
687 flush_dcache_page(cs->pg);
688 set_page_dirty_lock(cs->pg);
689 }
690 /*
691 * The page could be a GUP page (see iov_iter_get_pages() in
692 * fuse_copy_fill()), so use put_user_page() to release it.
693 */
694 put_user_page(cs->pg);
695 }
696 cs->pg = NULL;
697 }
698
699 /*
700 * Get another pageful of userspace buffer, map it into kernel
701 * address space, and lock the request
702 */
703 static int fuse_copy_fill(struct fuse_copy_state *cs)
704 {
705 struct page *page;
706 int err;
707
708 err = unlock_request(cs->req);
709 if (err)
710 return err;
711
712 fuse_copy_finish(cs);
713 if (cs->pipebufs) {
714 struct pipe_buffer *buf = cs->pipebufs;
715
716 if (!cs->write) {
717 err = pipe_buf_confirm(cs->pipe, buf);
718 if (err)
719 return err;
720
721 BUG_ON(!cs->nr_segs);
722 cs->currbuf = buf;
723 cs->pg = buf->page;
724 cs->offset = buf->offset;
725 cs->len = buf->len;
726 cs->pipebufs++;
727 cs->nr_segs--;
728 } else {
729 if (cs->nr_segs >= cs->pipe->max_usage)
730 return -EIO;
731
732 page = alloc_page(GFP_HIGHUSER);
733 if (!page)
734 return -ENOMEM;
735
736 buf->page = page;
737 buf->offset = 0;
738 buf->len = 0;
739
740 cs->currbuf = buf;
741 cs->pg = page;
742 cs->offset = 0;
743 cs->len = PAGE_SIZE;
744 cs->pipebufs++;
745 cs->nr_segs++;
746 }
747 } else {
748 size_t off;
749 err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
750 if (err < 0)
751 return err;
752 BUG_ON(!err);
753 cs->len = err;
754 cs->offset = off;
755 cs->pg = page;
756 iov_iter_advance(cs->iter, err);
757 }
758
759 return lock_request(cs->req);
760 }
761
762 /* Do as much copy to/from userspace buffer as we can */
763 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
764 {
765 unsigned ncpy = min(*size, cs->len);
766 if (val) {
767 void *pgaddr = kmap_atomic(cs->pg);
768 void *buf = pgaddr + cs->offset;
769
770 if (cs->write)
771 memcpy(buf, *val, ncpy);
772 else
773 memcpy(*val, buf, ncpy);
774
775 kunmap_atomic(pgaddr);
776 *val += ncpy;
777 }
778 *size -= ncpy;
779 cs->len -= ncpy;
780 cs->offset += ncpy;
781 return ncpy;
782 }
783
784 static int fuse_check_page(struct page *page)
785 {
786 if (page_mapcount(page) ||
787 page->mapping != NULL ||
788 (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
789 ~(1 << PG_locked |
790 1 << PG_referenced |
791 1 << PG_uptodate |
792 1 << PG_lru |
793 1 << PG_active |
794 1 << PG_workingset |
795 1 << PG_reclaim |
796 1 << PG_waiters))) {
797 dump_page(page, "fuse: trying to steal weird page");
798 return 1;
799 }
800 return 0;
801 }
802
803 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
804 {
805 int err;
806 struct page *oldpage = *pagep;
807 struct page *newpage;
808 struct pipe_buffer *buf = cs->pipebufs;
809
810 get_page(oldpage);
811 err = unlock_request(cs->req);
812 if (err)
813 goto out_put_old;
814
815 fuse_copy_finish(cs);
816
817 err = pipe_buf_confirm(cs->pipe, buf);
818 if (err)
819 goto out_put_old;
820
821 BUG_ON(!cs->nr_segs);
822 cs->currbuf = buf;
823 cs->len = buf->len;
824 cs->pipebufs++;
825 cs->nr_segs--;
826
827 if (cs->len != PAGE_SIZE)
828 goto out_fallback;
829
830 if (!pipe_buf_try_steal(cs->pipe, buf))
831 goto out_fallback;
832
833 newpage = buf->page;
834
835 if (!PageUptodate(newpage))
836 SetPageUptodate(newpage);
837
838 ClearPageMappedToDisk(newpage);
839
840 if (fuse_check_page(newpage) != 0)
841 goto out_fallback_unlock;
842
843 /*
844 * This is a new and locked page; it shouldn't be mapped or
845 * have any special flags on it
846 */
847 if (WARN_ON(page_mapped(oldpage)))
848 goto out_fallback_unlock;
849 if (WARN_ON(page_has_private(oldpage)))
850 goto out_fallback_unlock;
851 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
852 goto out_fallback_unlock;
853 if (WARN_ON(PageMlocked(oldpage)))
854 goto out_fallback_unlock;
855
856 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
857 if (err) {
858 unlock_page(newpage);
859 goto out_put_old;
860 }
861
862 get_page(newpage);
863
864 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
865 lru_cache_add(newpage);
866
867 /*
868 * Release while we have extra ref on stolen page. Otherwise
869 * anon_pipe_buf_release() might think the page can be reused.
870 */
871 pipe_buf_release(cs->pipe, buf);
872
873 err = 0;
874 spin_lock(&cs->req->waitq.lock);
875 if (test_bit(FR_ABORTED, &cs->req->flags))
876 err = -ENOENT;
877 else
878 *pagep = newpage;
879 spin_unlock(&cs->req->waitq.lock);
880
881 if (err) {
882 unlock_page(newpage);
883 put_page(newpage);
884 goto out_put_old;
885 }
886
887 unlock_page(oldpage);
888 /* Drop ref for ap->pages[] array */
889 put_page(oldpage);
890 cs->len = 0;
891
892 err = 0;
893 out_put_old:
894 /* Drop ref obtained in this function */
895 put_page(oldpage);
896 return err;
897
898 out_fallback_unlock:
899 unlock_page(newpage);
900 out_fallback:
901 cs->pg = buf->page;
902 cs->offset = buf->offset;
903
904 err = lock_request(cs->req);
905 if (!err)
906 err = 1;
907
908 goto out_put_old;
909 }
910
911 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
912 unsigned offset, unsigned count)
913 {
914 struct pipe_buffer *buf;
915 int err;
916
917 if (cs->nr_segs >= cs->pipe->max_usage)
918 return -EIO;
919
920 get_page(page);
921 err = unlock_request(cs->req);
922 if (err) {
923 put_page(page);
924 return err;
925 }
926
927 fuse_copy_finish(cs);
928
929 buf = cs->pipebufs;
930 buf->page = page;
931 buf->offset = offset;
932 buf->len = count;
933
934 cs->pipebufs++;
935 cs->nr_segs++;
936 cs->len = 0;
937
938 return 0;
939 }
940
941 /*
942 * Copy a page in the request to/from the userspace buffer. Must be
943 * done atomically
944 */
945 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
946 unsigned offset, unsigned count, int zeroing)
947 {
948 int err;
949 struct page *page = *pagep;
950
951 if (page && zeroing && count < PAGE_SIZE)
952 clear_highpage(page);
953
954 while (count) {
955 if (cs->write && cs->pipebufs && page) {
956 /*
957 * Can't control lifetime of pipe buffers, so always
958 * copy user pages.
959 */
960 if (cs->req->args->user_pages) {
961 err = fuse_copy_fill(cs);
962 if (err)
963 return err;
964 } else {
965 return fuse_ref_page(cs, page, offset, count);
966 }
967 } else if (!cs->len) {
968 if (cs->move_pages && page &&
969 offset == 0 && count == PAGE_SIZE) {
970 err = fuse_try_move_page(cs, pagep);
971 if (err <= 0)
972 return err;
973 } else {
974 err = fuse_copy_fill(cs);
975 if (err)
976 return err;
977 }
978 }
979 if (page) {
980 void *mapaddr = kmap_atomic(page);
981 void *buf = mapaddr + offset;
982 offset += fuse_copy_do(cs, &buf, &count);
983 kunmap_atomic(mapaddr);
984 } else
985 offset += fuse_copy_do(cs, NULL, &count);
986 }
987 if (page && !cs->write)
988 flush_dcache_page(page);
989 return 0;
990 }
991
992 /* Copy pages in the request to/from userspace buffer */
993 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
994 int zeroing)
995 {
996 unsigned i;
997 struct fuse_req *req = cs->req;
998 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
999
1000
1001 for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
1002 int err;
1003 unsigned int offset = ap->descs[i].offset;
1004 unsigned int count = min(nbytes, ap->descs[i].length);
1005
1006 err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
1007 if (err)
1008 return err;
1009
1010 nbytes -= count;
1011 }
1012 return 0;
1013 }
1014
1015 /* Copy a single argument in the request to/from userspace buffer */
1016 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1017 {
1018 while (size) {
1019 if (!cs->len) {
1020 int err = fuse_copy_fill(cs);
1021 if (err)
1022 return err;
1023 }
1024 fuse_copy_do(cs, &val, &size);
1025 }
1026 return 0;
1027 }
1028
1029 /* Copy request arguments to/from userspace buffer */
1030 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1031 unsigned argpages, struct fuse_arg *args,
1032 int zeroing)
1033 {
1034 int err = 0;
1035 unsigned i;
1036
1037 for (i = 0; !err && i < numargs; i++) {
1038 struct fuse_arg *arg = &args[i];
1039 if (i == numargs - 1 && argpages)
1040 err = fuse_copy_pages(cs, arg->size, zeroing);
1041 else
1042 err = fuse_copy_one(cs, arg->value, arg->size);
1043 }
1044 return err;
1045 }
1046
1047 static int forget_pending(struct fuse_iqueue *fiq)
1048 {
1049 return fiq->forget_list_head.next != NULL;
1050 }
1051
1052 static int request_pending(struct fuse_iqueue *fiq)
1053 {
1054 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
1055 forget_pending(fiq);
1056 }
1057
1058 /*
1059 * Transfer an interrupt request to userspace
1060 *
1061 * Unlike other requests this is assembled on demand, without a need
1062 * to allocate a separate fuse_req structure.
1063 *
1064 * Called with fiq->lock held, releases it
1065 */
1066 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
1067 struct fuse_copy_state *cs,
1068 size_t nbytes, struct fuse_req *req)
1069 __releases(fiq->lock)
1070 {
1071 struct fuse_in_header ih;
1072 struct fuse_interrupt_in arg;
1073 unsigned reqsize = sizeof(ih) + sizeof(arg);
1074 int err;
1075
1076 list_del_init(&req->intr_entry);
1077 memset(&ih, 0, sizeof(ih));
1078 memset(&arg, 0, sizeof(arg));
1079 ih.len = reqsize;
1080 ih.opcode = FUSE_INTERRUPT;
1081 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
1082 arg.unique = req->in.h.unique;
1083
1084 spin_unlock(&fiq->lock);
1085 if (nbytes < reqsize)
1086 return -EINVAL;
1087
1088 err = fuse_copy_one(cs, &ih, sizeof(ih));
1089 if (!err)
1090 err = fuse_copy_one(cs, &arg, sizeof(arg));
1091 fuse_copy_finish(cs);
1092
1093 return err ? err : reqsize;
1094 }
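/*
 * Wire view of the above (illustrative): the daemon reads
 * sizeof(fuse_in_header) + sizeof(fuse_interrupt_in) bytes; ih.unique
 * carries the interrupt's own odd ID while arg.unique names the request to
 * interrupt.  A daemon that has not yet seen that request may reply
 * -EAGAIN, which makes the kernel requeue the interrupt (see
 * fuse_dev_do_write()).
 */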
1095
1096 struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
1097 unsigned int max,
1098 unsigned int *countp)
1099 {
1100 struct fuse_forget_link *head = fiq->forget_list_head.next;
1101 struct fuse_forget_link **newhead = &head;
1102 unsigned count;
1103
1104 for (count = 0; *newhead != NULL && count < max; count++)
1105 newhead = &(*newhead)->next;
1106
1107 fiq->forget_list_head.next = *newhead;
1108 *newhead = NULL;
1109 if (fiq->forget_list_head.next == NULL)
1110 fiq->forget_list_tail = &fiq->forget_list_head;
1111
1112 if (countp != NULL)
1113 *countp = count;
1114
1115 return head;
1116 }
1117 EXPORT_SYMBOL(fuse_dequeue_forget);
1118
1119 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
1120 struct fuse_copy_state *cs,
1121 size_t nbytes)
1122 __releases(fiq->lock)
1123 {
1124 int err;
1125 struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
1126 struct fuse_forget_in arg = {
1127 .nlookup = forget->forget_one.nlookup,
1128 };
1129 struct fuse_in_header ih = {
1130 .opcode = FUSE_FORGET,
1131 .nodeid = forget->forget_one.nodeid,
1132 .unique = fuse_get_unique(fiq),
1133 .len = sizeof(ih) + sizeof(arg),
1134 };
1135
1136 spin_unlock(&fiq->lock);
1137 kfree(forget);
1138 if (nbytes < ih.len)
1139 return -EINVAL;
1140
1141 err = fuse_copy_one(cs, &ih, sizeof(ih));
1142 if (!err)
1143 err = fuse_copy_one(cs, &arg, sizeof(arg));
1144 fuse_copy_finish(cs);
1145
1146 if (err)
1147 return err;
1148
1149 return ih.len;
1150 }
1151
1152 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
1153 struct fuse_copy_state *cs, size_t nbytes)
1154 __releases(fiq->lock)
1155 {
1156 int err;
1157 unsigned max_forgets;
1158 unsigned count;
1159 struct fuse_forget_link *head;
1160 struct fuse_batch_forget_in arg = { .count = 0 };
1161 struct fuse_in_header ih = {
1162 .opcode = FUSE_BATCH_FORGET,
1163 .unique = fuse_get_unique(fiq),
1164 .len = sizeof(ih) + sizeof(arg),
1165 };
1166
1167 if (nbytes < ih.len) {
1168 spin_unlock(&fiq->lock);
1169 return -EINVAL;
1170 }
1171
1172 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1173 head = fuse_dequeue_forget(fiq, max_forgets, &count);
1174 spin_unlock(&fiq->lock);
1175
1176 arg.count = count;
1177 ih.len += count * sizeof(struct fuse_forget_one);
1178 err = fuse_copy_one(cs, &ih, sizeof(ih));
1179 if (!err)
1180 err = fuse_copy_one(cs, &arg, sizeof(arg));
1181
1182 while (head) {
1183 struct fuse_forget_link *forget = head;
1184
1185 if (!err) {
1186 err = fuse_copy_one(cs, &forget->forget_one,
1187 sizeof(forget->forget_one));
1188 }
1189 head = forget->next;
1190 kfree(forget);
1191 }
1192
1193 fuse_copy_finish(cs);
1194
1195 if (err)
1196 return err;
1197
1198 return ih.len;
1199 }
1200
1201 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
1202 struct fuse_copy_state *cs,
1203 size_t nbytes)
1204 __releases(fiq->lock)
1205 {
1206 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
1207 return fuse_read_single_forget(fiq, cs, nbytes);
1208 else
1209 return fuse_read_batch_forget(fiq, cs, nbytes);
1210 }
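/*
 * Wire view (illustrative): a batch forget is one fuse_in_header with
 * opcode FUSE_BATCH_FORGET, a fuse_batch_forget_in carrying the count,
 * then that many packed fuse_forget_one entries.  Connections negotiated
 * with minor < 16 only ever receive single FUSE_FORGET messages.
 */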
1211
1212 /*
1213 * Read a single request into the userspace filesystem's buffer. This
1214 * function waits until a request is available, then removes it from
1215 * the pending list and copies request data to userspace buffer. If
1216 * no reply is needed (FORGET) or request has been aborted or there
1217 * was an error during the copying then it's finished by calling
1218 * fuse_request_end(). Otherwise add it to the processing list, and set
1219 * the 'sent' flag.
1220 */
1221 static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
1222 struct fuse_copy_state *cs, size_t nbytes)
1223 {
1224 ssize_t err;
1225 struct fuse_conn *fc = fud->fc;
1226 struct fuse_iqueue *fiq = &fc->iq;
1227 struct fuse_pqueue *fpq = &fud->pq;
1228 struct fuse_req *req;
1229 struct fuse_args *args;
1230 unsigned reqsize;
1231 unsigned int hash;
1232
1233 /*
1234 * Require sane minimum read buffer - that has capacity for fixed part
1235 * of any request header + negotiated max_write room for data.
1236 *
1237 * Historically libfuse reserves 4K for fixed header room, but e.g.
1238 * GlusterFS reserves only 80 bytes
1239 *
1240 * = `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
1241 *
1242 * which is the absolute minimum any sane filesystem should be using
1243 * for header room.
1244 */
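/*
 * Example (illustrative): with a negotiated max_write of 128K, the
 * daemon's read buffer must hold 128K plus the fixed header room, so the
 * FUSE_MIN_READ_BUFFER floor only matters for small max_write values.
 */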
1245 if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
1246 sizeof(struct fuse_in_header) +
1247 sizeof(struct fuse_write_in) +
1248 fc->max_write))
1249 return -EINVAL;
1250
1251 restart:
1252 for (;;) {
1253 spin_lock(&fiq->lock);
1254 if (!fiq->connected || request_pending(fiq))
1255 break;
1256 spin_unlock(&fiq->lock);
1257
1258 if (file->f_flags & O_NONBLOCK)
1259 return -EAGAIN;
1260 err = wait_event_interruptible_exclusive(fiq->waitq,
1261 !fiq->connected || request_pending(fiq));
1262 if (err)
1263 return err;
1264 }
1265
1266 if (!fiq->connected) {
1267 err = fc->aborted ? -ECONNABORTED : -ENODEV;
1268 goto err_unlock;
1269 }
1270
1271 if (!list_empty(&fiq->interrupts)) {
1272 req = list_entry(fiq->interrupts.next, struct fuse_req,
1273 intr_entry);
1274 return fuse_read_interrupt(fiq, cs, nbytes, req);
1275 }
1276
1277 if (forget_pending(fiq)) {
1278 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
1279 return fuse_read_forget(fc, fiq, cs, nbytes);
1280
1281 if (fiq->forget_batch <= -8)
1282 fiq->forget_batch = 16;
1283 }
1284
1285 req = list_entry(fiq->pending.next, struct fuse_req, list);
1286 clear_bit(FR_PENDING, &req->flags);
1287 list_del_init(&req->list);
1288 spin_unlock(&fiq->lock);
1289
1290 args = req->args;
1291 reqsize = req->in.h.len;
1292
1293 /* If request is too large, reply with an error and restart the read */
1294 if (nbytes < reqsize) {
1295 req->out.h.error = -EIO;
1296 /* SETXATTR is special, since its data payload may be too large */
1297 if (args->opcode == FUSE_SETXATTR)
1298 req->out.h.error = -E2BIG;
1299 fuse_request_end(req);
1300 goto restart;
1301 }
1302 spin_lock(&fpq->lock);
1303 /*
1304 * Must not put request on fpq->io queue after having been shut down by
1305 * fuse_abort_conn()
1306 */
1307 if (!fpq->connected) {
1308 req->out.h.error = err = -ECONNABORTED;
1309 goto out_end;
1310
1311 }
1312 list_add(&req->list, &fpq->io);
1313 spin_unlock(&fpq->lock);
1314 cs->req = req;
1315 err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
1316 if (!err)
1317 err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
1318 (struct fuse_arg *) args->in_args, 0);
1319 fuse_copy_finish(cs);
1320 spin_lock(&fpq->lock);
1321 clear_bit(FR_LOCKED, &req->flags);
1322 if (!fpq->connected) {
1323 err = fc->aborted ? -ECONNABORTED : -ENODEV;
1324 goto out_end;
1325 }
1326 if (err) {
1327 req->out.h.error = -EIO;
1328 goto out_end;
1329 }
1330 if (!test_bit(FR_ISREPLY, &req->flags)) {
1331 err = reqsize;
1332 goto out_end;
1333 }
1334 hash = fuse_req_hash(req->in.h.unique);
1335 list_move_tail(&req->list, &fpq->processing[hash]);
1336 __fuse_get_request(req);
1337 set_bit(FR_SENT, &req->flags);
1338 spin_unlock(&fpq->lock);
1339 /* matches barrier in request_wait_answer() */
1340 smp_mb__after_atomic();
1341 if (test_bit(FR_INTERRUPTED, &req->flags))
1342 queue_interrupt(req);
1343 fuse_put_request(req);
1344
1345 return reqsize;
1346
1347 out_end:
1348 if (!test_bit(FR_PRIVATE, &req->flags))
1349 list_del_init(&req->list);
1350 spin_unlock(&fpq->lock);
1351 fuse_request_end(req);
1352 return err;
1353
1354 err_unlock:
1355 spin_unlock(&fiq->lock);
1356 return err;
1357 }
1358
1359 static int fuse_dev_open(struct inode *inode, struct file *file)
1360 {
1361 /*
1362 * The fuse device's file's private_data is used to hold
1363 * the fuse_conn(ection) when it is mounted, and is used to
1364 * keep track of whether the file has been mounted already.
1365 */
1366 file->private_data = NULL;
1367 return 0;
1368 }
1369
1370 static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
1371 {
1372 struct fuse_copy_state cs;
1373 struct file *file = iocb->ki_filp;
1374 struct fuse_dev *fud = fuse_get_dev(file);
1375
1376 if (!fud)
1377 return -EPERM;
1378
1379 if (!iter_is_iovec(to))
1380 return -EINVAL;
1381
1382 fuse_copy_init(&cs, 1, to);
1383
1384 return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
1385 }
1386
1387 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1388 struct pipe_inode_info *pipe,
1389 size_t len, unsigned int flags)
1390 {
1391 int total, ret;
1392 int page_nr = 0;
1393 struct pipe_buffer *bufs;
1394 struct fuse_copy_state cs;
1395 struct fuse_dev *fud = fuse_get_dev(in);
1396
1397 if (!fud)
1398 return -EPERM;
1399
1400 bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
1401 GFP_KERNEL);
1402 if (!bufs)
1403 return -ENOMEM;
1404
1405 fuse_copy_init(&cs, 1, NULL);
1406 cs.pipebufs = bufs;
1407 cs.pipe = pipe;
1408 ret = fuse_dev_do_read(fud, in, &cs, len);
1409 if (ret < 0)
1410 goto out;
1411
1412 if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
1413 ret = -EIO;
1414 goto out;
1415 }
1416
1417 for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
1418 /*
1419 * Need to be careful about this. Having buf->ops in module
1420 * code can Oops if the buffer persists after module unload.
1421 */
1422 bufs[page_nr].ops = &nosteal_pipe_buf_ops;
1423 bufs[page_nr].flags = 0;
1424 ret = add_to_pipe(pipe, &bufs[page_nr++]);
1425 if (unlikely(ret < 0))
1426 break;
1427 }
1428 if (total)
1429 ret = total;
1430 out:
1431 for (; page_nr < cs.nr_segs; page_nr++)
1432 put_page(bufs[page_nr].page);
1433
1434 kvfree(bufs);
1435 return ret;
1436 }
1437
1438 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1439 struct fuse_copy_state *cs)
1440 {
1441 struct fuse_notify_poll_wakeup_out outarg;
1442 int err = -EINVAL;
1443
1444 if (size != sizeof(outarg))
1445 goto err;
1446
1447 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1448 if (err)
1449 goto err;
1450
1451 fuse_copy_finish(cs);
1452 return fuse_notify_poll_wakeup(fc, &outarg);
1453
1454 err:
1455 fuse_copy_finish(cs);
1456 return err;
1457 }
1458
1459 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1460 struct fuse_copy_state *cs)
1461 {
1462 struct fuse_notify_inval_inode_out outarg;
1463 int err = -EINVAL;
1464
1465 if (size != sizeof(outarg))
1466 goto err;
1467
1468 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1469 if (err)
1470 goto err;
1471 fuse_copy_finish(cs);
1472
1473 down_read(&fc->killsb);
1474 err = fuse_reverse_inval_inode(fc, outarg.ino,
1475 outarg.off, outarg.len);
1476 up_read(&fc->killsb);
1477 return err;
1478
1479 err:
1480 fuse_copy_finish(cs);
1481 return err;
1482 }
1483
1484 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1485 struct fuse_copy_state *cs)
1486 {
1487 struct fuse_notify_inval_entry_out outarg;
1488 int err = -ENOMEM;
1489 char *buf;
1490 struct qstr name;
1491
1492 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1493 if (!buf)
1494 goto err;
1495
1496 err = -EINVAL;
1497 if (size < sizeof(outarg))
1498 goto err;
1499
1500 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1501 if (err)
1502 goto err;
1503
1504 err = -ENAMETOOLONG;
1505 if (outarg.namelen > FUSE_NAME_MAX)
1506 goto err;
1507
1508 err = -EINVAL;
1509 if (size != sizeof(outarg) + outarg.namelen + 1)
1510 goto err;
1511
1512 name.name = buf;
1513 name.len = outarg.namelen;
1514 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1515 if (err)
1516 goto err;
1517 fuse_copy_finish(cs);
1518 buf[outarg.namelen] = 0;
1519
1520 down_read(&fc->killsb);
1521 err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name);
1522 up_read(&fc->killsb);
1523 kfree(buf);
1524 return err;
1525
1526 err:
1527 kfree(buf);
1528 fuse_copy_finish(cs);
1529 return err;
1530 }
1531
1532 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1533 struct fuse_copy_state *cs)
1534 {
1535 struct fuse_notify_delete_out outarg;
1536 int err = -ENOMEM;
1537 char *buf;
1538 struct qstr name;
1539
1540 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1541 if (!buf)
1542 goto err;
1543
1544 err = -EINVAL;
1545 if (size < sizeof(outarg))
1546 goto err;
1547
1548 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1549 if (err)
1550 goto err;
1551
1552 err = -ENAMETOOLONG;
1553 if (outarg.namelen > FUSE_NAME_MAX)
1554 goto err;
1555
1556 err = -EINVAL;
1557 if (size != sizeof(outarg) + outarg.namelen + 1)
1558 goto err;
1559
1560 name.name = buf;
1561 name.len = outarg.namelen;
1562 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1563 if (err)
1564 goto err;
1565 fuse_copy_finish(cs);
1566 buf[outarg.namelen] = 0;
1567
1568 down_read(&fc->killsb);
1569 err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name);
1570 up_read(&fc->killsb);
1571 kfree(buf);
1572 return err;
1573
1574 err:
1575 kfree(buf);
1576 fuse_copy_finish(cs);
1577 return err;
1578 }
1579
1580 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1581 struct fuse_copy_state *cs)
1582 {
1583 struct fuse_notify_store_out outarg;
1584 struct inode *inode;
1585 struct address_space *mapping;
1586 u64 nodeid;
1587 int err;
1588 pgoff_t index;
1589 unsigned int offset;
1590 unsigned int num;
1591 loff_t file_size;
1592 loff_t end;
1593
1594 err = -EINVAL;
1595 if (size < sizeof(outarg))
1596 goto out_finish;
1597
1598 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1599 if (err)
1600 goto out_finish;
1601
1602 err = -EINVAL;
1603 if (size - sizeof(outarg) != outarg.size)
1604 goto out_finish;
1605
1606 nodeid = outarg.nodeid;
1607
1608 down_read(&fc->killsb);
1609
1610 err = -ENOENT;
1611 inode = fuse_ilookup(fc, nodeid, NULL);
1612 if (!inode)
1613 goto out_up_killsb;
1614
1615 mapping = inode->i_mapping;
1616 index = outarg.offset >> PAGE_SHIFT;
1617 offset = outarg.offset & ~PAGE_MASK;
1618 file_size = i_size_read(inode);
1619 end = outarg.offset + outarg.size;
1620 if (end > file_size) {
1621 file_size = end;
1622 fuse_write_update_size(inode, file_size);
1623 }
1624
1625 num = outarg.size;
1626 while (num) {
1627 struct page *page;
1628 unsigned int this_num;
1629
1630 err = -ENOMEM;
1631 page = find_or_create_page(mapping, index,
1632 mapping_gfp_mask(mapping));
1633 if (!page)
1634 goto out_iput;
1635
1636 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1637 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1638 if (!err && offset == 0 &&
1639 (this_num == PAGE_SIZE || file_size == end))
1640 SetPageUptodate(page);
1641 unlock_page(page);
1642 put_page(page);
1643
1644 if (err)
1645 goto out_iput;
1646
1647 num -= this_num;
1648 offset = 0;
1649 index++;
1650 }
1651
1652 err = 0;
1653
1654 out_iput:
1655 iput(inode);
1656 out_up_killsb:
1657 up_read(&fc->killsb);
1658 out_finish:
1659 fuse_copy_finish(cs);
1660 return err;
1661 }
1662
1663 struct fuse_retrieve_args {
1664 struct fuse_args_pages ap;
1665 struct fuse_notify_retrieve_in inarg;
1666 };
1667
1668 static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
1669 int error)
1670 {
1671 struct fuse_retrieve_args *ra =
1672 container_of(args, typeof(*ra), ap.args);
1673
1674 release_pages(ra->ap.pages, ra->ap.num_pages);
1675 kfree(ra);
1676 }
1677
1678 static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
1679 struct fuse_notify_retrieve_out *outarg)
1680 {
1681 int err;
1682 struct address_space *mapping = inode->i_mapping;
1683 pgoff_t index;
1684 loff_t file_size;
1685 unsigned int num;
1686 unsigned int offset;
1687 size_t total_len = 0;
1688 unsigned int num_pages;
1689 struct fuse_conn *fc = fm->fc;
1690 struct fuse_retrieve_args *ra;
1691 size_t args_size = sizeof(*ra);
1692 struct fuse_args_pages *ap;
1693 struct fuse_args *args;
1694
1695 offset = outarg->offset & ~PAGE_MASK;
1696 file_size = i_size_read(inode);
1697
1698 num = min(outarg->size, fc->max_write);
1699 if (outarg->offset > file_size)
1700 num = 0;
1701 else if (outarg->offset + num > file_size)
1702 num = file_size - outarg->offset;
1703
1704 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1705 num_pages = min(num_pages, fc->max_pages);
1706
1707 args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
1708
1709 ra = kzalloc(args_size, GFP_KERNEL);
1710 if (!ra)
1711 return -ENOMEM;
1712
1713 ap = &ra->ap;
1714 ap->pages = (void *) (ra + 1);
1715 ap->descs = (void *) (ap->pages + num_pages);
1716
1717 args = &ap->args;
1718 args->nodeid = outarg->nodeid;
1719 args->opcode = FUSE_NOTIFY_REPLY;
1720 args->in_numargs = 2;
1721 args->in_pages = true;
1722 args->end = fuse_retrieve_end;
1723
1724 index = outarg->offset >> PAGE_SHIFT;
1725
1726 while (num && ap->num_pages < num_pages) {
1727 struct page *page;
1728 unsigned int this_num;
1729
1730 page = find_get_page(mapping, index);
1731 if (!page)
1732 break;
1733
1734 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1735 ap->pages[ap->num_pages] = page;
1736 ap->descs[ap->num_pages].offset = offset;
1737 ap->descs[ap->num_pages].length = this_num;
1738 ap->num_pages++;
1739
1740 offset = 0;
1741 num -= this_num;
1742 total_len += this_num;
1743 index++;
1744 }
1745 ra->inarg.offset = outarg->offset;
1746 ra->inarg.size = total_len;
1747 args->in_args[0].size = sizeof(ra->inarg);
1748 args->in_args[0].value = &ra->inarg;
1749 args->in_args[1].size = total_len;
1750
1751 err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
1752 if (err)
1753 fuse_retrieve_end(fm, args, err);
1754
1755 return err;
1756 }
1757
1758 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1759 struct fuse_copy_state *cs)
1760 {
1761 struct fuse_notify_retrieve_out outarg;
1762 struct fuse_mount *fm;
1763 struct inode *inode;
1764 u64 nodeid;
1765 int err;
1766
1767 err = -EINVAL;
1768 if (size != sizeof(outarg))
1769 goto copy_finish;
1770
1771 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1772 if (err)
1773 goto copy_finish;
1774
1775 fuse_copy_finish(cs);
1776
1777 down_read(&fc->killsb);
1778 err = -ENOENT;
1779 nodeid = outarg.nodeid;
1780
1781 inode = fuse_ilookup(fc, nodeid, &fm);
1782 if (inode) {
1783 err = fuse_retrieve(fm, inode, &outarg);
1784 iput(inode);
1785 }
1786 up_read(&fc->killsb);
1787
1788 return err;
1789
1790 copy_finish:
1791 fuse_copy_finish(cs);
1792 return err;
1793 }
1794
1795 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1796 unsigned int size, struct fuse_copy_state *cs)
1797 {
1798 /* Don't try to move pages (yet) */
1799 cs->move_pages = 0;
1800
1801 switch (code) {
1802 case FUSE_NOTIFY_POLL:
1803 return fuse_notify_poll(fc, size, cs);
1804
1805 case FUSE_NOTIFY_INVAL_INODE:
1806 return fuse_notify_inval_inode(fc, size, cs);
1807
1808 case FUSE_NOTIFY_INVAL_ENTRY:
1809 return fuse_notify_inval_entry(fc, size, cs);
1810
1811 case FUSE_NOTIFY_STORE:
1812 return fuse_notify_store(fc, size, cs);
1813
1814 case FUSE_NOTIFY_RETRIEVE:
1815 return fuse_notify_retrieve(fc, size, cs);
1816
1817 case FUSE_NOTIFY_DELETE:
1818 return fuse_notify_delete(fc, size, cs);
1819
1820 default:
1821 fuse_copy_finish(cs);
1822 return -EINVAL;
1823 }
1824 }
1825
1826 /* Look up request on processing list by unique ID */
1827 static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
1828 {
1829 unsigned int hash = fuse_req_hash(unique);
1830 struct fuse_req *req;
1831
1832 list_for_each_entry(req, &fpq->processing[hash], list) {
1833 if (req->in.h.unique == unique)
1834 return req;
1835 }
1836 return NULL;
1837 }
1838
1839 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
1840 unsigned nbytes)
1841 {
1842 unsigned reqsize = sizeof(struct fuse_out_header);
1843
1844 reqsize += fuse_len_args(args->out_numargs, args->out_args);
1845
1846 if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
1847 return -EINVAL;
1848 else if (reqsize > nbytes) {
1849 struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
1850 unsigned diffsize = reqsize - nbytes;
1851
1852 if (diffsize > lastarg->size)
1853 return -EINVAL;
1854 lastarg->size -= diffsize;
1855 }
1856 return fuse_copy_args(cs, args->out_numargs, args->out_pages,
1857 args->out_args, args->page_zeroing);
1858 }
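/*
 * Example (illustrative): a FUSE_READ reply may legitimately carry fewer
 * bytes than requested.  out_argvar marks the last out argument as
 * variable sized, so a short reply merely shrinks it; any other mismatch
 * with the written length is rejected above with -EINVAL.
 */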
1859
1860 /*
1861 * Write a single reply to a request. First the header is copied from
1862 * the write buffer. The request is then searched on the processing
1863 * list by the unique ID found in the header. If found, then remove
1864 * it from the list and copy the rest of the buffer to the request.
1865 * The request is finished by calling fuse_request_end().
1866 */
1867 static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
1868 struct fuse_copy_state *cs, size_t nbytes)
1869 {
1870 int err;
1871 struct fuse_conn *fc = fud->fc;
1872 struct fuse_pqueue *fpq = &fud->pq;
1873 struct fuse_req *req;
1874 struct fuse_out_header oh;
1875
1876 err = -EINVAL;
1877 if (nbytes < sizeof(struct fuse_out_header))
1878 goto out;
1879
1880 err = fuse_copy_one(cs, &oh, sizeof(oh));
1881 if (err)
1882 goto copy_finish;
1883
1884 err = -EINVAL;
1885 if (oh.len != nbytes)
1886 goto copy_finish;
1887
1888 /*
1889 * A zero oh.unique indicates an unsolicited notification message,
1890 * and the error field then carries the notification code.
1891 */
1892 if (!oh.unique) {
1893 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1894 goto out;
1895 }
1896
1897 err = -EINVAL;
1898 if (oh.error <= -512 || oh.error > 0)
1899 goto copy_finish;
1900
1901 spin_lock(&fpq->lock);
1902 req = NULL;
1903 if (fpq->connected)
1904 req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
1905
1906 err = -ENOENT;
1907 if (!req) {
1908 spin_unlock(&fpq->lock);
1909 goto copy_finish;
1910 }
1911
1912 /* Is it an interrupt reply ID? */
1913 if (oh.unique & FUSE_INT_REQ_BIT) {
1914 __fuse_get_request(req);
1915 spin_unlock(&fpq->lock);
1916
1917 err = 0;
1918 if (nbytes != sizeof(struct fuse_out_header))
1919 err = -EINVAL;
1920 else if (oh.error == -ENOSYS)
1921 fc->no_interrupt = 1;
1922 else if (oh.error == -EAGAIN)
1923 err = queue_interrupt(req);
1924
1925 fuse_put_request(req);
1926
1927 goto copy_finish;
1928 }
1929
1930 clear_bit(FR_SENT, &req->flags);
1931 list_move(&req->list, &fpq->io);
1932 req->out.h = oh;
1933 set_bit(FR_LOCKED, &req->flags);
1934 spin_unlock(&fpq->lock);
1935 cs->req = req;
1936 if (!req->args->page_replace)
1937 cs->move_pages = 0;
1938
1939 if (oh.error)
1940 err = nbytes != sizeof(oh) ? -EINVAL : 0;
1941 else
1942 err = copy_out_args(cs, req->args, nbytes);
1943 fuse_copy_finish(cs);
1944
1945 if (!err && req->in.h.opcode == FUSE_CANONICAL_PATH) {
1946 char *path = (char *)req->args->out_args[0].value;
1947
1948 path[req->args->out_args[0].size - 1] = 0;
1949 req->out.h.error =
1950 kern_path(path, 0, req->args->canonical_path);
1951 }
1952
1953 spin_lock(&fpq->lock);
1954 clear_bit(FR_LOCKED, &req->flags);
1955 if (!fpq->connected)
1956 err = -ENOENT;
1957 else if (err)
1958 req->out.h.error = -EIO;
1959 if (!test_bit(FR_PRIVATE, &req->flags))
1960 list_del_init(&req->list);
1961 spin_unlock(&fpq->lock);
1962
1963 fuse_request_end(req);
1964 out:
1965 return err ? err : nbytes;
1966
1967 copy_finish:
1968 fuse_copy_finish(cs);
1969 goto out;
1970 }
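/*
 * Illustrative daemon side (userspace, hypothetical): a reply is a single
 * write() whose length equals oh.len, e.g.
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh) + sizeof(outarg),
 *		.error  = 0,
 *		.unique = in_header_unique,
 *	};
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) }, { &outarg, sizeof(outarg) },
 *	};
 *	writev(fuse_fd, iov, 2);
 *
 * Setting .unique = 0 instead turns the write into an unsolicited
 * notification, with the fuse_notify_code carried in .error.
 */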
1971
1972 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
1973 {
1974 struct fuse_copy_state cs;
1975 struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
1976
1977 if (!fud)
1978 return -EPERM;
1979
1980 if (!iter_is_iovec(from))
1981 return -EINVAL;
1982
1983 fuse_copy_init(&cs, 0, from);
1984
1985 return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
1986 }
1987
1988 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1989 struct file *out, loff_t *ppos,
1990 size_t len, unsigned int flags)
1991 {
1992 unsigned int head, tail, mask, count;
1993 unsigned nbuf;
1994 unsigned idx;
1995 struct pipe_buffer *bufs;
1996 struct fuse_copy_state cs;
1997 struct fuse_dev *fud;
1998 size_t rem;
1999 ssize_t ret;
2000
2001 fud = fuse_get_dev(out);
2002 if (!fud)
2003 return -EPERM;
2004
2005 pipe_lock(pipe);
2006
2007 head = pipe->head;
2008 tail = pipe->tail;
2009 mask = pipe->ring_size - 1;
2010 count = head - tail;
2011
2012 bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
2013 if (!bufs) {
2014 pipe_unlock(pipe);
2015 return -ENOMEM;
2016 }
2017
2018 nbuf = 0;
2019 rem = 0;
2020 for (idx = tail; idx != head && rem < len; idx++)
2021 rem += pipe->bufs[idx & mask].len;
2022
2023 ret = -EINVAL;
2024 if (rem < len)
2025 goto out_free;
2026
2027 rem = len;
2028 while (rem) {
2029 struct pipe_buffer *ibuf;
2030 struct pipe_buffer *obuf;
2031
2032 if (WARN_ON(nbuf >= count || tail == head))
2033 goto out_free;
2034
2035 ibuf = &pipe->bufs[tail & mask];
2036 obuf = &bufs[nbuf];
2037
2038 if (rem >= ibuf->len) {
2039 *obuf = *ibuf;
2040 ibuf->ops = NULL;
2041 tail++;
2042 pipe->tail = tail;
2043 } else {
2044 if (!pipe_buf_get(pipe, ibuf))
2045 goto out_free;
2046
2047 *obuf = *ibuf;
2048 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2049 obuf->len = rem;
2050 ibuf->offset += obuf->len;
2051 ibuf->len -= obuf->len;
2052 }
2053 nbuf++;
2054 rem -= obuf->len;
2055 }
2056 pipe_unlock(pipe);
2057
2058 fuse_copy_init(&cs, 0, NULL);
2059 cs.pipebufs = bufs;
2060 cs.nr_segs = nbuf;
2061 cs.pipe = pipe;
2062
2063 if (flags & SPLICE_F_MOVE)
2064 cs.move_pages = 1;
2065
2066 ret = fuse_dev_do_write(fud, &cs, len);
2067
2068 pipe_lock(pipe);
2069 out_free:
2070 for (idx = 0; idx < nbuf; idx++) {
2071 struct pipe_buffer *buf = &bufs[idx];
2072
2073 if (buf->ops)
2074 pipe_buf_release(pipe, buf);
2075 }
2076 pipe_unlock(pipe);
2077
2078 kvfree(bufs);
2079 return ret;
2080 }
2081
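/*
 * Illustrative userspace sketch (not part of this file): instead of
 * copying, a daemon may splice a fully formed reply (header plus data)
 * from a pipe into the device, optionally allowing page moves; this is
 * handled by fuse_dev_splice_write() above.  pipe_rd, fuse_fd and
 * reply_len are hypothetical.
 *
 *	splice(pipe_rd, NULL, fuse_fd, NULL, reply_len, SPLICE_F_MOVE);
 */
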
2082 static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
2083 {
2084 __poll_t mask = EPOLLOUT | EPOLLWRNORM;
2085 struct fuse_iqueue *fiq;
2086 struct fuse_dev *fud = fuse_get_dev(file);
2087
2088 if (!fud)
2089 return EPOLLERR;
2090
2091 fiq = &fud->fc->iq;
2092 poll_wait(file, &fiq->waitq, wait);
2093
2094 spin_lock(&fiq->lock);
2095 if (!fiq->connected)
2096 mask = EPOLLERR;
2097 else if (request_pending(fiq))
2098 mask |= EPOLLIN | EPOLLRDNORM;
2099 spin_unlock(&fiq->lock);
2100
2101 return mask;
2102 }
2103
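/*
 * Illustrative userspace sketch (not part of this file): a daemon can
 * wait for the next request with poll(2), served by fuse_dev_poll()
 * above, instead of blocking in read(2).  fuse_fd, buf and bufsize are
 * hypothetical.
 *
 *	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fuse_fd, buf, bufsize);
 */
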
2104 /* Abort all requests on the given list (pending or processing) */
2105 static void end_requests(struct list_head *head)
2106 {
2107 while (!list_empty(head)) {
2108 struct fuse_req *req;
2109 req = list_entry(head->next, struct fuse_req, list);
2110 req->out.h.error = -ECONNABORTED;
2111 clear_bit(FR_SENT, &req->flags);
2112 list_del_init(&req->list);
2113 fuse_request_end(req);
2114 }
2115 }
2116
2117 static void end_polls(struct fuse_conn *fc)
2118 {
2119 struct rb_node *p;
2120
2121 p = rb_first(&fc->polled_files);
2122
2123 while (p) {
2124 struct fuse_file *ff;
2125 ff = rb_entry(p, struct fuse_file, polled_node);
2126 wake_up_interruptible_all(&ff->poll_wait);
2127
2128 p = rb_next(p);
2129 }
2130 }
2131
2132 /*
2133 * Abort all requests.
2134 *
2135 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2136 * filesystem.
2137 *
2138 * The same effect is usually achievable through killing the filesystem daemon
2139 * and all users of the filesystem. The exception is the combination of an
2140 * asynchronous request and the tricky deadlock (see
2141 * Documentation/filesystems/fuse.rst).
2142 *
2143  * Aborting requests under I/O goes as follows: 1: Separate out the unlocked
2144  * requests; these should be finished off immediately.  Locked requests will
2145  * be finished once unlocked; see unlock_request().  2: Finish off the
2146  * unlocked requests.  Some requests may complete before we get to them;
2147  * that is fine, since such a request is removed from the list before we
2148  * touch it.
2149 */
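/*
 * Userspace normally reaches this through the sysfs abort file, e.g.
 * "echo 1 > /sys/fs/fuse/connections/<dev>/abort"; see
 * Documentation/filesystems/fuse.rst.
 */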
2150 void fuse_abort_conn(struct fuse_conn *fc)
2151 {
2152 struct fuse_iqueue *fiq = &fc->iq;
2153
2154 spin_lock(&fc->lock);
2155 if (fc->connected) {
2156 struct fuse_dev *fud;
2157 struct fuse_req *req, *next;
2158 LIST_HEAD(to_end);
2159 unsigned int i;
2160
2161 /* Background queuing checks fc->connected under bg_lock */
2162 spin_lock(&fc->bg_lock);
2163 fc->connected = 0;
2164 spin_unlock(&fc->bg_lock);
2165
2166 fuse_set_initialized(fc);
2167 list_for_each_entry(fud, &fc->devices, entry) {
2168 struct fuse_pqueue *fpq = &fud->pq;
2169
2170 spin_lock(&fpq->lock);
2171 fpq->connected = 0;
2172 list_for_each_entry_safe(req, next, &fpq->io, list) {
2173 req->out.h.error = -ECONNABORTED;
2174 spin_lock(&req->waitq.lock);
2175 set_bit(FR_ABORTED, &req->flags);
2176 if (!test_bit(FR_LOCKED, &req->flags)) {
2177 set_bit(FR_PRIVATE, &req->flags);
2178 __fuse_get_request(req);
2179 list_move(&req->list, &to_end);
2180 }
2181 spin_unlock(&req->waitq.lock);
2182 }
2183 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
2184 list_splice_tail_init(&fpq->processing[i],
2185 &to_end);
2186 spin_unlock(&fpq->lock);
2187 }
2188 spin_lock(&fc->bg_lock);
2189 fc->blocked = 0;
2190 fc->max_background = UINT_MAX;
2191 flush_bg_queue(fc);
2192 spin_unlock(&fc->bg_lock);
2193
2194 spin_lock(&fiq->lock);
2195 fiq->connected = 0;
2196 list_for_each_entry(req, &fiq->pending, list)
2197 clear_bit(FR_PENDING, &req->flags);
2198 list_splice_tail_init(&fiq->pending, &to_end);
2199 while (forget_pending(fiq))
2200 kfree(fuse_dequeue_forget(fiq, 1, NULL));
2201 wake_up_all(&fiq->waitq);
2202 spin_unlock(&fiq->lock);
2203 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2204 end_polls(fc);
2205 wake_up_all(&fc->blocked_waitq);
2206 spin_unlock(&fc->lock);
2207
2208 end_requests(&to_end);
2209 } else {
2210 spin_unlock(&fc->lock);
2211 }
2212 }
2213 EXPORT_SYMBOL_GPL(fuse_abort_conn);
2214
2215 void fuse_wait_aborted(struct fuse_conn *fc)
2216 {
2217 /* matches implicit memory barrier in fuse_drop_waiting() */
2218 smp_mb();
2219 wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
2220 }
2221
2222 int fuse_dev_release(struct inode *inode, struct file *file)
2223 {
2224 struct fuse_dev *fud = fuse_get_dev(file);
2225
2226 if (fud) {
2227 struct fuse_conn *fc = fud->fc;
2228 struct fuse_pqueue *fpq = &fud->pq;
2229 LIST_HEAD(to_end);
2230 unsigned int i;
2231
2232 spin_lock(&fpq->lock);
2233 WARN_ON(!list_empty(&fpq->io));
2234 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
2235 list_splice_init(&fpq->processing[i], &to_end);
2236 spin_unlock(&fpq->lock);
2237
2238 end_requests(&to_end);
2239
2240 /* Are we the last open device? */
2241 if (atomic_dec_and_test(&fc->dev_count)) {
2242 WARN_ON(fc->iq.fasync != NULL);
2243 fuse_abort_conn(fc);
2244 }
2245 fuse_dev_free(fud);
2246 }
2247 return 0;
2248 }
2249 EXPORT_SYMBOL_GPL(fuse_dev_release);
2250
2251 static int fuse_dev_fasync(int fd, struct file *file, int on)
2252 {
2253 struct fuse_dev *fud = fuse_get_dev(file);
2254
2255 if (!fud)
2256 return -EPERM;
2257
2258 /* No locking - fasync_helper does its own locking */
2259 return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
2260 }
2261
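/*
 * Illustrative userspace sketch (not part of this file): standard
 * O_ASYNC setup for the SIGIO delivery wired up by fuse_dev_fasync()
 * above.  fuse_fd is hypothetical.
 *
 *	fcntl(fuse_fd, F_SETOWN, getpid());
 *	fcntl(fuse_fd, F_SETFL, fcntl(fuse_fd, F_GETFL) | O_ASYNC);
 */
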
2262 static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
2263 {
2264 struct fuse_dev *fud;
2265
2266 if (new->private_data)
2267 return -EINVAL;
2268
2269 fud = fuse_dev_alloc_install(fc);
2270 if (!fud)
2271 return -ENOMEM;
2272
2273 new->private_data = fud;
2274 atomic_inc(&fc->dev_count);
2275
2276 return 0;
2277 }
2278
2279 static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2280 unsigned long arg)
2281 {
2282 int res;
2283 int oldfd;
2284 struct fuse_dev *fud = NULL;
2285
2286 switch (cmd) {
2287 case FUSE_DEV_IOC_CLONE:
2288 res = -EFAULT;
2289 if (!get_user(oldfd, (__u32 __user *)arg)) {
2290 struct file *old = fget(oldfd);
2291
2292 res = -EINVAL;
2293 if (old) {
2294 /*
2295 * Check against file->f_op because CUSE
2296 * uses the same ioctl handler.
2297 */
2298 if (old->f_op == file->f_op &&
2299 old->f_cred->user_ns ==
2300 file->f_cred->user_ns)
2301 fud = fuse_get_dev(old);
2302
2303 if (fud) {
2304 mutex_lock(&fuse_mutex);
2305 res = fuse_device_clone(fud->fc, file);
2306 mutex_unlock(&fuse_mutex);
2307 }
2308 fput(old);
2309 }
2310 }
2311 break;
2312 case FUSE_DEV_IOC_PASSTHROUGH_OPEN:
2313 res = -EFAULT;
2314 if (!get_user(oldfd, (__u32 __user *)arg)) {
2315 res = -EINVAL;
2316 fud = fuse_get_dev(file);
2317 if (fud)
2318 res = fuse_passthrough_open(fud, oldfd);
2319 }
2320 break;
2321 default:
2322 res = -ENOTTY;
2323 break;
2324 }
2325 return res;
2326 }
2327
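/*
 * Illustrative userspace sketch (not part of this file): a multi-threaded
 * daemon gives each worker a private queue by cloning the session onto a
 * fresh /dev/fuse fd (the FUSE_DEV_IOC_CLONE case above).  orig_fuse_fd
 * is hypothetical and error handling is omitted.
 *
 *	int worker_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);
 *	uint32_t session_fd = orig_fuse_fd;
 *	ioctl(worker_fd, FUSE_DEV_IOC_CLONE, &session_fd);
 *	// worker_fd now serves requests for the same connection
 */
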
2328 const struct file_operations fuse_dev_operations = {
2329 .owner = THIS_MODULE,
2330 .open = fuse_dev_open,
2331 .llseek = no_llseek,
2332 .read_iter = fuse_dev_read,
2333 .splice_read = fuse_dev_splice_read,
2334 .write_iter = fuse_dev_write,
2335 .splice_write = fuse_dev_splice_write,
2336 .poll = fuse_dev_poll,
2337 .release = fuse_dev_release,
2338 .fasync = fuse_dev_fasync,
2339 .unlocked_ioctl = fuse_dev_ioctl,
2340 .compat_ioctl = compat_ptr_ioctl,
2341 };
2342 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2343
2344 static struct miscdevice fuse_miscdevice = {
2345 .minor = FUSE_MINOR,
2346 .name = "fuse",
2347 .fops = &fuse_dev_operations,
2348 };
2349
2350 int __init fuse_dev_init(void)
2351 {
2352 int err = -ENOMEM;
2353 fuse_req_cachep = kmem_cache_create("fuse_request",
2354 sizeof(struct fuse_req),
2355 0, 0, NULL);
2356 if (!fuse_req_cachep)
2357 goto out;
2358
2359 err = misc_register(&fuse_miscdevice);
2360 if (err)
2361 goto out_cache_clean;
2362
2363 return 0;
2364
2365 out_cache_clean:
2366 kmem_cache_destroy(fuse_req_cachep);
2367 out:
2368 return err;
2369 }
2370
2371 void fuse_dev_cleanup(void)
2372 {
2373 misc_deregister(&fuse_miscdevice);
2374 kmem_cache_destroy(fuse_req_cachep);
2375 }
2376