/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

/* Ordinary requests have even IDs, while interrupt IDs are odd */
#define FUSE_INT_REQ_BIT (1ULL << 0)
#define FUSE_REQ_ID_STEP (1ULL << 1)
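/*
 * Example: fuse_get_unique() hands out 2, 4, 6, ... for ordinary
 * requests; the interrupt for request 4 is sent with unique ID
 * (4 | FUSE_INT_REQ_BIT) == 5, so a reply's ID identifies both the
 * original request and whether it answers the interrupt.
 */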

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return READ_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	__set_bit(FR_PENDING, &req->flags);
	req->fm = fm;
}

static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
	if (req)
		fuse_request_init(fm, req);

	return req;
}

static void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static void fuse_drop_waiting(struct fuse_conn *fc)
{
	/*
	 * Lockless check of fc->connected is okay, because atomic_dec_and_test()
	 * provides a memory barrier matched with the one in fuse_wait_aborted()
	 * to ensure no wake-up is missed.
	 */
	if (atomic_dec_and_test(&fc->num_waiting) &&
	    !READ_ONCE(fc->connected)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}

static void fuse_put_request(struct fuse_req *req);

static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(fm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
		     req->in.h.gid == ((gid_t)-1))) {
		fuse_put_request(req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

 out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}

static void fuse_put_request(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->bg_lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->bg_lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		fuse_request_free(req);
	}
}

unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
EXPORT_SYMBOL_GPL(fuse_len_args);

u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	fiq->reqctr += FUSE_REQ_ID_STEP;
	return fiq->reqctr;
}
EXPORT_SYMBOL_GPL(fuse_get_unique);

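/*
 * Hash by the base request ID: FUSE_INT_REQ_BIT is masked off, so an
 * interrupt reply (odd ID) is looked up in the same bucket as the
 * even-numbered request it refers to (see request_find()).
 */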
static unsigned int fuse_req_hash(u64 unique)
{
	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}

/**
 * A new request is available, wake fiq->waitq
 */
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq, bool sync)
__releases(fiq->lock)
{
	if (sync)
		wake_up_sync(&fiq->waitq);
	else
		wake_up(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	spin_unlock(&fiq->lock);
}

const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
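
/*
 * These ops are exported because a fuse transport other than /dev/fuse
 * (virtiofs, for instance) can supply its own fuse_iqueue_ops to
 * deliver pending requests, forgets and interrupts over its channel.
 */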

static void queue_request_and_unlock(struct fuse_iqueue *fiq,
				     struct fuse_req *req, bool sync)
__releases(fiq->lock)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		fuse_len_args(req->args->in_numargs,
			      (struct fuse_arg *) req->args->in_args);
	list_add_tail(&req->list, &fiq->pending);
	fiq->ops->wake_pending_and_unlock(fiq, sync);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (nodeid == 0) {
		kfree(forget);
		return;
	}

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		fiq->ops->wake_forget_and_unlock(fiq, false);
	} else {
		kfree(forget);
		spin_unlock(&fiq->lock);
	}
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request_and_unlock(fiq, req, false);
	}
}

/*
 * This function is called when a request is finished. Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed. The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
void fuse_request_end(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	/*
	 * test_and_set_bit() implies smp_mb() between bit
	 * changing and below FR_INTERRUPTED check. Pairs with
	 * smp_mb() from queue_interrupt().
	 */
	if (test_bit(FR_INTERRUPTED, &req->flags)) {
		spin_lock(&fiq->lock);
		list_del_init(&req->intr_entry);
		spin_unlock(&fiq->lock);
	}
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->bg_lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any. It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		if (fc->num_background == fc->congestion_threshold && fm->sb) {
			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);
	} else {
		/* Wake up waiter sleeping in request_wait_answer() */
		wake_up(&req->waitq);
	}

	if (test_bit(FR_ASYNC, &req->flags))
		req->args->end(fm, req->args, req->out.h.error);
put_request:
	fuse_put_request(req);
}
EXPORT_SYMBOL_GPL(fuse_request_end);

static int queue_interrupt(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	spin_lock(&fiq->lock);
	/* Check that the request has actually been interrupted */
	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		/*
		 * Pairs with smp_mb() implied by test_and_set_bit()
		 * from fuse_request_end().
		 */
		smp_mb();
		if (test_bit(FR_FINISHED, &req->flags)) {
			list_del_init(&req->intr_entry);
			spin_unlock(&fiq->lock);
			return 0;
		}
		fiq->ops->wake_interrupt_and_unlock(fiq, false);
	} else {
		spin_unlock(&fiq->lock);
	}
	return 0;
}

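/*
 * Wait for a request to complete, in up to three stages: first an
 * interruptible wait (any signal queues a FUSE_INTERRUPT, unless the
 * server negotiated no_interrupt), then a killable wait during which a
 * fatal signal can still abort a request that userspace has not read
 * yet, and finally an uninterruptible wait for the reply.
 */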
static void request_wait_answer(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		/* acquire extra reference, since request is still needed
		   after fuse_request_end() */
		__fuse_get_request(req);
		queue_request_and_unlock(fiq, req, true);

		request_wait_answer(req);
		/* Pairs with smp_wmb() in fuse_request_end() */
		smp_rmb();
	}
}

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->opcode) {
		case FUSE_CREATE:
			args->in_args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

static void fuse_force_creds(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
{
	req->in.h.opcode = args->opcode;
	req->in.h.nodeid = args->nodeid;
	req->in.h.error_in = args->error_in;
	req->args = args;
	if (args->end)
		__set_bit(FR_ASYNC, &req->flags);
}

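/*
 * Send a request and wait for the reply. A minimal caller sketch
 * (illustrative only, modeled on callers elsewhere in fs/fuse; `outarg'
 * stands for whatever reply struct the opcode uses):
 *
 *	FUSE_ARGS(args);
 *
 *	args.opcode = FUSE_STATFS;
 *	args.nodeid = get_node_id(inode);
 *	args.out_numargs = 1;
 *	args.out_args[0].size = sizeof(outarg);
 *	args.out_args[0].value = &outarg;
 *	err = fuse_simple_request(fm, &args);
 *
 * The return value is the negative error from the reply header, or on
 * success either 0 or, if args->out_argvar is set, the size of the last
 * (variable length) out-argument.
 */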
ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	ssize_t ret;

	if (args->force) {
		atomic_inc(&fc->num_waiting);
		req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);

		if (!args->nocreds)
			fuse_force_creds(req);

		__set_bit(FR_WAITING, &req->flags);
		__set_bit(FR_FORCE, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, false);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);
	fuse_args_to_req(req, args);

	if (!args->noreply)
		__set_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(req);
	ret = req->out.h.error;
	if (!ret && args->out_argvar) {
		BUG_ON(args->out_numargs == 0);
		ret = args->out_args[args->out_numargs - 1].size;
	}
	fuse_put_request(req);

	return ret;
}

static bool fuse_request_queue_background(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	bool queued = false;

	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	spin_lock(&fc->bg_lock);
	if (likely(fc->connected)) {
		fc->num_background++;
		if (fc->num_background == fc->max_background)
			fc->blocked = 1;
		if (fc->num_background == fc->congestion_threshold && fm->sb) {
			set_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
			set_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
		}
		list_add_tail(&req->list, &fc->bg_queue);
		flush_bg_queue(fc);
		queued = true;
	}
	spin_unlock(&fc->bg_lock);

	return queued;
}

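/*
 * Queue a request for asynchronous processing: it is flushed from
 * fc->bg_queue to the input queue as long as fewer than max_background
 * requests are in flight. The caller typically supplies args->end(),
 * which fuse_request_end() invokes with the reply's error code once the
 * server answers (or the connection is aborted); no thread blocks
 * waiting for the reply.
 */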
int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
			   gfp_t gfp_flags)
{
	struct fuse_req *req;

	if (args->force) {
		WARN_ON(!args->nocreds);
		req = fuse_request_alloc(fm, gfp_flags);
		if (!req)
			return -ENOMEM;
		__set_bit(FR_BACKGROUND, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, true);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	fuse_args_to_req(req, args);

	if (!fuse_request_queue_background(req)) {
		fuse_put_request(req);
		return -ENOTCONN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_simple_background);

static int fuse_simple_notify_reply(struct fuse_mount *fm,
				    struct fuse_args *args, u64 unique)
{
	struct fuse_req *req;
	struct fuse_iqueue *fiq = &fm->fc->iq;
	int err = 0;

	req = fuse_get_req(fm, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;

	fuse_args_to_req(req, args);

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		queue_request_and_unlock(fiq, req, false);
	} else {
		err = -ENODEV;
		spin_unlock(&fiq->lock);
		fuse_put_request(req);
	}

	return err;
}

/*
 * Lock the request. Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault. If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request. If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

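/*
 * State for copying request data between kernel and userspace: the
 * other end is either an iov_iter (read(2)/write(2) on the device) or
 * a list of pipe buffers (splice), and pg/offset/len track the current
 * position within the page being copied.
 */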
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs >= cs->pipe->max_usage)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_workingset |
	       1 << PG_reclaim |
	       1 << PG_waiters |
	       LRU_GEN_MASK | LRU_REFS_MASK))) {
		dump_page(page, "fuse: trying to steal weird page");
		return 1;
	}
	return 0;
}

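/*
 * Try to steal the pipe buffer's page and install it in the page cache
 * in place of *pagep, instead of copying the data. Returns 0 when the
 * page was successfully replaced, a positive value when the caller
 * should fall back to copying, and a negative errno on failure
 * (including when the request was aborted meanwhile).
 */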
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	get_page(oldpage);
	err = unlock_request(cs->req);
	if (err)
		goto out_put_old;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		goto out_put_old;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (!pipe_buf_try_steal(cs->pipe, buf))
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		goto out_put_old;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add(newpage);

	/*
	 * Release while we have extra ref on stolen page. Otherwise
	 * anon_pipe_buf_release() might think the page can be reused.
	 */
	pipe_buf_release(cs->pipe, buf);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		goto out_put_old;
	}

	unlock_page(oldpage);
	/* Drop ref for ap->pages[] array */
	put_page(oldpage);
	cs->len = 0;

	err = 0;
out_put_old:
	/* Drop ref obtained in this function */
	put_page(oldpage);
	return err;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (!err)
		err = 1;

	goto out_put_old;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs >= cs->pipe->max_usage)
		return -EIO;

	get_page(page);
	err = unlock_request(cs->req);
	if (err) {
		put_page(page);
		return err;
	}

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer. Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			/*
			 * Can't control lifetime of pipe buffers, so always
			 * copy user pages.
			 */
			if (cs->req->args->user_pages) {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			} else {
				return fuse_ref_page(cs, page, offset, count);
			}
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);

	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned int offset = ap->descs[i].offset;
		unsigned int count = min(nbytes, ap->descs[i].length);

		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
					     unsigned int max,
					     unsigned int *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
EXPORT_SYMBOL(fuse_dequeue_forget);

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->lock)
{
	int err;
	struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = fuse_dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer. This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer. If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * fuse_request_end(). Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_args *args;
	unsigned reqsize;
	unsigned int hash;

	/*
	 * Require sane minimum read buffer - that has capacity for fixed part
	 * of any request header + negotiated max_write room for data.
	 *
	 * Historically libfuse reserves 4K for fixed header room, but e.g.
	 * GlusterFS reserves only 80 bytes
	 *
	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
	 *
	 * which is the absolute minimum any sane filesystem should be using
	 * for header room.
	 */
	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
			   sizeof(struct fuse_in_header) +
			   sizeof(struct fuse_write_in) +
			   fc->max_write))
		return -EINVAL;

 restart:
	for (;;) {
		spin_lock(&fiq->lock);
		if (!fiq->connected || request_pending(fiq))
			break;
		spin_unlock(&fiq->lock);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = wait_event_interruptible_exclusive(fiq->waitq,
				!fiq->connected || request_pending(fiq));
		if (err)
			return err;
	}

	if (!fiq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto err_unlock;
	}

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

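	/*
	 * Interleave FORGETs with normal requests: serve a batch of up
	 * to 16 queued forgets, then let roughly 8 regular requests
	 * through before draining forgets again, so that neither kind
	 * can starve the other.
	 */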
	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->lock);

	args = req->args;
	reqsize = req->in.h.len;

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (args->opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		fuse_request_end(req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	/*
	 * Must not put request on fpq->io queue after having been shut down by
	 * fuse_abort_conn()
	 */
	if (!fpq->connected) {
		req->out.h.error = err = -ECONNABORTED;
		goto out_end;
	}
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
	if (!err)
		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
				     (struct fuse_arg *) args->in_args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	hash = fuse_req_hash(req->in.h.unique);
	list_move_tail(&req->list, &fpq->processing[hash]);
	__fuse_get_request(req);
	set_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(req);
	fuse_put_request(req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	fuse_request_end(req);
	return err;

err_unlock:
	spin_unlock(&fiq->lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this. Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kvfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = fuse_reverse_inval_inode(fc, outarg.ino,
				       outarg.off, outarg.len);
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

struct fuse_retrieve_args {
	struct fuse_args_pages ap;
	struct fuse_notify_retrieve_in inarg;
};

static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
			      int error)
{
	struct fuse_retrieve_args *ra =
		container_of(args, typeof(*ra), ap.args);

	release_pages(ra->ap.pages, ra->ap.num_pages);
	kfree(ra);
}

static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	unsigned int num_pages;
	struct fuse_conn *fc = fm->fc;
	struct fuse_retrieve_args *ra;
	size_t args_size = sizeof(*ra);
	struct fuse_args_pages *ap;
	struct fuse_args *args;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = min(outarg->size, fc->max_write);
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, fc->max_pages);

	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));

	ra = kzalloc(args_size, GFP_KERNEL);
	if (!ra)
		return -ENOMEM;

	ap = &ra->ap;
	ap->pages = (void *) (ra + 1);
	ap->descs = (void *) (ap->pages + num_pages);

	args = &ap->args;
	args->nodeid = outarg->nodeid;
	args->opcode = FUSE_NOTIFY_REPLY;
	args->in_numargs = 2;
	args->in_pages = true;
	args->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && ap->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].offset = offset;
		ap->descs[ap->num_pages].length = this_num;
		ap->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	ra->inarg.offset = outarg->offset;
	ra->inarg.size = total_len;
	args->in_args[0].size = sizeof(ra->inarg);
	args->in_args[0].value = &ra->inarg;
	args->in_args[1].size = total_len;

	err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fm, args, err);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct fuse_mount *fm;
	struct inode *inode;
	u64 nodeid;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	nodeid = outarg.nodeid;

	inode = fuse_ilookup(fc, nodeid, &fm);
	if (inode) {
		err = fuse_retrieve(fm, inode, &outarg);
		iput(inode);
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	unsigned int hash = fuse_req_hash(unique);
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing[hash], list) {
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

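/*
 * Validate the reply size against the expected out-arguments and copy
 * them in. Normally the sizes must match exactly; if args->out_argvar
 * is set, the last argument may be shorter than requested and is
 * truncated to what userspace actually sent.
 */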
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	reqsize += fuse_len_args(args->out_numargs, args->out_args);

	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
		unsigned diffsize = reqsize - nbytes;

		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
			      args->out_args, args->page_zeroing);
}

/*
 * Write a single reply to a request. First the header is copied from
 * the write buffer. The request is then searched on the processing
 * list by the unique ID found in the header. If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling fuse_request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	err = -EINVAL;
	if (nbytes < sizeof(struct fuse_out_header))
		goto out;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto copy_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto copy_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		goto out;
	}

	err = -EINVAL;
	if (oh.error <= -512 || oh.error > 0)
		goto copy_finish;

	spin_lock(&fpq->lock);
	req = NULL;
	if (fpq->connected)
		req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);

	err = -ENOENT;
	if (!req) {
		spin_unlock(&fpq->lock);
		goto copy_finish;
	}

	/* Is it an interrupt reply ID? */
	if (oh.unique & FUSE_INT_REQ_BIT) {
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = 0;
		if (nbytes != sizeof(struct fuse_out_header))
			err = -EINVAL;
		else if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			err = queue_interrupt(req);

		fuse_put_request(req);

		goto copy_finish;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->args->page_replace)
		cs->move_pages = 0;

	if (oh.error)
		err = nbytes != sizeof(oh) ? -EINVAL : 0;
	else
		err = copy_out_args(cs, req->args, nbytes);
	fuse_copy_finish(cs);

	if (!err && req->in.h.opcode == FUSE_CANONICAL_PATH && !oh.error) {
		char *path = (char *)req->args->out_args[0].value;

		path[req->args->out_args[0].size - 1] = 0;
		req->out.h.error =
			kern_path(path, 0, req->args->canonical_path);
	}

	if (!err && (req->in.h.opcode == FUSE_LOOKUP ||
		     req->in.h.opcode == (FUSE_LOOKUP | FUSE_POSTFILTER)) &&
	    req->args->out_args[1].size == sizeof(struct fuse_entry_bpf_out)) {
		struct fuse_entry_bpf_out *febo = (struct fuse_entry_bpf_out *)
			req->args->out_args[1].value;
		struct fuse_entry_bpf *feb = container_of(febo, struct fuse_entry_bpf, out);

		if (febo->backing_action == FUSE_ACTION_REPLACE)
			feb->backing_file = fget(febo->backing_fd);
		if (febo->bpf_action == FUSE_ACTION_REPLACE)
			feb->bpf_file = fget(febo->bpf_fd);
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
out:
	return err ? err : nbytes;

copy_finish:
	fuse_copy_finish(cs);
	goto out;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned int head, tail, mask, count;
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	pipe_lock(pipe);

	head = pipe->head;
	tail = pipe->tail;
	mask = pipe->ring_size - 1;
	count = head - tail;

	bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs) {
		pipe_unlock(pipe);
		return -ENOMEM;
	}

	nbuf = 0;
	rem = 0;
	for (idx = tail; idx != head && rem < len; idx++)
		rem += pipe->bufs[idx & mask].len;

	ret = -EINVAL;
	if (rem < len)
		goto out_free;

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		if (WARN_ON(nbuf >= count || tail == head))
			goto out_free;

		ibuf = &pipe->bufs[tail & mask];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			tail++;
			pipe->tail = tail;
		} else {
			if (!pipe_buf_get(pipe, ibuf))
				goto out_free;

			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	pipe_lock(pipe);
out_free:
	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];

		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	pipe_unlock(pipe);

	kvfree(bufs);
	return ret;
}
2097
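/*
 * poll() on /dev/fuse: the device is always writable; it becomes readable
 * when a request is pending on the input queue.  A disconnected device
 * reports EPOLLERR.
 */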
static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
        __poll_t mask = EPOLLOUT | EPOLLWRNORM;
        struct fuse_iqueue *fiq;
        struct fuse_dev *fud = fuse_get_dev(file);

        if (!fud)
                return EPOLLERR;

        fiq = &fud->fc->iq;
        poll_wait(file, &fiq->waitq, wait);

        spin_lock(&fiq->lock);
        if (!fiq->connected)
                mask = EPOLLERR;
        else if (request_pending(fiq))
                mask |= EPOLLIN | EPOLLRDNORM;
        spin_unlock(&fiq->lock);

        return mask;
}

/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                clear_bit(FR_SENT, &req->flags);
                list_del_init(&req->list);
                fuse_request_end(req);
        }
}

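/*
 * Wake up everybody blocked in poll() on files of this connection so
 * they re-evaluate their state and notice that the connection is gone.
 */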
static void end_polls(struct fuse_conn *fc)
{
        struct rb_node *p;

        p = rb_first(&fc->polled_files);

        while (p) {
                struct fuse_file *ff;
                ff = rb_entry(p, struct fuse_file, polled_node);
                wake_up_interruptible_all(&ff->poll_wait);

                p = rb_next(p);
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we touch
 * it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        struct fuse_iqueue *fiq = &fc->iq;

        spin_lock(&fc->lock);
        if (fc->connected) {
                struct fuse_dev *fud;
                struct fuse_req *req, *next;
                LIST_HEAD(to_end);
                unsigned int i;

                /* Background queuing checks fc->connected under bg_lock */
                spin_lock(&fc->bg_lock);
                fc->connected = 0;
                spin_unlock(&fc->bg_lock);

                fuse_set_initialized(fc);
                list_for_each_entry(fud, &fc->devices, entry) {
                        struct fuse_pqueue *fpq = &fud->pq;

                        spin_lock(&fpq->lock);
                        fpq->connected = 0;
                        list_for_each_entry_safe(req, next, &fpq->io, list) {
                                req->out.h.error = -ECONNABORTED;
                                spin_lock(&req->waitq.lock);
                                set_bit(FR_ABORTED, &req->flags);
                                if (!test_bit(FR_LOCKED, &req->flags)) {
                                        set_bit(FR_PRIVATE, &req->flags);
                                        __fuse_get_request(req);
                                        list_move(&req->list, &to_end);
                                }
                                spin_unlock(&req->waitq.lock);
                        }
                        for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
                                list_splice_tail_init(&fpq->processing[i],
                                                      &to_end);
                        spin_unlock(&fpq->lock);
                }
                spin_lock(&fc->bg_lock);
                fc->blocked = 0;
                fc->max_background = UINT_MAX;
                flush_bg_queue(fc);
                spin_unlock(&fc->bg_lock);

                spin_lock(&fiq->lock);
                fiq->connected = 0;
                list_for_each_entry(req, &fiq->pending, list)
                        clear_bit(FR_PENDING, &req->flags);
                list_splice_tail_init(&fiq->pending, &to_end);
                while (forget_pending(fiq))
                        kfree(fuse_dequeue_forget(fiq, 1, NULL));
                wake_up_all(&fiq->waitq);
                spin_unlock(&fiq->lock);
                kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);

                end_requests(&to_end);
        } else {
                spin_unlock(&fc->lock);
        }
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

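/*
 * Block until there are no more requests in flight on this connection;
 * used after an abort.  The barrier pairs with the one implied by
 * atomic_dec_and_test() in fuse_drop_waiting().
 */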
void fuse_wait_aborted(struct fuse_conn *fc)
{
        /* matches implicit memory barrier in fuse_drop_waiting() */
        smp_mb();
        wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}

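/*
 * Release a /dev/fuse file.  Requests still sitting on this device's
 * processing queues are aborted here; closing the last device of a
 * connection aborts the whole connection.
 */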
int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_dev *fud = fuse_get_dev(file);

        if (fud) {
                struct fuse_conn *fc = fud->fc;
                struct fuse_pqueue *fpq = &fud->pq;
                LIST_HEAD(to_end);
                unsigned int i;

                spin_lock(&fpq->lock);
                WARN_ON(!list_empty(&fpq->io));
                for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
                        list_splice_init(&fpq->processing[i], &to_end);
                spin_unlock(&fpq->lock);

                end_requests(&to_end);

                /* Are we the last open device? */
                if (atomic_dec_and_test(&fc->dev_count)) {
                        WARN_ON(fc->iq.fasync != NULL);
                        fuse_abort_conn(fc);
                }
                fuse_dev_free(fud);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_dev *fud = fuse_get_dev(file);

        if (!fud)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

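/*
 * Install a new fuse_dev on @new so a multi-threaded daemon can process
 * requests on several device file descriptors, each with its own
 * processing queue.  Called with fuse_mutex held (see fuse_dev_ioctl()).
 */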
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
        struct fuse_dev *fud;

        if (new->private_data)
                return -EINVAL;

        fud = fuse_dev_alloc_install(fc);
        if (!fud)
                return -ENOMEM;

        new->private_data = fud;
        atomic_inc(&fc->dev_count);

        return 0;
}

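/*
 * FUSE_DEV_IOC_CLONE takes a pointer to the u32 file descriptor of an
 * already-mounted /dev/fuse instance and clones its connection onto this
 * file.  A minimal userspace sketch (illustrative only):
 *
 *	int clone_fd = open("/dev/fuse", O_RDWR);
 *	uint32_t session_fd = mounted_fuse_fd;
 *	ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &session_fd);
 *
 * FUSE_DEV_IOC_PASSTHROUGH_OPEN (an Android extension) registers a file
 * descriptor for passthrough I/O on this device.
 */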
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
{
        int res;
        int oldfd;
        struct fuse_dev *fud = NULL;

        switch (cmd) {
        case FUSE_DEV_IOC_CLONE:
                res = -EFAULT;
                if (!get_user(oldfd, (__u32 __user *)arg)) {
                        struct file *old = fget(oldfd);

                        res = -EINVAL;
                        if (old) {
                                /*
                                 * Check against file->f_op because CUSE
                                 * uses the same ioctl handler.
                                 */
                                if (old->f_op == file->f_op &&
                                    old->f_cred->user_ns ==
                                            file->f_cred->user_ns)
                                        fud = fuse_get_dev(old);

                                if (fud) {
                                        mutex_lock(&fuse_mutex);
                                        res = fuse_device_clone(fud->fc, file);
                                        mutex_unlock(&fuse_mutex);
                                }
                                fput(old);
                        }
                }
                break;
        case FUSE_DEV_IOC_PASSTHROUGH_OPEN:
                res = -EFAULT;
                if (!get_user(oldfd, (__u32 __user *)arg)) {
                        res = -EINVAL;
                        fud = fuse_get_dev(file);
                        if (fud)
                                res = fuse_passthrough_open(fud, oldfd);
                }
                break;
        default:
                res = -ENOTTY;
                break;
        }
        return res;
}

const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .open           = fuse_dev_open,
        .llseek         = no_llseek,
        .read_iter      = fuse_dev_read,
        .splice_read    = fuse_dev_splice_read,
        .write_iter     = fuse_dev_write,
        .splice_write   = fuse_dev_splice_write,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
        .unlocked_ioctl = fuse_dev_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops = &fuse_dev_operations,
};

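/*
 * Module init for the device: create the slab cache for requests and
 * register the /dev/fuse misc device (minor FUSE_MINOR, devname "fuse").
 */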
int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}