1  /*
2    FUSE: Filesystem in Userspace
3    Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4  
5    This program can be distributed under the terms of the GNU GPL.
6    See the file COPYING.
7  */
8  
9  #include "fuse_i.h"
10  
11  #include <linux/init.h>
12  #include <linux/module.h>
13  #include <linux/poll.h>
14  #include <linux/uio.h>
15  #include <linux/miscdevice.h>
16  #include <linux/pagemap.h>
17  #include <linux/file.h>
18  #include <linux/slab.h>
19  #include <linux/pipe_fs_i.h>
20  #include <linux/swap.h>
21  #include <linux/splice.h>
22  #include <linux/freezer.h>
23  
24  MODULE_ALIAS_MISCDEV(FUSE_MINOR);
25  MODULE_ALIAS("devname:fuse");
26  
27  static struct kmem_cache *fuse_req_cachep;
28  
29  static struct fuse_conn *fuse_get_conn(struct file *file)
30  {
31  	/*
32  	 * Lockless access is OK, because file->private_data is set
33  	 * once during mount and is valid until the file is released.
34  	 */
35  	return file->private_data;
36  }
37  
38  static void fuse_request_init(struct fuse_req *req)
39  {
40  	memset(req, 0, sizeof(*req));
41  	INIT_LIST_HEAD(&req->list);
42  	INIT_LIST_HEAD(&req->intr_entry);
43  	init_waitqueue_head(&req->waitq);
44  	atomic_set(&req->count, 1);
45  }
46  
47  struct fuse_req *fuse_request_alloc(void)
48  {
49  	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
50  	if (req)
51  		fuse_request_init(req);
52  	return req;
53  }
54  EXPORT_SYMBOL_GPL(fuse_request_alloc);
55  
56  struct fuse_req *fuse_request_alloc_nofs(void)
57  {
58  	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
59  	if (req)
60  		fuse_request_init(req);
61  	return req;
62  }
63  
64  void fuse_request_free(struct fuse_req *req)
65  {
66  	kmem_cache_free(fuse_req_cachep, req);
67  }
68  
69  static void block_sigs(sigset_t *oldset)
70  {
71  	sigset_t mask;
72  
73  	siginitsetinv(&mask, sigmask(SIGKILL));
74  	sigprocmask(SIG_BLOCK, &mask, oldset);
75  }
76  
77  static void restore_sigs(sigset_t *oldset)
78  {
79  	sigprocmask(SIG_SETMASK, oldset, NULL);
80  }
81  
82  static void __fuse_get_request(struct fuse_req *req)
83  {
84  	atomic_inc(&req->count);
85  }
86  
87  /* Must be called with > 1 refcount */
88  static void __fuse_put_request(struct fuse_req *req)
89  {
90  	BUG_ON(atomic_read(&req->count) < 2);
91  	atomic_dec(&req->count);
92  }
93  
94  static void fuse_req_init_context(struct fuse_req *req)
95  {
96  	req->in.h.uid = current_fsuid();
97  	req->in.h.gid = current_fsgid();
98  	req->in.h.pid = current->pid;
99  }
100  
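/*
 * Reserve a slot in fc->num_waiting and wait until the connection is
 * no longer blocked (only fatal signals interrupt the wait), then
 * allocate and initialize a request.  Returns an ERR_PTR on failure.
 */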
101  struct fuse_req *fuse_get_req(struct fuse_conn *fc)
102  {
103  	struct fuse_req *req;
104  	sigset_t oldset;
105  	int intr;
106  	int err;
107  
108  	atomic_inc(&fc->num_waiting);
109  	block_sigs(&oldset);
110  	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
111  	restore_sigs(&oldset);
112  	err = -EINTR;
113  	if (intr)
114  		goto out;
115  
116  	err = -ENOTCONN;
117  	if (!fc->connected)
118  		goto out;
119  
120  	req = fuse_request_alloc();
121  	err = -ENOMEM;
122  	if (!req)
123  		goto out;
124  
125  	fuse_req_init_context(req);
126  	req->waiting = 1;
127  	return req;
128  
129   out:
130  	atomic_dec(&fc->num_waiting);
131  	return ERR_PTR(err);
132  }
133  EXPORT_SYMBOL_GPL(fuse_get_req);
134  
135  /*
136   * Return request in fuse_file->reserved_req.  However that may
137   * currently be in use.  If that is the case, wait for it to become
138   * available.
139   */
140  static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
141  					 struct file *file)
142  {
143  	struct fuse_req *req = NULL;
144  	struct fuse_file *ff = file->private_data;
145  
146  	do {
147  		wait_event(fc->reserved_req_waitq, ff->reserved_req);
148  		spin_lock(&fc->lock);
149  		if (ff->reserved_req) {
150  			req = ff->reserved_req;
151  			ff->reserved_req = NULL;
152  			get_file(file);
153  			req->stolen_file = file;
154  		}
155  		spin_unlock(&fc->lock);
156  	} while (!req);
157  
158  	return req;
159  }
160  
161  /*
162   * Put stolen request back into fuse_file->reserved_req
163   */
164  static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
165  {
166  	struct file *file = req->stolen_file;
167  	struct fuse_file *ff = file->private_data;
168  
169  	spin_lock(&fc->lock);
170  	fuse_request_init(req);
171  	BUG_ON(ff->reserved_req);
172  	ff->reserved_req = req;
173  	wake_up_all(&fc->reserved_req_waitq);
174  	spin_unlock(&fc->lock);
175  	fput(file);
176  }
177  
178  /*
179   * Gets a request for a file operation, always succeeds
180   *
181   * This is used for sending the FLUSH request, which must get to
182   * userspace, due to POSIX locks which may need to be unlocked.
183   *
184   * If allocation fails due to OOM, use the reserved request in
185   * fuse_file.
186   *
187   * This is very unlikely to deadlock accidentally, since the
188   * filesystem should not have its own file open.  If deadlock is
189   * intentional, it can still be broken by "aborting" the filesystem.
190   */
191  struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
192  {
193  	struct fuse_req *req;
194  
195  	atomic_inc(&fc->num_waiting);
196  	wait_event(fc->blocked_waitq, !fc->blocked);
197  	req = fuse_request_alloc();
198  	if (!req)
199  		req = get_reserved_req(fc, file);
200  
201  	fuse_req_init_context(req);
202  	req->waiting = 1;
203  	return req;
204  }
205  
206  void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
207  {
208  	if (atomic_dec_and_test(&req->count)) {
209  		if (req->waiting)
210  			atomic_dec(&fc->num_waiting);
211  
212  		if (req->stolen_file)
213  			put_reserved_req(fc, req);
214  		else
215  			fuse_request_free(req);
216  	}
217  }
218  EXPORT_SYMBOL_GPL(fuse_put_request);
219  
220  static unsigned len_args(unsigned numargs, struct fuse_arg *args)
221  {
222  	unsigned nbytes = 0;
223  	unsigned i;
224  
225  	for (i = 0; i < numargs; i++)
226  		nbytes += args[i].size;
227  
228  	return nbytes;
229  }
230  
231  static u64 fuse_get_unique(struct fuse_conn *fc)
232  {
233  	fc->reqctr++;
234  	/* zero is special */
235  	if (fc->reqctr == 0)
236  		fc->reqctr = 1;
237  
238  	return fc->reqctr;
239  }
240  
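/*
 * Add a request to the pending list and wake up any readers of the
 * device (including fasync listeners).  Called with fc->lock held.
 */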
241  static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
242  {
243  	req->in.h.len = sizeof(struct fuse_in_header) +
244  		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
245  	list_add_tail(&req->list, &fc->pending);
246  	req->state = FUSE_REQ_PENDING;
247  	if (!req->waiting) {
248  		req->waiting = 1;
249  		atomic_inc(&fc->num_waiting);
250  	}
251  	wake_up(&fc->waitq);
252  	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
253  }
254  
255  void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
256  		       u64 nodeid, u64 nlookup)
257  {
258  	forget->forget_one.nodeid = nodeid;
259  	forget->forget_one.nlookup = nlookup;
260  
261  	spin_lock(&fc->lock);
262  	if (fc->connected) {
263  		fc->forget_list_tail->next = forget;
264  		fc->forget_list_tail = forget;
265  		wake_up(&fc->waitq);
266  		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
267  	} else {
268  		kfree(forget);
269  	}
270  	spin_unlock(&fc->lock);
271  }
272  
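/*
 * Move queued background requests to the pending list as long as the
 * number of active background requests stays below max_background.
 * Called with fc->lock held.
 */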
273  static void flush_bg_queue(struct fuse_conn *fc)
274  {
275  	while (fc->active_background < fc->max_background &&
276  	       !list_empty(&fc->bg_queue)) {
277  		struct fuse_req *req;
278  
279  		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
280  		list_del(&req->list);
281  		fc->active_background++;
282  		req->in.h.unique = fuse_get_unique(fc);
283  		queue_request(fc, req);
284  	}
285  }
286  
287  /*
288   * This function is called when a request is finished.  Either a reply
289   * has arrived or it was aborted (and not yet sent) or some error
290   * occurred during communication with userspace, or the device file
291   * was closed.  The requester thread is woken up (if still waiting),
292   * the 'end' callback is called if given, and the reference to the
293   * request is released
294   *
295   * Called with fc->lock held, unlocks it
296   */
297  static void request_end(struct fuse_conn *fc, struct fuse_req *req)
298  __releases(fc->lock)
299  {
300  	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
301  	req->end = NULL;
302  	list_del(&req->list);
303  	list_del(&req->intr_entry);
304  	req->state = FUSE_REQ_FINISHED;
305  	if (req->background) {
306  		if (fc->num_background == fc->max_background) {
307  			fc->blocked = 0;
308  			wake_up_all(&fc->blocked_waitq);
309  		}
310  		if (fc->num_background == fc->congestion_threshold &&
311  		    fc->connected && fc->bdi_initialized) {
312  			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
313  			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
314  		}
315  		fc->num_background--;
316  		fc->active_background--;
317  		flush_bg_queue(fc);
318  	}
319  	spin_unlock(&fc->lock);
320  	wake_up(&req->waitq);
321  	if (end)
322  		end(fc, req);
323  	fuse_put_request(fc, req);
324  }
325  
326  static void wait_answer_interruptible(struct fuse_conn *fc,
327  				      struct fuse_req *req)
328  __releases(fc->lock)
329  __acquires(fc->lock)
330  {
331  	if (signal_pending(current))
332  		return;
333  
334  	spin_unlock(&fc->lock);
335  	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
336  	spin_lock(&fc->lock);
337  }
338  
339  static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
340  {
341  	list_add_tail(&req->intr_entry, &fc->interrupts);
342  	wake_up(&fc->waitq);
343  	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
344  }
345  
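/*
 * Wait for the request to be answered.  First wait interruptibly and
 * queue a FUSE_INTERRUPT if a signal arrives while the request is in
 * userspace; then wait allowing only fatal signals, bailing out with
 * -EINTR if the request is still pending; finally wait uninterruptibly
 * for a request that has already been sent or was forced.
 */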
346  static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
347  __releases(fc->lock)
348  __acquires(fc->lock)
349  {
350  	if (!fc->no_interrupt) {
351  		/* Any signal may interrupt this */
352  		wait_answer_interruptible(fc, req);
353  
354  		if (req->aborted)
355  			goto aborted;
356  		if (req->state == FUSE_REQ_FINISHED)
357  			return;
358  
359  		req->interrupted = 1;
360  		if (req->state == FUSE_REQ_SENT)
361  			queue_interrupt(fc, req);
362  	}
363  
364  	if (!req->force) {
365  		sigset_t oldset;
366  
367  		/* Only fatal signals may interrupt this */
368  		block_sigs(&oldset);
369  		wait_answer_interruptible(fc, req);
370  		restore_sigs(&oldset);
371  
372  		if (req->aborted)
373  			goto aborted;
374  		if (req->state == FUSE_REQ_FINISHED)
375  			return;
376  
377  		/* Request is not yet in userspace, bail out */
378  		if (req->state == FUSE_REQ_PENDING) {
379  			list_del(&req->list);
380  			__fuse_put_request(req);
381  			req->out.h.error = -EINTR;
382  			return;
383  		}
384  	}
385  
386  	/*
387  	 * Either request is already in userspace, or it was forced.
388  	 * Wait it out.
389  	 */
390  	spin_unlock(&fc->lock);
391  
392  	while (req->state != FUSE_REQ_FINISHED)
393  		wait_event_freezable(req->waitq,
394  				     req->state == FUSE_REQ_FINISHED);
395  	spin_lock(&fc->lock);
396  
397  	if (!req->aborted)
398  		return;
399  
400   aborted:
401  	BUG_ON(req->state != FUSE_REQ_FINISHED);
402  	if (req->locked) {
403  		/* This is uninterruptible sleep, because data is
404  		   being copied to/from the buffers of req.  During
405  		   locked state, there mustn't be any filesystem
406  		   operation (e.g. page fault), since that could lead
407  		   to deadlock */
408  		spin_unlock(&fc->lock);
409  		wait_event(req->waitq, !req->locked);
410  		spin_lock(&fc->lock);
411  	}
412  }
413  
414  void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
415  {
416  	req->isreply = 1;
417  	spin_lock(&fc->lock);
418  	if (!fc->connected)
419  		req->out.h.error = -ENOTCONN;
420  	else if (fc->conn_error)
421  		req->out.h.error = -ECONNREFUSED;
422  	else {
423  		req->in.h.unique = fuse_get_unique(fc);
424  		queue_request(fc, req);
425  		/* acquire extra reference, since request is still needed
426  		   after request_end() */
427  		__fuse_get_request(req);
428  
429  		request_wait_answer(fc, req);
430  	}
431  	spin_unlock(&fc->lock);
432  }
433  EXPORT_SYMBOL_GPL(fuse_request_send);
434  
435  static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
436  					    struct fuse_req *req)
437  {
438  	req->background = 1;
439  	fc->num_background++;
440  	if (fc->num_background == fc->max_background)
441  		fc->blocked = 1;
442  	if (fc->num_background == fc->congestion_threshold &&
443  	    fc->bdi_initialized) {
444  		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
445  		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
446  	}
447  	list_add_tail(&req->list, &fc->bg_queue);
448  	flush_bg_queue(fc);
449  }
450  
451  static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
452  {
453  	spin_lock(&fc->lock);
454  	if (fc->connected) {
455  		fuse_request_send_nowait_locked(fc, req);
456  		spin_unlock(&fc->lock);
457  	} else {
458  		req->out.h.error = -ENOTCONN;
459  		request_end(fc, req);
460  	}
461  }
462  
463  void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
464  {
465  	req->isreply = 1;
466  	fuse_request_send_nowait(fc, req);
467  }
468  EXPORT_SYMBOL_GPL(fuse_request_send_background);
469  
470  static int fuse_request_send_notify_reply(struct fuse_conn *fc,
471  					  struct fuse_req *req, u64 unique)
472  {
473  	int err = -ENODEV;
474  
475  	req->isreply = 0;
476  	req->in.h.unique = unique;
477  	spin_lock(&fc->lock);
478  	if (fc->connected) {
479  		queue_request(fc, req);
480  		err = 0;
481  	}
482  	spin_unlock(&fc->lock);
483  
484  	return err;
485  }
486  
487  /*
488   * Called under fc->lock
489   *
490   * fc->connected must have been checked previously
491   */
492  void fuse_request_send_background_locked(struct fuse_conn *fc,
493  					 struct fuse_req *req)
494  {
495  	req->isreply = 1;
496  	fuse_request_send_nowait_locked(fc, req);
497  }
498  
499  /*
500   * Lock the request.  Up to the next unlock_request() there mustn't be
501   * anything that could cause a page-fault.  If the request was already
502   * aborted bail out.
503   */
504  static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
505  {
506  	int err = 0;
507  	if (req) {
508  		spin_lock(&fc->lock);
509  		if (req->aborted)
510  			err = -ENOENT;
511  		else
512  			req->locked = 1;
513  		spin_unlock(&fc->lock);
514  	}
515  	return err;
516  }
517  
518  /*
519   * Unlock request.  If it was aborted during being locked, the
520   * requester thread is currently waiting for it to be unlocked, so
521   * wake it up.
522   */
523  static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
524  {
525  	if (req) {
526  		spin_lock(&fc->lock);
527  		req->locked = 0;
528  		if (req->aborted)
529  			wake_up(&req->waitq);
530  		spin_unlock(&fc->lock);
531  	}
532  }
533  
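/*
 * State of an ongoing copy between a request and a userspace buffer or
 * a pipe (for splice).  Tracks the current iovec segment or pipe
 * buffer and the currently mapped page.
 */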
534  struct fuse_copy_state {
535  	struct fuse_conn *fc;
536  	int write;
537  	struct fuse_req *req;
538  	const struct iovec *iov;
539  	struct pipe_buffer *pipebufs;
540  	struct pipe_buffer *currbuf;
541  	struct pipe_inode_info *pipe;
542  	unsigned long nr_segs;
543  	unsigned long seglen;
544  	unsigned long addr;
545  	struct page *pg;
546  	void *mapaddr;
547  	void *buf;
548  	unsigned len;
549  	unsigned move_pages:1;
550  };
551  
552  static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
553  			   int write,
554  			   const struct iovec *iov, unsigned long nr_segs)
555  {
556  	memset(cs, 0, sizeof(*cs));
557  	cs->fc = fc;
558  	cs->write = write;
559  	cs->iov = iov;
560  	cs->nr_segs = nr_segs;
561  }
562  
563  /* Unmap and put previous page of userspace buffer */
564  static void fuse_copy_finish(struct fuse_copy_state *cs)
565  {
566  	if (cs->currbuf) {
567  		struct pipe_buffer *buf = cs->currbuf;
568  
569  		if (!cs->write) {
570  			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
571  		} else {
572  			kunmap(buf->page);
573  			buf->len = PAGE_SIZE - cs->len;
574  		}
575  		cs->currbuf = NULL;
576  		cs->mapaddr = NULL;
577  	} else if (cs->mapaddr) {
578  		kunmap(cs->pg);
579  		if (cs->write) {
580  			flush_dcache_page(cs->pg);
581  			set_page_dirty_lock(cs->pg);
582  		}
583  		put_page(cs->pg);
584  		cs->mapaddr = NULL;
585  	}
586  }
587  
588  /*
589   * Get another pageful of the userspace buffer, map it into kernel
590   * address space, and lock the request
591   */
592  static int fuse_copy_fill(struct fuse_copy_state *cs)
593  {
594  	unsigned long offset;
595  	int err;
596  
597  	unlock_request(cs->fc, cs->req);
598  	fuse_copy_finish(cs);
599  	if (cs->pipebufs) {
600  		struct pipe_buffer *buf = cs->pipebufs;
601  
602  		if (!cs->write) {
603  			err = buf->ops->confirm(cs->pipe, buf);
604  			if (err)
605  				return err;
606  
607  			BUG_ON(!cs->nr_segs);
608  			cs->currbuf = buf;
609  			cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
610  			cs->len = buf->len;
611  			cs->buf = cs->mapaddr + buf->offset;
612  			cs->pipebufs++;
613  			cs->nr_segs--;
614  		} else {
615  			struct page *page;
616  
617  			if (cs->nr_segs == cs->pipe->buffers)
618  				return -EIO;
619  
620  			page = alloc_page(GFP_HIGHUSER);
621  			if (!page)
622  				return -ENOMEM;
623  
624  			buf->page = page;
625  			buf->offset = 0;
626  			buf->len = 0;
627  
628  			cs->currbuf = buf;
629  			cs->mapaddr = kmap(page);
630  			cs->buf = cs->mapaddr;
631  			cs->len = PAGE_SIZE;
632  			cs->pipebufs++;
633  			cs->nr_segs++;
634  		}
635  	} else {
636  		if (!cs->seglen) {
637  			BUG_ON(!cs->nr_segs);
638  			cs->seglen = cs->iov[0].iov_len;
639  			cs->addr = (unsigned long) cs->iov[0].iov_base;
640  			cs->iov++;
641  			cs->nr_segs--;
642  		}
643  		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
644  		if (err < 0)
645  			return err;
646  		BUG_ON(err != 1);
647  		offset = cs->addr % PAGE_SIZE;
648  		cs->mapaddr = kmap(cs->pg);
649  		cs->buf = cs->mapaddr + offset;
650  		cs->len = min(PAGE_SIZE - offset, cs->seglen);
651  		cs->seglen -= cs->len;
652  		cs->addr += cs->len;
653  	}
654  
655  	return lock_request(cs->fc, cs->req);
656  }
657  
658  /* Do as much copy to/from userspace buffer as we can */
659  static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
660  {
661  	unsigned ncpy = min(*size, cs->len);
662  	if (val) {
663  		if (cs->write)
664  			memcpy(cs->buf, *val, ncpy);
665  		else
666  			memcpy(*val, cs->buf, ncpy);
667  		*val += ncpy;
668  	}
669  	*size -= ncpy;
670  	cs->len -= ncpy;
671  	cs->buf += ncpy;
672  	return ncpy;
673  }
674  
675  static int fuse_check_page(struct page *page)
676  {
677  	if (page_mapcount(page) ||
678  	    page->mapping != NULL ||
679  	    page_count(page) != 1 ||
680  	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
681  	     ~(1 << PG_locked |
682  	       1 << PG_referenced |
683  	       1 << PG_uptodate |
684  	       1 << PG_lru |
685  	       1 << PG_active |
686  	       1 << PG_reclaim))) {
687  		printk(KERN_WARNING "fuse: trying to steal weird page\n");
688  		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
689  		return 1;
690  	}
691  	return 0;
692  }
693  
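/*
 * Instead of copying the data, try to steal the page from the pipe
 * buffer and install it in the page cache in place of the old request
 * page.  Returns 0 on success, a negative error, or 1 if the page
 * could not be stolen and the caller should fall back to copying.
 */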
694  static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
695  {
696  	int err;
697  	struct page *oldpage = *pagep;
698  	struct page *newpage;
699  	struct pipe_buffer *buf = cs->pipebufs;
700  	struct address_space *mapping;
701  	pgoff_t index;
702  
703  	unlock_request(cs->fc, cs->req);
704  	fuse_copy_finish(cs);
705  
706  	err = buf->ops->confirm(cs->pipe, buf);
707  	if (err)
708  		return err;
709  
710  	BUG_ON(!cs->nr_segs);
711  	cs->currbuf = buf;
712  	cs->len = buf->len;
713  	cs->pipebufs++;
714  	cs->nr_segs--;
715  
716  	if (cs->len != PAGE_SIZE)
717  		goto out_fallback;
718  
719  	if (buf->ops->steal(cs->pipe, buf) != 0)
720  		goto out_fallback;
721  
722  	newpage = buf->page;
723  
724  	if (WARN_ON(!PageUptodate(newpage)))
725  		return -EIO;
726  
727  	ClearPageMappedToDisk(newpage);
728  
729  	if (fuse_check_page(newpage) != 0)
730  		goto out_fallback_unlock;
731  
732  	mapping = oldpage->mapping;
733  	index = oldpage->index;
734  
735  	/*
736  	 * This is a new and locked page, it shouldn't be mapped or
737  	 * have any special flags on it
738  	 */
739  	if (WARN_ON(page_mapped(oldpage)))
740  		goto out_fallback_unlock;
741  	if (WARN_ON(page_has_private(oldpage)))
742  		goto out_fallback_unlock;
743  	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
744  		goto out_fallback_unlock;
745  	if (WARN_ON(PageMlocked(oldpage)))
746  		goto out_fallback_unlock;
747  
748  	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
749  	if (err) {
750  		unlock_page(newpage);
751  		return err;
752  	}
753  
754  	page_cache_get(newpage);
755  
756  	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
757  		lru_cache_add_file(newpage);
758  
759  	err = 0;
760  	spin_lock(&cs->fc->lock);
761  	if (cs->req->aborted)
762  		err = -ENOENT;
763  	else
764  		*pagep = newpage;
765  	spin_unlock(&cs->fc->lock);
766  
767  	if (err) {
768  		unlock_page(newpage);
769  		page_cache_release(newpage);
770  		return err;
771  	}
772  
773  	unlock_page(oldpage);
774  	page_cache_release(oldpage);
775  	cs->len = 0;
776  
777  	return 0;
778  
779  out_fallback_unlock:
780  	unlock_page(newpage);
781  out_fallback:
782  	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
783  	cs->buf = cs->mapaddr + buf->offset;
784  
785  	err = lock_request(cs->fc, cs->req);
786  	if (err)
787  		return err;
788  
789  	return 1;
790  }
791  
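/*
 * Reference a request page into the next pipe buffer, so that a splice
 * read can hand it to userspace without copying.
 */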
792  static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
793  			 unsigned offset, unsigned count)
794  {
795  	struct pipe_buffer *buf;
796  
797  	if (cs->nr_segs == cs->pipe->buffers)
798  		return -EIO;
799  
800  	unlock_request(cs->fc, cs->req);
801  	fuse_copy_finish(cs);
802  
803  	buf = cs->pipebufs;
804  	page_cache_get(page);
805  	buf->page = page;
806  	buf->offset = offset;
807  	buf->len = count;
808  
809  	cs->pipebufs++;
810  	cs->nr_segs++;
811  	cs->len = 0;
812  
813  	return 0;
814  }
815  
816  /*
817   * Copy a page in the request to/from the userspace buffer.  Must be
818   * done atomically
819   */
820  static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
821  			  unsigned offset, unsigned count, int zeroing)
822  {
823  	int err;
824  	struct page *page = *pagep;
825  
826  	if (page && zeroing && count < PAGE_SIZE)
827  		clear_highpage(page);
828  
829  	while (count) {
830  		if (cs->write && cs->pipebufs && page) {
831  			return fuse_ref_page(cs, page, offset, count);
832  		} else if (!cs->len) {
833  			if (cs->move_pages && page &&
834  			    offset == 0 && count == PAGE_SIZE) {
835  				err = fuse_try_move_page(cs, pagep);
836  				if (err <= 0)
837  					return err;
838  			} else {
839  				err = fuse_copy_fill(cs);
840  				if (err)
841  					return err;
842  			}
843  		}
844  		if (page) {
845  			void *mapaddr = kmap_atomic(page);
846  			void *buf = mapaddr + offset;
847  			offset += fuse_copy_do(cs, &buf, &count);
848  			kunmap_atomic(mapaddr);
849  		} else
850  			offset += fuse_copy_do(cs, NULL, &count);
851  	}
852  	if (page && !cs->write)
853  		flush_dcache_page(page);
854  	return 0;
855  }
856  
857  /* Copy pages in the request to/from userspace buffer */
858  static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
859  			   int zeroing)
860  {
861  	unsigned i;
862  	struct fuse_req *req = cs->req;
863  	unsigned offset = req->page_offset;
864  	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);
865  
866  	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
867  		int err;
868  
869  		err = fuse_copy_page(cs, &req->pages[i], offset, count,
870  				     zeroing);
871  		if (err)
872  			return err;
873  
874  		nbytes -= count;
875  		count = min(nbytes, (unsigned) PAGE_SIZE);
876  		offset = 0;
877  	}
878  	return 0;
879  }
880  
881  /* Copy a single argument in the request to/from userspace buffer */
882  static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
883  {
884  	while (size) {
885  		if (!cs->len) {
886  			int err = fuse_copy_fill(cs);
887  			if (err)
888  				return err;
889  		}
890  		fuse_copy_do(cs, &val, &size);
891  	}
892  	return 0;
893  }
894  
895  /* Copy request arguments to/from userspace buffer */
896  static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
897  			  unsigned argpages, struct fuse_arg *args,
898  			  int zeroing)
899  {
900  	int err = 0;
901  	unsigned i;
902  
903  	for (i = 0; !err && i < numargs; i++)  {
904  		struct fuse_arg *arg = &args[i];
905  		if (i == numargs - 1 && argpages)
906  			err = fuse_copy_pages(cs, arg->size, zeroing);
907  		else
908  			err = fuse_copy_one(cs, arg->value, arg->size);
909  	}
910  	return err;
911  }
912  
913  static int forget_pending(struct fuse_conn *fc)
914  {
915  	return fc->forget_list_head.next != NULL;
916  }
917  
918  static int request_pending(struct fuse_conn *fc)
919  {
920  	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
921  		forget_pending(fc);
922  }
923  
924  /* Wait until a request is available on the pending list */
925  static void request_wait(struct fuse_conn *fc)
926  __releases(fc->lock)
927  __acquires(fc->lock)
928  {
929  	DECLARE_WAITQUEUE(wait, current);
930  
931  	add_wait_queue_exclusive(&fc->waitq, &wait);
932  	while (fc->connected && !request_pending(fc)) {
933  		set_current_state(TASK_INTERRUPTIBLE);
934  		if (signal_pending(current))
935  			break;
936  
937  		spin_unlock(&fc->lock);
938  		schedule();
939  		spin_lock(&fc->lock);
940  	}
941  	set_current_state(TASK_RUNNING);
942  	remove_wait_queue(&fc->waitq, &wait);
943  }
944  
945  /*
946   * Transfer an interrupt request to userspace
947   *
948   * Unlike other requests this is assembled on demand, without a need
949   * to allocate a separate fuse_req structure.
950   *
951   * Called with fc->lock held, releases it
952   */
953  static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
954  			       size_t nbytes, struct fuse_req *req)
955  __releases(fc->lock)
956  {
957  	struct fuse_in_header ih;
958  	struct fuse_interrupt_in arg;
959  	unsigned reqsize = sizeof(ih) + sizeof(arg);
960  	int err;
961  
962  	list_del_init(&req->intr_entry);
963  	req->intr_unique = fuse_get_unique(fc);
964  	memset(&ih, 0, sizeof(ih));
965  	memset(&arg, 0, sizeof(arg));
966  	ih.len = reqsize;
967  	ih.opcode = FUSE_INTERRUPT;
968  	ih.unique = req->intr_unique;
969  	arg.unique = req->in.h.unique;
970  
971  	spin_unlock(&fc->lock);
972  	if (nbytes < reqsize)
973  		return -EINVAL;
974  
975  	err = fuse_copy_one(cs, &ih, sizeof(ih));
976  	if (!err)
977  		err = fuse_copy_one(cs, &arg, sizeof(arg));
978  	fuse_copy_finish(cs);
979  
980  	return err ? err : reqsize;
981  }
982  
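/*
 * Detach up to 'max' entries from the head of the forget list and
 * return them.  The number of detached entries is stored in *countp if
 * it is non-NULL.  Called with fc->lock held.
 */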
983  static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
984  					       unsigned max,
985  					       unsigned *countp)
986  {
987  	struct fuse_forget_link *head = fc->forget_list_head.next;
988  	struct fuse_forget_link **newhead = &head;
989  	unsigned count;
990  
991  	for (count = 0; *newhead != NULL && count < max; count++)
992  		newhead = &(*newhead)->next;
993  
994  	fc->forget_list_head.next = *newhead;
995  	*newhead = NULL;
996  	if (fc->forget_list_head.next == NULL)
997  		fc->forget_list_tail = &fc->forget_list_head;
998  
999  	if (countp != NULL)
1000  		*countp = count;
1001  
1002  	return head;
1003  }
1004  
1005  static int fuse_read_single_forget(struct fuse_conn *fc,
1006  				   struct fuse_copy_state *cs,
1007  				   size_t nbytes)
1008  __releases(fc->lock)
1009  {
1010  	int err;
1011  	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
1012  	struct fuse_forget_in arg = {
1013  		.nlookup = forget->forget_one.nlookup,
1014  	};
1015  	struct fuse_in_header ih = {
1016  		.opcode = FUSE_FORGET,
1017  		.nodeid = forget->forget_one.nodeid,
1018  		.unique = fuse_get_unique(fc),
1019  		.len = sizeof(ih) + sizeof(arg),
1020  	};
1021  
1022  	spin_unlock(&fc->lock);
1023  	kfree(forget);
1024  	if (nbytes < ih.len)
1025  		return -EINVAL;
1026  
1027  	err = fuse_copy_one(cs, &ih, sizeof(ih));
1028  	if (!err)
1029  		err = fuse_copy_one(cs, &arg, sizeof(arg));
1030  	fuse_copy_finish(cs);
1031  
1032  	if (err)
1033  		return err;
1034  
1035  	return ih.len;
1036  }
1037  
1038  static int fuse_read_batch_forget(struct fuse_conn *fc,
1039  				   struct fuse_copy_state *cs, size_t nbytes)
1040  __releases(fc->lock)
1041  {
1042  	int err;
1043  	unsigned max_forgets;
1044  	unsigned count;
1045  	struct fuse_forget_link *head;
1046  	struct fuse_batch_forget_in arg = { .count = 0 };
1047  	struct fuse_in_header ih = {
1048  		.opcode = FUSE_BATCH_FORGET,
1049  		.unique = fuse_get_unique(fc),
1050  		.len = sizeof(ih) + sizeof(arg),
1051  	};
1052  
1053  	if (nbytes < ih.len) {
1054  		spin_unlock(&fc->lock);
1055  		return -EINVAL;
1056  	}
1057  
1058  	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1059  	head = dequeue_forget(fc, max_forgets, &count);
1060  	spin_unlock(&fc->lock);
1061  
1062  	arg.count = count;
1063  	ih.len += count * sizeof(struct fuse_forget_one);
1064  	err = fuse_copy_one(cs, &ih, sizeof(ih));
1065  	if (!err)
1066  		err = fuse_copy_one(cs, &arg, sizeof(arg));
1067  
1068  	while (head) {
1069  		struct fuse_forget_link *forget = head;
1070  
1071  		if (!err) {
1072  			err = fuse_copy_one(cs, &forget->forget_one,
1073  					    sizeof(forget->forget_one));
1074  		}
1075  		head = forget->next;
1076  		kfree(forget);
1077  	}
1078  
1079  	fuse_copy_finish(cs);
1080  
1081  	if (err)
1082  		return err;
1083  
1084  	return ih.len;
1085  }
1086  
1087  static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
1088  			    size_t nbytes)
1089  __releases(fc->lock)
1090  {
1091  	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
1092  		return fuse_read_single_forget(fc, cs, nbytes);
1093  	else
1094  		return fuse_read_batch_forget(fc, cs, nbytes);
1095  }
1096  
1097  /*
1098   * Read a single request into the userspace filesystem's buffer.  This
1099   * function waits until a request is available, then removes it from
1100   * the pending list and copies request data to userspace buffer.  If
1101   * no reply is needed (FORGET) or request has been aborted or there
1102   * was an error during the copying then it's finished by calling
1103   * request_end().  Otherwise add it to the processing list, and set
1104   * the 'sent' flag.
1105   */
1106  static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1107  				struct fuse_copy_state *cs, size_t nbytes)
1108  {
1109  	int err;
1110  	struct fuse_req *req;
1111  	struct fuse_in *in;
1112  	unsigned reqsize;
1113  
1114   restart:
1115  	spin_lock(&fc->lock);
1116  	err = -EAGAIN;
1117  	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
1118  	    !request_pending(fc))
1119  		goto err_unlock;
1120  
1121  	request_wait(fc);
1122  	err = -ENODEV;
1123  	if (!fc->connected)
1124  		goto err_unlock;
1125  	err = -ERESTARTSYS;
1126  	if (!request_pending(fc))
1127  		goto err_unlock;
1128  
1129  	if (!list_empty(&fc->interrupts)) {
1130  		req = list_entry(fc->interrupts.next, struct fuse_req,
1131  				 intr_entry);
1132  		return fuse_read_interrupt(fc, cs, nbytes, req);
1133  	}
1134  
1135  	if (forget_pending(fc)) {
1136  		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
1137  			return fuse_read_forget(fc, cs, nbytes);
1138  
1139  		if (fc->forget_batch <= -8)
1140  			fc->forget_batch = 16;
1141  	}
1142  
1143  	req = list_entry(fc->pending.next, struct fuse_req, list);
1144  	req->state = FUSE_REQ_READING;
1145  	list_move(&req->list, &fc->io);
1146  
1147  	in = &req->in;
1148  	reqsize = in->h.len;
1149  	/* If request is too large, reply with an error and restart the read */
1150  	if (nbytes < reqsize) {
1151  		req->out.h.error = -EIO;
1152  		/* SETXATTR is special, since its data may be too large */
1153  		if (in->h.opcode == FUSE_SETXATTR)
1154  			req->out.h.error = -E2BIG;
1155  		request_end(fc, req);
1156  		goto restart;
1157  	}
1158  	spin_unlock(&fc->lock);
1159  	cs->req = req;
1160  	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1161  	if (!err)
1162  		err = fuse_copy_args(cs, in->numargs, in->argpages,
1163  				     (struct fuse_arg *) in->args, 0);
1164  	fuse_copy_finish(cs);
1165  	spin_lock(&fc->lock);
1166  	req->locked = 0;
1167  	if (req->aborted) {
1168  		request_end(fc, req);
1169  		return -ENODEV;
1170  	}
1171  	if (err) {
1172  		req->out.h.error = -EIO;
1173  		request_end(fc, req);
1174  		return err;
1175  	}
1176  	if (!req->isreply)
1177  		request_end(fc, req);
1178  	else {
1179  		req->state = FUSE_REQ_SENT;
1180  		list_move_tail(&req->list, &fc->processing);
1181  		if (req->interrupted)
1182  			queue_interrupt(fc, req);
1183  		spin_unlock(&fc->lock);
1184  	}
1185  	return reqsize;
1186  
1187   err_unlock:
1188  	spin_unlock(&fc->lock);
1189  	return err;
1190  }
1191  
1192  static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
1193  			      unsigned long nr_segs, loff_t pos)
1194  {
1195  	struct fuse_copy_state cs;
1196  	struct file *file = iocb->ki_filp;
1197  	struct fuse_conn *fc = fuse_get_conn(file);
1198  	if (!fc)
1199  		return -EPERM;
1200  
1201  	fuse_copy_init(&cs, fc, 1, iov, nr_segs);
1202  
1203  	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
1204  }
1205  
1206  static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
1207  				   struct pipe_buffer *buf)
1208  {
1209  	return 1;
1210  }
1211  
1212  static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
1213  	.can_merge = 0,
1214  	.map = generic_pipe_buf_map,
1215  	.unmap = generic_pipe_buf_unmap,
1216  	.confirm = generic_pipe_buf_confirm,
1217  	.release = generic_pipe_buf_release,
1218  	.steal = fuse_dev_pipe_buf_steal,
1219  	.get = generic_pipe_buf_get,
1220  };
1221  
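/*
 * splice(2) read from the device: the request is read into temporary
 * pipe buffers by fuse_dev_do_read() and the buffers are then linked
 * into the destination pipe.
 */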
1222  static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1223  				    struct pipe_inode_info *pipe,
1224  				    size_t len, unsigned int flags)
1225  {
1226  	int ret;
1227  	int page_nr = 0;
1228  	int do_wakeup = 0;
1229  	struct pipe_buffer *bufs;
1230  	struct fuse_copy_state cs;
1231  	struct fuse_conn *fc = fuse_get_conn(in);
1232  	if (!fc)
1233  		return -EPERM;
1234  
1235  	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1236  	if (!bufs)
1237  		return -ENOMEM;
1238  
1239  	fuse_copy_init(&cs, fc, 1, NULL, 0);
1240  	cs.pipebufs = bufs;
1241  	cs.pipe = pipe;
1242  	ret = fuse_dev_do_read(fc, in, &cs, len);
1243  	if (ret < 0)
1244  		goto out;
1245  
1246  	ret = 0;
1247  	pipe_lock(pipe);
1248  
1249  	if (!pipe->readers) {
1250  		send_sig(SIGPIPE, current, 0);
1251  		if (!ret)
1252  			ret = -EPIPE;
1253  		goto out_unlock;
1254  	}
1255  
1256  	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1257  		ret = -EIO;
1258  		goto out_unlock;
1259  	}
1260  
1261  	while (page_nr < cs.nr_segs) {
1262  		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1263  		struct pipe_buffer *buf = pipe->bufs + newbuf;
1264  
1265  		buf->page = bufs[page_nr].page;
1266  		buf->offset = bufs[page_nr].offset;
1267  		buf->len = bufs[page_nr].len;
1268  		buf->ops = &fuse_dev_pipe_buf_ops;
1269  
1270  		pipe->nrbufs++;
1271  		page_nr++;
1272  		ret += buf->len;
1273  
1274  		if (pipe->inode)
1275  			do_wakeup = 1;
1276  	}
1277  
1278  out_unlock:
1279  	pipe_unlock(pipe);
1280  
1281  	if (do_wakeup) {
1282  		smp_mb();
1283  		if (waitqueue_active(&pipe->wait))
1284  			wake_up_interruptible(&pipe->wait);
1285  		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1286  	}
1287  
1288  out:
1289  	for (; page_nr < cs.nr_segs; page_nr++)
1290  		page_cache_release(bufs[page_nr].page);
1291  
1292  	kfree(bufs);
1293  	return ret;
1294  }
1295  
1296  static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1297  			    struct fuse_copy_state *cs)
1298  {
1299  	struct fuse_notify_poll_wakeup_out outarg;
1300  	int err = -EINVAL;
1301  
1302  	if (size != sizeof(outarg))
1303  		goto err;
1304  
1305  	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1306  	if (err)
1307  		goto err;
1308  
1309  	fuse_copy_finish(cs);
1310  	return fuse_notify_poll_wakeup(fc, &outarg);
1311  
1312  err:
1313  	fuse_copy_finish(cs);
1314  	return err;
1315  }
1316  
1317  static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1318  				   struct fuse_copy_state *cs)
1319  {
1320  	struct fuse_notify_inval_inode_out outarg;
1321  	int err = -EINVAL;
1322  
1323  	if (size != sizeof(outarg))
1324  		goto err;
1325  
1326  	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1327  	if (err)
1328  		goto err;
1329  	fuse_copy_finish(cs);
1330  
1331  	down_read(&fc->killsb);
1332  	err = -ENOENT;
1333  	if (fc->sb) {
1334  		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1335  					       outarg.off, outarg.len);
1336  	}
1337  	up_read(&fc->killsb);
1338  	return err;
1339  
1340  err:
1341  	fuse_copy_finish(cs);
1342  	return err;
1343  }
1344  
1345  static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1346  				   struct fuse_copy_state *cs)
1347  {
1348  	struct fuse_notify_inval_entry_out outarg;
1349  	int err = -ENOMEM;
1350  	char *buf;
1351  	struct qstr name;
1352  
1353  	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1354  	if (!buf)
1355  		goto err;
1356  
1357  	err = -EINVAL;
1358  	if (size < sizeof(outarg))
1359  		goto err;
1360  
1361  	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1362  	if (err)
1363  		goto err;
1364  
1365  	err = -ENAMETOOLONG;
1366  	if (outarg.namelen > FUSE_NAME_MAX)
1367  		goto err;
1368  
1369  	err = -EINVAL;
1370  	if (size != sizeof(outarg) + outarg.namelen + 1)
1371  		goto err;
1372  
1373  	name.name = buf;
1374  	name.len = outarg.namelen;
1375  	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1376  	if (err)
1377  		goto err;
1378  	fuse_copy_finish(cs);
1379  	buf[outarg.namelen] = 0;
1380  	name.hash = full_name_hash(name.name, name.len);
1381  
1382  	down_read(&fc->killsb);
1383  	err = -ENOENT;
1384  	if (fc->sb)
1385  		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1386  	up_read(&fc->killsb);
1387  	kfree(buf);
1388  	return err;
1389  
1390  err:
1391  	kfree(buf);
1392  	fuse_copy_finish(cs);
1393  	return err;
1394  }
1395  
1396  static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1397  			      struct fuse_copy_state *cs)
1398  {
1399  	struct fuse_notify_delete_out outarg;
1400  	int err = -ENOMEM;
1401  	char *buf;
1402  	struct qstr name;
1403  
1404  	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1405  	if (!buf)
1406  		goto err;
1407  
1408  	err = -EINVAL;
1409  	if (size < sizeof(outarg))
1410  		goto err;
1411  
1412  	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1413  	if (err)
1414  		goto err;
1415  
1416  	err = -ENAMETOOLONG;
1417  	if (outarg.namelen > FUSE_NAME_MAX)
1418  		goto err;
1419  
1420  	err = -EINVAL;
1421  	if (size != sizeof(outarg) + outarg.namelen + 1)
1422  		goto err;
1423  
1424  	name.name = buf;
1425  	name.len = outarg.namelen;
1426  	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1427  	if (err)
1428  		goto err;
1429  	fuse_copy_finish(cs);
1430  	buf[outarg.namelen] = 0;
1431  	name.hash = full_name_hash(name.name, name.len);
1432  
1433  	down_read(&fc->killsb);
1434  	err = -ENOENT;
1435  	if (fc->sb)
1436  		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1437  					       outarg.child, &name);
1438  	up_read(&fc->killsb);
1439  	kfree(buf);
1440  	return err;
1441  
1442  err:
1443  	kfree(buf);
1444  	fuse_copy_finish(cs);
1445  	return err;
1446  }
1447  
1448  static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1449  			     struct fuse_copy_state *cs)
1450  {
1451  	struct fuse_notify_store_out outarg;
1452  	struct inode *inode;
1453  	struct address_space *mapping;
1454  	u64 nodeid;
1455  	int err;
1456  	pgoff_t index;
1457  	unsigned int offset;
1458  	unsigned int num;
1459  	loff_t file_size;
1460  	loff_t end;
1461  
1462  	err = -EINVAL;
1463  	if (size < sizeof(outarg))
1464  		goto out_finish;
1465  
1466  	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1467  	if (err)
1468  		goto out_finish;
1469  
1470  	err = -EINVAL;
1471  	if (size - sizeof(outarg) != outarg.size)
1472  		goto out_finish;
1473  
1474  	nodeid = outarg.nodeid;
1475  
1476  	down_read(&fc->killsb);
1477  
1478  	err = -ENOENT;
1479  	if (!fc->sb)
1480  		goto out_up_killsb;
1481  
1482  	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1483  	if (!inode)
1484  		goto out_up_killsb;
1485  
1486  	mapping = inode->i_mapping;
1487  	index = outarg.offset >> PAGE_CACHE_SHIFT;
1488  	offset = outarg.offset & ~PAGE_CACHE_MASK;
1489  	file_size = i_size_read(inode);
1490  	end = outarg.offset + outarg.size;
1491  	if (end > file_size) {
1492  		file_size = end;
1493  		fuse_write_update_size(inode, file_size);
1494  	}
1495  
1496  	num = outarg.size;
1497  	while (num) {
1498  		struct page *page;
1499  		unsigned int this_num;
1500  
1501  		err = -ENOMEM;
1502  		page = find_or_create_page(mapping, index,
1503  					   mapping_gfp_mask(mapping));
1504  		if (!page)
1505  			goto out_iput;
1506  
1507  		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1508  		err = fuse_copy_page(cs, &page, offset, this_num, 0);
1509  		if (!err && offset == 0 && (num != 0 || file_size == end))
1510  			SetPageUptodate(page);
1511  		unlock_page(page);
1512  		page_cache_release(page);
1513  
1514  		if (err)
1515  			goto out_iput;
1516  
1517  		num -= this_num;
1518  		offset = 0;
1519  		index++;
1520  	}
1521  
1522  	err = 0;
1523  
1524  out_iput:
1525  	iput(inode);
1526  out_up_killsb:
1527  	up_read(&fc->killsb);
1528  out_finish:
1529  	fuse_copy_finish(cs);
1530  	return err;
1531  }
1532  
1533  static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1534  {
1535  	release_pages(req->pages, req->num_pages, 0);
1536  }
1537  
1538  static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1539  			 struct fuse_notify_retrieve_out *outarg)
1540  {
1541  	int err;
1542  	struct address_space *mapping = inode->i_mapping;
1543  	struct fuse_req *req;
1544  	pgoff_t index;
1545  	loff_t file_size;
1546  	unsigned int num;
1547  	unsigned int offset;
1548  	size_t total_len = 0;
1549  
1550  	req = fuse_get_req(fc);
1551  	if (IS_ERR(req))
1552  		return PTR_ERR(req);
1553  
1554  	offset = outarg->offset & ~PAGE_CACHE_MASK;
1555  
1556  	req->in.h.opcode = FUSE_NOTIFY_REPLY;
1557  	req->in.h.nodeid = outarg->nodeid;
1558  	req->in.numargs = 2;
1559  	req->in.argpages = 1;
1560  	req->page_offset = offset;
1561  	req->end = fuse_retrieve_end;
1562  
1563  	index = outarg->offset >> PAGE_CACHE_SHIFT;
1564  	file_size = i_size_read(inode);
1565  	num = outarg->size;
1566  	if (outarg->offset > file_size)
1567  		num = 0;
1568  	else if (outarg->offset + num > file_size)
1569  		num = file_size - outarg->offset;
1570  
1571  	while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
1572  		struct page *page;
1573  		unsigned int this_num;
1574  
1575  		page = find_get_page(mapping, index);
1576  		if (!page)
1577  			break;
1578  
1579  		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1580  		req->pages[req->num_pages] = page;
1581  		req->num_pages++;
1582  
1583  		offset = 0;
1584  		num -= this_num;
1585  		total_len += this_num;
1586  		index++;
1587  	}
1588  	req->misc.retrieve_in.offset = outarg->offset;
1589  	req->misc.retrieve_in.size = total_len;
1590  	req->in.args[0].size = sizeof(req->misc.retrieve_in);
1591  	req->in.args[0].value = &req->misc.retrieve_in;
1592  	req->in.args[1].size = total_len;
1593  
1594  	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1595  	if (err)
1596  		fuse_retrieve_end(fc, req);
1597  
1598  	return err;
1599  }
1600  
1601  static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1602  				struct fuse_copy_state *cs)
1603  {
1604  	struct fuse_notify_retrieve_out outarg;
1605  	struct inode *inode;
1606  	int err;
1607  
1608  	err = -EINVAL;
1609  	if (size != sizeof(outarg))
1610  		goto copy_finish;
1611  
1612  	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1613  	if (err)
1614  		goto copy_finish;
1615  
1616  	fuse_copy_finish(cs);
1617  
1618  	down_read(&fc->killsb);
1619  	err = -ENOENT;
1620  	if (fc->sb) {
1621  		u64 nodeid = outarg.nodeid;
1622  
1623  		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1624  		if (inode) {
1625  			err = fuse_retrieve(fc, inode, &outarg);
1626  			iput(inode);
1627  		}
1628  	}
1629  	up_read(&fc->killsb);
1630  
1631  	return err;
1632  
1633  copy_finish:
1634  	fuse_copy_finish(cs);
1635  	return err;
1636  }
1637  
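/*
 * Dispatch an unsolicited notification (a reply with zero unique ID)
 * from the filesystem to the matching handler.
 */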
1638  static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1639  		       unsigned int size, struct fuse_copy_state *cs)
1640  {
1641  	switch (code) {
1642  	case FUSE_NOTIFY_POLL:
1643  		return fuse_notify_poll(fc, size, cs);
1644  
1645  	case FUSE_NOTIFY_INVAL_INODE:
1646  		return fuse_notify_inval_inode(fc, size, cs);
1647  
1648  	case FUSE_NOTIFY_INVAL_ENTRY:
1649  		return fuse_notify_inval_entry(fc, size, cs);
1650  
1651  	case FUSE_NOTIFY_STORE:
1652  		return fuse_notify_store(fc, size, cs);
1653  
1654  	case FUSE_NOTIFY_RETRIEVE:
1655  		return fuse_notify_retrieve(fc, size, cs);
1656  
1657  	case FUSE_NOTIFY_DELETE:
1658  		return fuse_notify_delete(fc, size, cs);
1659  
1660  	default:
1661  		fuse_copy_finish(cs);
1662  		return -EINVAL;
1663  	}
1664  }
1665  
1666  /* Look up request on processing list by unique ID */
1667  static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1668  {
1669  	struct list_head *entry;
1670  
1671  	list_for_each(entry, &fc->processing) {
1672  		struct fuse_req *req;
1673  		req = list_entry(entry, struct fuse_req, list);
1674  		if (req->in.h.unique == unique || req->intr_unique == unique)
1675  			return req;
1676  	}
1677  	return NULL;
1678  }
1679  
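/*
 * Copy the reply arguments from the userspace buffer into the request.
 * The last argument may be shorter than expected if out->argvar is set
 * (variable length reply).
 */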
1680  static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1681  			 unsigned nbytes)
1682  {
1683  	unsigned reqsize = sizeof(struct fuse_out_header);
1684  
1685  	if (out->h.error)
1686  		return nbytes != reqsize ? -EINVAL : 0;
1687  
1688  	reqsize += len_args(out->numargs, out->args);
1689  
1690  	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1691  		return -EINVAL;
1692  	else if (reqsize > nbytes) {
1693  		struct fuse_arg *lastarg = &out->args[out->numargs-1];
1694  		unsigned diffsize = reqsize - nbytes;
1695  		if (diffsize > lastarg->size)
1696  			return -EINVAL;
1697  		lastarg->size -= diffsize;
1698  	}
1699  	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1700  			      out->page_zeroing);
1701  }
1702  
1703  /*
1704   * Write a single reply to a request.  First the header is copied from
1705   * the write buffer.  The request is then searched on the processing
1706   * list by the unique ID found in the header.  If found, then remove
1707   * it from the list and copy the rest of the buffer to the request.
1708   * The request is finished by calling request_end()
1709   */
1710  static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1711  				 struct fuse_copy_state *cs, size_t nbytes)
1712  {
1713  	int err;
1714  	struct fuse_req *req;
1715  	struct fuse_out_header oh;
1716  
1717  	if (nbytes < sizeof(struct fuse_out_header))
1718  		return -EINVAL;
1719  
1720  	err = fuse_copy_one(cs, &oh, sizeof(oh));
1721  	if (err)
1722  		goto err_finish;
1723  
1724  	err = -EINVAL;
1725  	if (oh.len != nbytes)
1726  		goto err_finish;
1727  
1728  	/*
1729  	 * A zero oh.unique indicates an unsolicited notification message,
1730  	 * in which case oh.error contains the notification code.
1731  	 */
1732  	if (!oh.unique) {
1733  		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1734  		return err ? err : nbytes;
1735  	}
1736  
1737  	err = -EINVAL;
1738  	if (oh.error <= -1000 || oh.error > 0)
1739  		goto err_finish;
1740  
1741  	spin_lock(&fc->lock);
1742  	err = -ENOENT;
1743  	if (!fc->connected)
1744  		goto err_unlock;
1745  
1746  	req = request_find(fc, oh.unique);
1747  	if (!req)
1748  		goto err_unlock;
1749  
1750  	if (req->aborted) {
1751  		spin_unlock(&fc->lock);
1752  		fuse_copy_finish(cs);
1753  		spin_lock(&fc->lock);
1754  		request_end(fc, req);
1755  		return -ENOENT;
1756  	}
1757  	/* Is it an interrupt reply? */
1758  	if (req->intr_unique == oh.unique) {
1759  		err = -EINVAL;
1760  		if (nbytes != sizeof(struct fuse_out_header))
1761  			goto err_unlock;
1762  
1763  		if (oh.error == -ENOSYS)
1764  			fc->no_interrupt = 1;
1765  		else if (oh.error == -EAGAIN)
1766  			queue_interrupt(fc, req);
1767  
1768  		spin_unlock(&fc->lock);
1769  		fuse_copy_finish(cs);
1770  		return nbytes;
1771  	}
1772  
1773  	req->state = FUSE_REQ_WRITING;
1774  	list_move(&req->list, &fc->io);
1775  	req->out.h = oh;
1776  	req->locked = 1;
1777  	cs->req = req;
1778  	if (!req->out.page_replace)
1779  		cs->move_pages = 0;
1780  	spin_unlock(&fc->lock);
1781  
1782  	err = copy_out_args(cs, &req->out, nbytes);
1783  	fuse_copy_finish(cs);
1784  
1785  	spin_lock(&fc->lock);
1786  	req->locked = 0;
1787  	if (!err) {
1788  		if (req->aborted)
1789  			err = -ENOENT;
1790  	} else if (!req->aborted)
1791  		req->out.h.error = -EIO;
1792  	request_end(fc, req);
1793  
1794  	return err ? err : nbytes;
1795  
1796   err_unlock:
1797  	spin_unlock(&fc->lock);
1798   err_finish:
1799  	fuse_copy_finish(cs);
1800  	return err;
1801  }
1802  
1803  static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1804  			      unsigned long nr_segs, loff_t pos)
1805  {
1806  	struct fuse_copy_state cs;
1807  	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1808  	if (!fc)
1809  		return -EPERM;
1810  
1811  	fuse_copy_init(&cs, fc, 0, iov, nr_segs);
1812  
1813  	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
1814  }
1815  
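/*
 * splice(2) write to the device: collect the pipe buffers making up
 * the reply and feed them to fuse_dev_do_write().  With SPLICE_F_MOVE
 * the pages may be moved into the page cache instead of being copied.
 */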
1816  static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1817  				     struct file *out, loff_t *ppos,
1818  				     size_t len, unsigned int flags)
1819  {
1820  	unsigned nbuf;
1821  	unsigned idx;
1822  	struct pipe_buffer *bufs;
1823  	struct fuse_copy_state cs;
1824  	struct fuse_conn *fc;
1825  	size_t rem;
1826  	ssize_t ret;
1827  
1828  	fc = fuse_get_conn(out);
1829  	if (!fc)
1830  		return -EPERM;
1831  
1832  	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1833  	if (!bufs)
1834  		return -ENOMEM;
1835  
1836  	pipe_lock(pipe);
1837  	nbuf = 0;
1838  	rem = 0;
1839  	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1840  		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1841  
1842  	ret = -EINVAL;
1843  	if (rem < len) {
1844  		pipe_unlock(pipe);
1845  		goto out;
1846  	}
1847  
1848  	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, fc, 0, NULL, nbuf);
	cs.pipebufs = bufs;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}

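/*
 * The device is always writable; it becomes readable when there is a
 * pending request, interrupt or forget to hand out to the daemon.
 */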
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
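			/*
			 * Hold an extra reference so the request cannot go
			 * away while fc->lock is dropped to wait for it to be
			 * unlocked and to run the end callback.
			 */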
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

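/*
 * Flush the background queue, fail everything still on the pending and
 * processing lists, and discard any queued forget requests.
 */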
static void end_queued_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	fc->max_background = UINT_MAX;
	flush_bg_queue(fc);
	end_requests(fc, &fc->pending);
	end_requests(fc, &fc->processing);
	while (forget_pending(fc))
		kfree(dequeue_forget(fc, 1, NULL));
}

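/*
 * Wake up everyone sleeping in poll() on files of this connection so
 * that they notice the connection is going away.
 */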
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
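/*
 * One way userspace reaches this is through the fusectl filesystem
 * (typically mounted at /sys/fs/fuse/connections/), by writing to a
 * connection's "abort" file.
 */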
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

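/*
 * Called when the last reference to the device file is dropped: no
 * reads or writes can be in progress on it any more, so only queued
 * requests and pollers are left to clean up.
 */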
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		fc->blocked = 0;
		end_queued_requests(fc);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

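/*
 * Plain read(2)/write(2) go through do_sync_read/do_sync_write, which
 * call the aio_read/aio_write methods below with a single iovec.
 */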
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

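/*
 * Register the request slab cache and the /dev/fuse misc character
 * device (minor FUSE_MINOR).
 */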
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}