1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4 
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8 
9 #include "fuse_i.h"
10 
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/module.h>
16 #include <linux/compat.h>
17 #include <linux/swap.h>
18 
19 static const struct file_operations fuse_direct_io_file_operations;
20 
21 static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
22 			  int opcode, struct fuse_open_out *outargp)
23 {
24 	struct fuse_open_in inarg;
25 	struct fuse_req *req;
26 	int err;
27 
28 	req = fuse_get_req(fc);
29 	if (IS_ERR(req))
30 		return PTR_ERR(req);
31 
32 	memset(&inarg, 0, sizeof(inarg));
33 	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
34 	if (!fc->atomic_o_trunc)
35 		inarg.flags &= ~O_TRUNC;
36 	req->in.h.opcode = opcode;
37 	req->in.h.nodeid = nodeid;
38 	req->in.numargs = 1;
39 	req->in.args[0].size = sizeof(inarg);
40 	req->in.args[0].value = &inarg;
41 	req->out.numargs = 1;
42 	req->out.args[0].size = sizeof(*outargp);
43 	req->out.args[0].value = outargp;
44 	fuse_request_send(fc, req);
45 	err = req->out.h.error;
46 	fuse_put_request(fc, req);
47 
48 	return err;
49 }
50 
51 struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
52 {
53 	struct fuse_file *ff;
54 
55 	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
56 	if (unlikely(!ff))
57 		return NULL;
58 
59 	ff->fc = fc;
60 	ff->reserved_req = fuse_request_alloc();
61 	if (unlikely(!ff->reserved_req)) {
62 		kfree(ff);
63 		return NULL;
64 	}
65 
66 	INIT_LIST_HEAD(&ff->write_entry);
67 	atomic_set(&ff->count, 0);
68 	RB_CLEAR_NODE(&ff->polled_node);
69 	init_waitqueue_head(&ff->poll_wait);
70 
71 	spin_lock(&fc->lock);
72 	ff->kh = ++fc->khctr;
73 	spin_unlock(&fc->lock);
74 
75 	return ff;
76 }
77 
78 void fuse_file_free(struct fuse_file *ff)
79 {
80 	fuse_request_free(ff->reserved_req);
81 	kfree(ff);
82 }
83 
84 struct fuse_file *fuse_file_get(struct fuse_file *ff)
85 {
86 	atomic_inc(&ff->count);
87 	return ff;
88 }
89 
90 static void fuse_release_async(struct work_struct *work)
91 {
92 	struct fuse_req *req;
93 	struct fuse_conn *fc;
94 	struct path path;
95 
96 	req = container_of(work, struct fuse_req, misc.release.work);
97 	path = req->misc.release.path;
98 	fc = get_fuse_conn(path.dentry->d_inode);
99 
100 	fuse_put_request(fc, req);
101 	path_put(&path);
102 }
103 
104 static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
105 {
106 	if (fc->destroy_req) {
107 		/*
108 		 * If this is a fuseblk mount, then it's possible that
109 		 * releasing the path will result in releasing the
110 		 * super block and sending the DESTROY request.  If
111 		 * the server is single threaded, this would hang.
112 		 * For this reason do the path_put() in a separate
113 		 * thread.
114 		 */
115 		atomic_inc(&req->count);
116 		INIT_WORK(&req->misc.release.work, fuse_release_async);
117 		schedule_work(&req->misc.release.work);
118 	} else {
119 		path_put(&req->misc.release.path);
120 	}
121 }
122 
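/*
 * Drop a reference to a fuse_file.  On the final put the reserved
 * RELEASE request is sent: synchronously when @sync is set (the
 * fuseblk case), otherwise in the background with fuse_release_end()
 * as its completion callback.
 */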
123 static void fuse_file_put(struct fuse_file *ff, bool sync)
124 {
125 	if (atomic_dec_and_test(&ff->count)) {
126 		struct fuse_req *req = ff->reserved_req;
127 
128 		if (sync) {
129 			fuse_request_send(ff->fc, req);
130 			path_put(&req->misc.release.path);
131 			fuse_put_request(ff->fc, req);
132 		} else {
133 			req->end = fuse_release_end;
134 			fuse_request_send_background(ff->fc, req);
135 		}
136 		kfree(ff);
137 	}
138 }
139 
140 int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
141 		 bool isdir)
142 {
143 	struct fuse_open_out outarg;
144 	struct fuse_file *ff;
145 	int err;
146 	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
147 
148 	ff = fuse_file_alloc(fc);
149 	if (!ff)
150 		return -ENOMEM;
151 
152 	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
153 	if (err) {
154 		fuse_file_free(ff);
155 		return err;
156 	}
157 
158 	if (isdir)
159 		outarg.open_flags &= ~FOPEN_DIRECT_IO;
160 
161 	ff->fh = outarg.fh;
162 	ff->nodeid = nodeid;
163 	ff->open_flags = outarg.open_flags;
164 	file->private_data = fuse_file_get(ff);
165 
166 	return 0;
167 }
168 EXPORT_SYMBOL_GPL(fuse_do_open);
169 
170 void fuse_finish_open(struct inode *inode, struct file *file)
171 {
172 	struct fuse_file *ff = file->private_data;
173 	struct fuse_conn *fc = get_fuse_conn(inode);
174 
175 	if (ff->open_flags & FOPEN_DIRECT_IO)
176 		file->f_op = &fuse_direct_io_file_operations;
177 	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
178 		invalidate_inode_pages2(inode->i_mapping);
179 	if (ff->open_flags & FOPEN_NONSEEKABLE)
180 		nonseekable_open(inode, file);
181 	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
182 		struct fuse_inode *fi = get_fuse_inode(inode);
183 
184 		spin_lock(&fc->lock);
185 		fi->attr_version = ++fc->attr_version;
186 		i_size_write(inode, 0);
187 		spin_unlock(&fc->lock);
188 		fuse_invalidate_attr(inode);
189 	}
190 }
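
/*
 * Illustration (not part of this file): the FOPEN_* bits handled above
 * are chosen by the userspace server in its reply to OPEN.  A minimal
 * sketch, assuming libfuse's high-level API and its fuse_file_info
 * fields:
 *
 *	static int xmp_open(const char *path, struct fuse_file_info *fi)
 *	{
 *		fi->direct_io = 1;	 (maps to FOPEN_DIRECT_IO)
 *		fi->keep_cache = 0;	 (cleared: cached pages invalidated)
 *		fi->nonseekable = 1;	 (maps to FOPEN_NONSEEKABLE)
 *		return 0;
 *	}
 */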
191 
192 int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
193 {
194 	struct fuse_conn *fc = get_fuse_conn(inode);
195 	int err;
196 
197 	err = generic_file_open(inode, file);
198 	if (err)
199 		return err;
200 
201 	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
202 	if (err)
203 		return err;
204 
205 	fuse_finish_open(inode, file);
206 
207 	return 0;
208 }
209 
210 static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
211 {
212 	struct fuse_conn *fc = ff->fc;
213 	struct fuse_req *req = ff->reserved_req;
214 	struct fuse_release_in *inarg = &req->misc.release.in;
215 
216 	spin_lock(&fc->lock);
217 	list_del(&ff->write_entry);
218 	if (!RB_EMPTY_NODE(&ff->polled_node))
219 		rb_erase(&ff->polled_node, &fc->polled_files);
220 	spin_unlock(&fc->lock);
221 
222 	wake_up_interruptible_all(&ff->poll_wait);
223 
224 	inarg->fh = ff->fh;
225 	inarg->flags = flags;
226 	req->in.h.opcode = opcode;
227 	req->in.h.nodeid = ff->nodeid;
228 	req->in.numargs = 1;
229 	req->in.args[0].size = sizeof(struct fuse_release_in);
230 	req->in.args[0].value = inarg;
231 }
232 
233 void fuse_release_common(struct file *file, int opcode)
234 {
235 	struct fuse_file *ff;
236 	struct fuse_req *req;
237 
238 	ff = file->private_data;
239 	if (unlikely(!ff))
240 		return;
241 
242 	req = ff->reserved_req;
243 	fuse_prepare_release(ff, file->f_flags, opcode);
244 
245 	if (ff->flock) {
246 		struct fuse_release_in *inarg = &req->misc.release.in;
247 		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
248 		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
249 						       (fl_owner_t) file);
250 	}
251 	/* Hold vfsmount and dentry until release is finished */
252 	path_get(&file->f_path);
253 	req->misc.release.path = file->f_path;
254 
255 	/*
256 	 * Normally this will send the RELEASE request, however if
257 	 * some asynchronous READ or WRITE requests are outstanding,
258 	 * the sending will be delayed.
259 	 *
260 	 * Make the release synchronous if this is a fuseblk mount;
261 	 * synchronous RELEASE is allowed (and desirable) in this case
262 	 * because the server can be trusted not to screw up.
263 	 */
264 	fuse_file_put(ff, ff->fc->destroy_req != NULL);
265 }
266 
267 static int fuse_open(struct inode *inode, struct file *file)
268 {
269 	return fuse_open_common(inode, file, false);
270 }
271 
272 static int fuse_release(struct inode *inode, struct file *file)
273 {
274 	fuse_release_common(file, FUSE_RELEASE);
275 
276 	/* return value is ignored by VFS */
277 	return 0;
278 }
279 
280 void fuse_sync_release(struct fuse_file *ff, int flags)
281 {
282 	WARN_ON(atomic_read(&ff->count) > 1);
283 	fuse_prepare_release(ff, flags, FUSE_RELEASE);
284 	ff->reserved_req->force = 1;
285 	fuse_request_send(ff->fc, ff->reserved_req);
286 	fuse_put_request(ff->fc, ff->reserved_req);
287 	kfree(ff);
288 }
289 EXPORT_SYMBOL_GPL(fuse_sync_release);
290 
291 /*
292  * Scramble the ID space with XTEA, so that the value of the files_struct
293  * pointer is not exposed to userspace.
294  */
295 u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
296 {
297 	u32 *k = fc->scramble_key;
298 	u64 v = (unsigned long) id;
299 	u32 v0 = v;
300 	u32 v1 = v >> 32;
301 	u32 sum = 0;
302 	int i;
303 
304 	for (i = 0; i < 32; i++) {
305 		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
306 		sum += 0x9E3779B9;
307 		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
308 	}
309 
310 	return (u64) v0 + ((u64) v1 << 32);
311 }
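
/*
 * For comparison (illustrative only, not part of this file): the loop
 * above is the encipher half of the standard XTEA block cipher.  A
 * self-contained reference version looks like this:
 *
 *	void xtea_encipher(u32 v[2], const u32 k[4])
 *	{
 *		u32 v0 = v[0], v1 = v[1], sum = 0;
 *		int i;
 *
 *		for (i = 0; i < 32; i++) {
 *			v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
 *			sum += 0x9E3779B9;
 *			v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
 *		}
 *		v[0] = v0;
 *		v[1] = v1;
 *	}
 *
 * fuse_lock_owner_id() runs exactly these rounds with fc->scramble_key
 * as the key, so the mapping is stable per connection but the raw
 * files_struct pointer cannot be recovered without the key.
 */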
312 
313 /*
314  * Check if page is under writeback
315  *
316  * This is currently done by walking the list of writepage requests
317  * for the inode, which can be pretty inefficient.
318  */
319 static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
320 {
321 	struct fuse_conn *fc = get_fuse_conn(inode);
322 	struct fuse_inode *fi = get_fuse_inode(inode);
323 	struct fuse_req *req;
324 	bool found = false;
325 
326 	spin_lock(&fc->lock);
327 	list_for_each_entry(req, &fi->writepages, writepages_entry) {
328 		pgoff_t curr_index;
329 
330 		BUG_ON(req->inode != inode);
331 		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
332 		if (curr_index == index) {
333 			found = true;
334 			break;
335 		}
336 	}
337 	spin_unlock(&fc->lock);
338 
339 	return found;
340 }
341 
342 /*
343  * Wait for page writeback to be completed.
344  *
345  * Since fuse doesn't rely on the VM writeback tracking, this has to
346  * use some other means.
347  */
348 static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
349 {
350 	struct fuse_inode *fi = get_fuse_inode(inode);
351 
352 	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
353 	return 0;
354 }
355 
356 static int fuse_flush(struct file *file, fl_owner_t id)
357 {
358 	struct inode *inode = file->f_path.dentry->d_inode;
359 	struct fuse_conn *fc = get_fuse_conn(inode);
360 	struct fuse_file *ff = file->private_data;
361 	struct fuse_req *req;
362 	struct fuse_flush_in inarg;
363 	int err;
364 
365 	if (is_bad_inode(inode))
366 		return -EIO;
367 
368 	if (fc->no_flush)
369 		return 0;
370 
371 	req = fuse_get_req_nofail(fc, file);
372 	memset(&inarg, 0, sizeof(inarg));
373 	inarg.fh = ff->fh;
374 	inarg.lock_owner = fuse_lock_owner_id(fc, id);
375 	req->in.h.opcode = FUSE_FLUSH;
376 	req->in.h.nodeid = get_node_id(inode);
377 	req->in.numargs = 1;
378 	req->in.args[0].size = sizeof(inarg);
379 	req->in.args[0].value = &inarg;
380 	req->force = 1;
381 	fuse_request_send(fc, req);
382 	err = req->out.h.error;
383 	fuse_put_request(fc, req);
384 	if (err == -ENOSYS) {
385 		fc->no_flush = 1;
386 		err = 0;
387 	}
388 	return err;
389 }
390 
391 /*
392  * Wait for all pending writepages on the inode to finish.
393  *
394  * This is currently done by blocking further writes with FUSE_NOWRITE
395  * and waiting for all sent writes to complete.
396  *
397  * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
398  * could conflict with truncation.
399  */
400 static void fuse_sync_writes(struct inode *inode)
401 {
402 	fuse_set_nowrite(inode);
403 	fuse_release_nowrite(inode);
404 }
405 
406 int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
407 		      int datasync, int isdir)
408 {
409 	struct inode *inode = file->f_mapping->host;
410 	struct fuse_conn *fc = get_fuse_conn(inode);
411 	struct fuse_file *ff = file->private_data;
412 	struct fuse_req *req;
413 	struct fuse_fsync_in inarg;
414 	int err;
415 
416 	if (is_bad_inode(inode))
417 		return -EIO;
418 
419 	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
420 	if (err)
421 		return err;
422 
423 	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
424 		return 0;
425 
426 	mutex_lock(&inode->i_mutex);
427 
428 	/*
429 	 * Start writeback against all dirty pages of the inode, then
430 	 * wait for all outstanding writes, before sending the FSYNC
431 	 * request.
432 	 */
433 	err = write_inode_now(inode, 0);
434 	if (err)
435 		goto out;
436 
437 	fuse_sync_writes(inode);
438 
439 	req = fuse_get_req(fc);
440 	if (IS_ERR(req)) {
441 		err = PTR_ERR(req);
442 		goto out;
443 	}
444 
445 	memset(&inarg, 0, sizeof(inarg));
446 	inarg.fh = ff->fh;
447 	inarg.fsync_flags = datasync ? 1 : 0;
448 	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
449 	req->in.h.nodeid = get_node_id(inode);
450 	req->in.numargs = 1;
451 	req->in.args[0].size = sizeof(inarg);
452 	req->in.args[0].value = &inarg;
453 	fuse_request_send(fc, req);
454 	err = req->out.h.error;
455 	fuse_put_request(fc, req);
456 	if (err == -ENOSYS) {
457 		if (isdir)
458 			fc->no_fsyncdir = 1;
459 		else
460 			fc->no_fsync = 1;
461 		err = 0;
462 	}
463 out:
464 	mutex_unlock(&inode->i_mutex);
465 	return err;
466 }
467 
468 static int fuse_fsync(struct file *file, loff_t start, loff_t end,
469 		      int datasync)
470 {
471 	return fuse_fsync_common(file, start, end, datasync, 0);
472 }
473 
474 void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
475 		    size_t count, int opcode)
476 {
477 	struct fuse_read_in *inarg = &req->misc.read.in;
478 	struct fuse_file *ff = file->private_data;
479 
480 	inarg->fh = ff->fh;
481 	inarg->offset = pos;
482 	inarg->size = count;
483 	inarg->flags = file->f_flags;
484 	req->in.h.opcode = opcode;
485 	req->in.h.nodeid = ff->nodeid;
486 	req->in.numargs = 1;
487 	req->in.args[0].size = sizeof(struct fuse_read_in);
488 	req->in.args[0].value = inarg;
489 	req->out.argvar = 1;
490 	req->out.numargs = 1;
491 	req->out.args[0].size = count;
492 }
493 
494 static size_t fuse_send_read(struct fuse_req *req, struct file *file,
495 			     loff_t pos, size_t count, fl_owner_t owner)
496 {
497 	struct fuse_file *ff = file->private_data;
498 	struct fuse_conn *fc = ff->fc;
499 
500 	fuse_read_fill(req, file, pos, count, FUSE_READ);
501 	if (owner != NULL) {
502 		struct fuse_read_in *inarg = &req->misc.read.in;
503 
504 		inarg->read_flags |= FUSE_READ_LOCKOWNER;
505 		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
506 	}
507 	fuse_request_send(fc, req);
508 	return req->out.args[0].size;
509 }
510 
511 static void fuse_read_update_size(struct inode *inode, loff_t size,
512 				  u64 attr_ver)
513 {
514 	struct fuse_conn *fc = get_fuse_conn(inode);
515 	struct fuse_inode *fi = get_fuse_inode(inode);
516 
517 	spin_lock(&fc->lock);
518 	if (attr_ver == fi->attr_version && size < inode->i_size) {
519 		fi->attr_version = ++fc->attr_version;
520 		i_size_write(inode, size);
521 	}
522 	spin_unlock(&fc->lock);
523 }
524 
525 static int fuse_readpage(struct file *file, struct page *page)
526 {
527 	struct inode *inode = page->mapping->host;
528 	struct fuse_conn *fc = get_fuse_conn(inode);
529 	struct fuse_req *req;
530 	size_t num_read;
531 	loff_t pos = page_offset(page);
532 	size_t count = PAGE_CACHE_SIZE;
533 	u64 attr_ver;
534 	int err;
535 
536 	err = -EIO;
537 	if (is_bad_inode(inode))
538 		goto out;
539 
540 	/*
541 	 * Page writeback can extend beyond the lifetime of the
542 	 * page-cache page, so make sure we read a properly synced
543 	 * page.
544 	 */
545 	fuse_wait_on_page_writeback(inode, page->index);
546 
547 	req = fuse_get_req(fc);
548 	err = PTR_ERR(req);
549 	if (IS_ERR(req))
550 		goto out;
551 
552 	attr_ver = fuse_get_attr_version(fc);
553 
554 	req->out.page_zeroing = 1;
555 	req->out.argpages = 1;
556 	req->num_pages = 1;
557 	req->pages[0] = page;
558 	num_read = fuse_send_read(req, file, pos, count, NULL);
559 	err = req->out.h.error;
560 	fuse_put_request(fc, req);
561 
562 	if (!err) {
563 		/*
564 		 * Short read means EOF.  If file size is larger, truncate it
565 		 */
566 		if (num_read < count)
567 			fuse_read_update_size(inode, pos + num_read, attr_ver);
568 
569 		SetPageUptodate(page);
570 	}
571 
572 	fuse_invalidate_attr(inode); /* atime changed */
573  out:
574 	unlock_page(page);
575 	return err;
576 }
577 
578 static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
579 {
580 	int i;
581 	size_t count = req->misc.read.in.size;
582 	size_t num_read = req->out.args[0].size;
583 	struct address_space *mapping = NULL;
584 
585 	for (i = 0; mapping == NULL && i < req->num_pages; i++)
586 		mapping = req->pages[i]->mapping;
587 
588 	if (mapping) {
589 		struct inode *inode = mapping->host;
590 
591 		/*
592 		 * Short read means EOF. If file size is larger, truncate it
593 		 */
594 		if (!req->out.h.error && num_read < count) {
595 			loff_t pos;
596 
597 			pos = page_offset(req->pages[0]) + num_read;
598 			fuse_read_update_size(inode, pos,
599 					      req->misc.read.attr_ver);
600 		}
601 		fuse_invalidate_attr(inode); /* atime changed */
602 	}
603 
604 	for (i = 0; i < req->num_pages; i++) {
605 		struct page *page = req->pages[i];
606 		if (!req->out.h.error)
607 			SetPageUptodate(page);
608 		else
609 			SetPageError(page);
610 		unlock_page(page);
611 		page_cache_release(page);
612 	}
613 	if (req->ff)
614 		fuse_file_put(req->ff, false);
615 }
616 
617 static void fuse_send_readpages(struct fuse_req *req, struct file *file)
618 {
619 	struct fuse_file *ff = file->private_data;
620 	struct fuse_conn *fc = ff->fc;
621 	loff_t pos = page_offset(req->pages[0]);
622 	size_t count = req->num_pages << PAGE_CACHE_SHIFT;
623 
624 	req->out.argpages = 1;
625 	req->out.page_zeroing = 1;
626 	req->out.page_replace = 1;
627 	fuse_read_fill(req, file, pos, count, FUSE_READ);
628 	req->misc.read.attr_ver = fuse_get_attr_version(fc);
629 	if (fc->async_read) {
630 		req->ff = fuse_file_get(ff);
631 		req->end = fuse_readpages_end;
632 		fuse_request_send_background(fc, req);
633 	} else {
634 		fuse_request_send(fc, req);
635 		fuse_readpages_end(fc, req);
636 		fuse_put_request(fc, req);
637 	}
638 }
639 
640 struct fuse_fill_data {
641 	struct fuse_req *req;
642 	struct file *file;
643 	struct inode *inode;
644 };
645 
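/*
 * read_cache_pages() callback: add @page to the read request being
 * built.  The pending request is flushed first when it is already full
 * (FUSE_MAX_PAGES_PER_REQ), when adding the page would exceed
 * fc->max_read, or when @page is not contiguous with the last page
 * added.
 */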
646 static int fuse_readpages_fill(void *_data, struct page *page)
647 {
648 	struct fuse_fill_data *data = _data;
649 	struct fuse_req *req = data->req;
650 	struct inode *inode = data->inode;
651 	struct fuse_conn *fc = get_fuse_conn(inode);
652 
653 	fuse_wait_on_page_writeback(inode, page->index);
654 
655 	if (req->num_pages &&
656 	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
657 	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
658 	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
659 		fuse_send_readpages(req, data->file);
660 		data->req = req = fuse_get_req(fc);
661 		if (IS_ERR(req)) {
662 			unlock_page(page);
663 			return PTR_ERR(req);
664 		}
665 	}
666 	page_cache_get(page);
667 	req->pages[req->num_pages] = page;
668 	req->num_pages++;
669 	return 0;
670 }
671 
672 static int fuse_readpages(struct file *file, struct address_space *mapping,
673 			  struct list_head *pages, unsigned nr_pages)
674 {
675 	struct inode *inode = mapping->host;
676 	struct fuse_conn *fc = get_fuse_conn(inode);
677 	struct fuse_fill_data data;
678 	int err;
679 
680 	err = -EIO;
681 	if (is_bad_inode(inode))
682 		goto out;
683 
684 	data.file = file;
685 	data.inode = inode;
686 	data.req = fuse_get_req(fc);
687 	err = PTR_ERR(data.req);
688 	if (IS_ERR(data.req))
689 		goto out;
690 
691 	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
692 	if (!err) {
693 		if (data.req->num_pages)
694 			fuse_send_readpages(data.req, file);
695 		else
696 			fuse_put_request(fc, data.req);
697 	}
698 out:
699 	return err;
700 }
701 
702 static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
703 				  unsigned long nr_segs, loff_t pos)
704 {
705 	struct inode *inode = iocb->ki_filp->f_mapping->host;
706 
707 	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
708 		int err;
709 		/*
710 		 * If trying to read past EOF, make sure the i_size
711 		 * attribute is up-to-date.
712 		 */
713 		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
714 		if (err)
715 			return err;
716 	}
717 
718 	return generic_file_aio_read(iocb, iov, nr_segs, pos);
719 }
720 
721 static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
722 			    loff_t pos, size_t count)
723 {
724 	struct fuse_write_in *inarg = &req->misc.write.in;
725 	struct fuse_write_out *outarg = &req->misc.write.out;
726 
727 	inarg->fh = ff->fh;
728 	inarg->offset = pos;
729 	inarg->size = count;
730 	req->in.h.opcode = FUSE_WRITE;
731 	req->in.h.nodeid = ff->nodeid;
732 	req->in.numargs = 2;
733 	if (ff->fc->minor < 9)
734 		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
735 	else
736 		req->in.args[0].size = sizeof(struct fuse_write_in);
737 	req->in.args[0].value = inarg;
738 	req->in.args[1].size = count;
739 	req->out.numargs = 1;
740 	req->out.args[0].size = sizeof(struct fuse_write_out);
741 	req->out.args[0].value = outarg;
742 }
743 
744 static size_t fuse_send_write(struct fuse_req *req, struct file *file,
745 			      loff_t pos, size_t count, fl_owner_t owner)
746 {
747 	struct fuse_file *ff = file->private_data;
748 	struct fuse_conn *fc = ff->fc;
749 	struct fuse_write_in *inarg = &req->misc.write.in;
750 
751 	fuse_write_fill(req, ff, pos, count);
752 	inarg->flags = file->f_flags;
753 	if (owner != NULL) {
754 		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
755 		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
756 	}
757 	fuse_request_send(fc, req);
758 	return req->misc.write.out.size;
759 }
760 
761 void fuse_write_update_size(struct inode *inode, loff_t pos)
762 {
763 	struct fuse_conn *fc = get_fuse_conn(inode);
764 	struct fuse_inode *fi = get_fuse_inode(inode);
765 
766 	spin_lock(&fc->lock);
767 	fi->attr_version = ++fc->attr_version;
768 	if (pos > inode->i_size)
769 		i_size_write(inode, pos);
770 	spin_unlock(&fc->lock);
771 }
772 
773 static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
774 				    struct inode *inode, loff_t pos,
775 				    size_t count)
776 {
777 	size_t res;
778 	unsigned offset;
779 	unsigned i;
780 
781 	for (i = 0; i < req->num_pages; i++)
782 		fuse_wait_on_page_writeback(inode, req->pages[i]->index);
783 
784 	res = fuse_send_write(req, file, pos, count, NULL);
785 
786 	offset = req->page_offset;
787 	count = res;
788 	for (i = 0; i < req->num_pages; i++) {
789 		struct page *page = req->pages[i];
790 
791 		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
792 			SetPageUptodate(page);
793 
794 		if (count > PAGE_CACHE_SIZE - offset)
795 			count -= PAGE_CACHE_SIZE - offset;
796 		else
797 			count = 0;
798 		offset = 0;
799 
800 		unlock_page(page);
801 		page_cache_release(page);
802 	}
803 
804 	return res;
805 }
806 
807 static ssize_t fuse_fill_write_pages(struct fuse_req *req,
808 			       struct address_space *mapping,
809 			       struct iov_iter *ii, loff_t pos)
810 {
811 	struct fuse_conn *fc = get_fuse_conn(mapping->host);
812 	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
813 	size_t count = 0;
814 	int err;
815 
816 	req->in.argpages = 1;
817 	req->page_offset = offset;
818 
819 	do {
820 		size_t tmp;
821 		struct page *page;
822 		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
823 		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
824 				     iov_iter_count(ii));
825 
826 		bytes = min_t(size_t, bytes, fc->max_write - count);
827 
828  again:
829 		err = -EFAULT;
830 		if (iov_iter_fault_in_readable(ii, bytes))
831 			break;
832 
833 		err = -ENOMEM;
834 		page = grab_cache_page_write_begin(mapping, index, 0);
835 		if (!page)
836 			break;
837 
838 		if (mapping_writably_mapped(mapping))
839 			flush_dcache_page(page);
840 
841 		pagefault_disable();
842 		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
843 		pagefault_enable();
844 		flush_dcache_page(page);
845 
846 		mark_page_accessed(page);
847 
848 		if (!tmp) {
849 			unlock_page(page);
850 			page_cache_release(page);
851 			bytes = min(bytes, iov_iter_single_seg_count(ii));
852 			goto again;
853 		}
854 
855 		err = 0;
856 		req->pages[req->num_pages] = page;
857 		req->num_pages++;
858 
859 		iov_iter_advance(ii, tmp);
860 		count += tmp;
861 		pos += tmp;
862 		offset += tmp;
863 		if (offset == PAGE_CACHE_SIZE)
864 			offset = 0;
865 
866 		if (!fc->big_writes)
867 			break;
868 	} while (iov_iter_count(ii) && count < fc->max_write &&
869 		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);
870 
871 	return count > 0 ? count : err;
872 }
873 
874 static ssize_t fuse_perform_write(struct file *file,
875 				  struct address_space *mapping,
876 				  struct iov_iter *ii, loff_t pos)
877 {
878 	struct inode *inode = mapping->host;
879 	struct fuse_conn *fc = get_fuse_conn(inode);
880 	int err = 0;
881 	ssize_t res = 0;
882 
883 	if (is_bad_inode(inode))
884 		return -EIO;
885 
886 	do {
887 		struct fuse_req *req;
888 		ssize_t count;
889 
890 		req = fuse_get_req(fc);
891 		if (IS_ERR(req)) {
892 			err = PTR_ERR(req);
893 			break;
894 		}
895 
896 		count = fuse_fill_write_pages(req, mapping, ii, pos);
897 		if (count <= 0) {
898 			err = count;
899 		} else {
900 			size_t num_written;
901 
902 			num_written = fuse_send_write_pages(req, file, inode,
903 							    pos, count);
904 			err = req->out.h.error;
905 			if (!err) {
906 				res += num_written;
907 				pos += num_written;
908 
909 				/* break out of the loop on short write */
910 				if (num_written != count)
911 					err = -EIO;
912 			}
913 		}
914 		fuse_put_request(fc, req);
915 	} while (!err && iov_iter_count(ii));
916 
917 	if (res > 0)
918 		fuse_write_update_size(inode, pos);
919 
920 	fuse_invalidate_attr(inode);
921 
922 	return res > 0 ? res : err;
923 }
924 
925 static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
926 				   unsigned long nr_segs, loff_t pos)
927 {
928 	struct file *file = iocb->ki_filp;
929 	struct address_space *mapping = file->f_mapping;
930 	size_t count = 0;
931 	size_t ocount = 0;
932 	ssize_t written = 0;
933 	ssize_t written_buffered = 0;
934 	struct inode *inode = mapping->host;
935 	ssize_t err;
936 	struct iov_iter i;
937 	loff_t endbyte = 0;
938 
939 	WARN_ON(iocb->ki_pos != pos);
940 
941 	ocount = 0;
942 	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
943 	if (err)
944 		return err;
945 
946 	count = ocount;
947 
948 	mutex_lock(&inode->i_mutex);
949 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
950 
951 	/* We can write back this queue in page reclaim */
952 	current->backing_dev_info = mapping->backing_dev_info;
953 
954 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
955 	if (err)
956 		goto out;
957 
958 	if (count == 0)
959 		goto out;
960 
961 	err = file_remove_suid(file);
962 	if (err)
963 		goto out;
964 
965 	err = file_update_time(file);
966 	if (err)
967 		goto out;
968 
969 	if (file->f_flags & O_DIRECT) {
970 		written = generic_file_direct_write(iocb, iov, &nr_segs,
971 						    pos, &iocb->ki_pos,
972 						    count, ocount);
973 		if (written < 0 || written == count)
974 			goto out;
975 
976 		pos += written;
977 		count -= written;
978 
979 		iov_iter_init(&i, iov, nr_segs, count, written);
980 		written_buffered = fuse_perform_write(file, mapping, &i, pos);
981 		if (written_buffered < 0) {
982 			err = written_buffered;
983 			goto out;
984 		}
985 		endbyte = pos + written_buffered - 1;
986 
987 		err = filemap_write_and_wait_range(file->f_mapping, pos,
988 						   endbyte);
989 		if (err)
990 			goto out;
991 
992 		invalidate_mapping_pages(file->f_mapping,
993 					 pos >> PAGE_CACHE_SHIFT,
994 					 endbyte >> PAGE_CACHE_SHIFT);
995 
996 		written += written_buffered;
997 		iocb->ki_pos = pos + written_buffered;
998 	} else {
999 		iov_iter_init(&i, iov, nr_segs, count, 0);
1000 		written = fuse_perform_write(file, mapping, &i, pos);
1001 		if (written >= 0)
1002 			iocb->ki_pos = pos + written;
1003 	}
1004 out:
1005 	current->backing_dev_info = NULL;
1006 	mutex_unlock(&inode->i_mutex);
1007 
1008 	return written ? written : err;
1009 }
1010 
1011 static void fuse_release_user_pages(struct fuse_req *req, int write)
1012 {
1013 	unsigned i;
1014 
1015 	for (i = 0; i < req->num_pages; i++) {
1016 		struct page *page = req->pages[i];
1017 		if (write)
1018 			set_page_dirty_lock(page);
1019 		put_page(page);
1020 	}
1021 }
1022 
1023 static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
1024 			       size_t *nbytesp, int write)
1025 {
1026 	size_t nbytes = *nbytesp;
1027 	unsigned long user_addr = (unsigned long) buf;
1028 	unsigned offset = user_addr & ~PAGE_MASK;
1029 	int npages;
1030 
1031 	/* Special case for kernel I/O: can copy directly into the buffer */
1032 	if (segment_eq(get_fs(), KERNEL_DS)) {
1033 		if (write)
1034 			req->in.args[1].value = (void *) user_addr;
1035 		else
1036 			req->out.args[0].value = (void *) user_addr;
1037 
1038 		return 0;
1039 	}
1040 
1041 	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
1042 	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1043 	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
1044 	npages = get_user_pages_fast(user_addr, npages, !write, req->pages);
1045 	if (npages < 0)
1046 		return npages;
1047 
1048 	req->num_pages = npages;
1049 	req->page_offset = offset;
1050 
1051 	if (write)
1052 		req->in.argpages = 1;
1053 	else
1054 		req->out.argpages = 1;
1055 
1056 	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
1057 	*nbytesp = min(*nbytesp, nbytes);
1058 
1059 	return 0;
1060 }
1061 
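/*
 * Transfer up to @count bytes at *@ppos directly between the userspace
 * buffer @buf and the server, bypassing the page cache.  The transfer
 * is split into requests of at most fc->max_write (or fc->max_read)
 * bytes; on success *@ppos is advanced by the number of bytes moved.
 */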
1062 ssize_t fuse_direct_io(struct file *file, const char __user *buf,
1063 		       size_t count, loff_t *ppos, int write)
1064 {
1065 	struct fuse_file *ff = file->private_data;
1066 	struct fuse_conn *fc = ff->fc;
1067 	size_t nmax = write ? fc->max_write : fc->max_read;
1068 	loff_t pos = *ppos;
1069 	ssize_t res = 0;
1070 	struct fuse_req *req;
1071 
1072 	req = fuse_get_req(fc);
1073 	if (IS_ERR(req))
1074 		return PTR_ERR(req);
1075 
1076 	while (count) {
1077 		size_t nres;
1078 		fl_owner_t owner = current->files;
1079 		size_t nbytes = min(count, nmax);
1080 		int err = fuse_get_user_pages(req, buf, &nbytes, write);
1081 		if (err) {
1082 			res = err;
1083 			break;
1084 		}
1085 
1086 		if (write)
1087 			nres = fuse_send_write(req, file, pos, nbytes, owner);
1088 		else
1089 			nres = fuse_send_read(req, file, pos, nbytes, owner);
1090 
1091 		fuse_release_user_pages(req, !write);
1092 		if (req->out.h.error) {
1093 			if (!res)
1094 				res = req->out.h.error;
1095 			break;
1096 		} else if (nres > nbytes) {
1097 			res = -EIO;
1098 			break;
1099 		}
1100 		count -= nres;
1101 		res += nres;
1102 		pos += nres;
1103 		buf += nres;
1104 		if (nres != nbytes)
1105 			break;
1106 		if (count) {
1107 			fuse_put_request(fc, req);
1108 			req = fuse_get_req(fc);
1109 			if (IS_ERR(req))
1110 				break;
1111 		}
1112 	}
1113 	if (!IS_ERR(req))
1114 		fuse_put_request(fc, req);
1115 	if (res > 0)
1116 		*ppos = pos;
1117 
1118 	return res;
1119 }
1120 EXPORT_SYMBOL_GPL(fuse_direct_io);
1121 
1122 static ssize_t fuse_direct_read(struct file *file, char __user *buf,
1123 				     size_t count, loff_t *ppos)
1124 {
1125 	ssize_t res;
1126 	struct inode *inode = file->f_path.dentry->d_inode;
1127 
1128 	if (is_bad_inode(inode))
1129 		return -EIO;
1130 
1131 	res = fuse_direct_io(file, buf, count, ppos, 0);
1132 
1133 	fuse_invalidate_attr(inode);
1134 
1135 	return res;
1136 }
1137 
1138 static ssize_t __fuse_direct_write(struct file *file, const char __user *buf,
1139 				   size_t count, loff_t *ppos)
1140 {
1141 	struct inode *inode = file->f_path.dentry->d_inode;
1142 	ssize_t res;
1143 
1144 	res = generic_write_checks(file, ppos, &count, 0);
1145 	if (!res) {
1146 		res = fuse_direct_io(file, buf, count, ppos, 1);
1147 		if (res > 0)
1148 			fuse_write_update_size(inode, *ppos);
1149 	}
1150 
1151 	fuse_invalidate_attr(inode);
1152 
1153 	return res;
1154 }
1155 
1156 static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
1157 				 size_t count, loff_t *ppos)
1158 {
1159 	struct inode *inode = file->f_path.dentry->d_inode;
1160 	ssize_t res;
1161 
1162 	if (is_bad_inode(inode))
1163 		return -EIO;
1164 
1165 	/* Don't allow parallel writes to the same file */
1166 	mutex_lock(&inode->i_mutex);
1167 	res = __fuse_direct_write(file, buf, count, ppos);
1168 	mutex_unlock(&inode->i_mutex);
1169 
1170 	return res;
1171 }
1172 
1173 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
1174 {
1175 	__free_page(req->pages[0]);
1176 	fuse_file_put(req->ff, false);
1177 }
1178 
1179 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
1180 {
1181 	struct inode *inode = req->inode;
1182 	struct fuse_inode *fi = get_fuse_inode(inode);
1183 	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
1184 
1185 	list_del(&req->writepages_entry);
1186 	dec_bdi_stat(bdi, BDI_WRITEBACK);
1187 	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
1188 	bdi_writeout_inc(bdi);
1189 	wake_up(&fi->page_waitq);
1190 }
1191 
1192 /* Called under fc->lock, may release and reacquire it */
1193 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
1194 __releases(fc->lock)
1195 __acquires(fc->lock)
1196 {
1197 	struct fuse_inode *fi = get_fuse_inode(req->inode);
1198 	loff_t size = i_size_read(req->inode);
1199 	struct fuse_write_in *inarg = &req->misc.write.in;
1200 
1201 	if (!fc->connected)
1202 		goto out_free;
1203 
1204 	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
1205 		inarg->size = PAGE_CACHE_SIZE;
1206 	} else if (inarg->offset < size) {
1207 		inarg->size = size & (PAGE_CACHE_SIZE - 1);
1208 	} else {
1209 		/* Got truncated off completely */
1210 		goto out_free;
1211 	}
1212 
1213 	req->in.args[1].size = inarg->size;
1214 	fi->writectr++;
1215 	fuse_request_send_background_locked(fc, req);
1216 	return;
1217 
1218  out_free:
1219 	fuse_writepage_finish(fc, req);
1220 	spin_unlock(&fc->lock);
1221 	fuse_writepage_free(fc, req);
1222 	fuse_put_request(fc, req);
1223 	spin_lock(&fc->lock);
1224 }
1225 
1226 /*
1227  * If fi->writectr is positive (no truncate or fsync going on) send
1228  * all queued writepage requests.
1229  *
1230  * Called with fc->lock held
1231  */
1232 void fuse_flush_writepages(struct inode *inode)
1233 __releases(fc->lock)
1234 __acquires(fc->lock)
1235 {
1236 	struct fuse_conn *fc = get_fuse_conn(inode);
1237 	struct fuse_inode *fi = get_fuse_inode(inode);
1238 	struct fuse_req *req;
1239 
1240 	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
1241 		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
1242 		list_del_init(&req->list);
1243 		fuse_send_writepage(fc, req);
1244 	}
1245 }
1246 
1247 static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
1248 {
1249 	struct inode *inode = req->inode;
1250 	struct fuse_inode *fi = get_fuse_inode(inode);
1251 
1252 	mapping_set_error(inode->i_mapping, req->out.h.error);
1253 	spin_lock(&fc->lock);
1254 	fi->writectr--;
1255 	fuse_writepage_finish(fc, req);
1256 	spin_unlock(&fc->lock);
1257 	fuse_writepage_free(fc, req);
1258 }
1259 
1260 static int fuse_writepage_locked(struct page *page)
1261 {
1262 	struct address_space *mapping = page->mapping;
1263 	struct inode *inode = mapping->host;
1264 	struct fuse_conn *fc = get_fuse_conn(inode);
1265 	struct fuse_inode *fi = get_fuse_inode(inode);
1266 	struct fuse_req *req;
1267 	struct fuse_file *ff;
1268 	struct page *tmp_page;
1269 
1270 	set_page_writeback(page);
1271 
1272 	req = fuse_request_alloc_nofs();
1273 	if (!req)
1274 		goto err;
1275 
1276 	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1277 	if (!tmp_page)
1278 		goto err_free;
1279 
1280 	spin_lock(&fc->lock);
1281 	BUG_ON(list_empty(&fi->write_files));
1282 	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
1283 	req->ff = fuse_file_get(ff);
1284 	spin_unlock(&fc->lock);
1285 
1286 	fuse_write_fill(req, ff, page_offset(page), 0);
1287 
1288 	copy_highpage(tmp_page, page);
1289 	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
1290 	req->in.argpages = 1;
1291 	req->num_pages = 1;
1292 	req->pages[0] = tmp_page;
1293 	req->page_offset = 0;
1294 	req->end = fuse_writepage_end;
1295 	req->inode = inode;
1296 
1297 	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
1298 	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
1299 
1300 	spin_lock(&fc->lock);
1301 	list_add(&req->writepages_entry, &fi->writepages);
1302 	list_add_tail(&req->list, &fi->queued_writes);
1303 	fuse_flush_writepages(inode);
1304 	spin_unlock(&fc->lock);
1305 
1306 	end_page_writeback(page);
1307 
1308 	return 0;
1309 
1310 err_free:
1311 	fuse_request_free(req);
1312 err:
1313 	end_page_writeback(page);
1314 	return -ENOMEM;
1315 }
1316 
1317 static int fuse_writepage(struct page *page, struct writeback_control *wbc)
1318 {
1319 	int err;
1320 
1321 	err = fuse_writepage_locked(page);
1322 	unlock_page(page);
1323 
1324 	return err;
1325 }
1326 
1327 static int fuse_launder_page(struct page *page)
1328 {
1329 	int err = 0;
1330 	if (clear_page_dirty_for_io(page)) {
1331 		struct inode *inode = page->mapping->host;
1332 		err = fuse_writepage_locked(page);
1333 		if (!err)
1334 			fuse_wait_on_page_writeback(inode, page->index);
1335 	}
1336 	return err;
1337 }
1338 
1339 /*
1340  * Write back dirty pages now, because there may not be any suitable
1341  * open files later
1342  */
1343 static void fuse_vma_close(struct vm_area_struct *vma)
1344 {
1345 	filemap_write_and_wait(vma->vm_file->f_mapping);
1346 }
1347 
1348 /*
1349  * Wait for writeback against this page to complete before allowing it
1350  * to be marked dirty again, and hence written back again, possibly
1351  * before the previous writepage completed.
1352  *
1353  * Block here, instead of in ->writepage(), so that the userspace fs
1354  * can only block processes actually operating on the filesystem.
1355  *
1356  * Otherwise unprivileged userspace fs would be able to block
1357  * unrelated:
1358  *
1359  * - page migration
1360  * - sync(2)
1361  * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
1362  */
1363 static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1364 {
1365 	struct page *page = vmf->page;
1366 	/*
1367 	 * Don't use page->mapping as it may become NULL from a
1368 	 * concurrent truncate.
1369 	 */
1370 	struct inode *inode = vma->vm_file->f_mapping->host;
1371 
1372 	fuse_wait_on_page_writeback(inode, page->index);
1373 	return 0;
1374 }
1375 
1376 static const struct vm_operations_struct fuse_file_vm_ops = {
1377 	.close		= fuse_vma_close,
1378 	.fault		= filemap_fault,
1379 	.page_mkwrite	= fuse_page_mkwrite,
1380 };
1381 
1382 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
1383 {
1384 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
1385 		struct inode *inode = file->f_dentry->d_inode;
1386 		struct fuse_conn *fc = get_fuse_conn(inode);
1387 		struct fuse_inode *fi = get_fuse_inode(inode);
1388 		struct fuse_file *ff = file->private_data;
1389 		/*
1390 		 * file may be written through mmap, so chain it onto the
1391 		 * inode's write_files list
1392 		 */
1393 		spin_lock(&fc->lock);
1394 		if (list_empty(&ff->write_entry))
1395 			list_add(&ff->write_entry, &fi->write_files);
1396 		spin_unlock(&fc->lock);
1397 	}
1398 	file_accessed(file);
1399 	vma->vm_ops = &fuse_file_vm_ops;
1400 	return 0;
1401 }
1402 
1403 static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
1404 {
1405 	/* Can't provide the coherency needed for MAP_SHARED */
1406 	if (vma->vm_flags & VM_MAYSHARE)
1407 		return -ENODEV;
1408 
1409 	invalidate_inode_pages2(file->f_mapping);
1410 
1411 	return generic_file_mmap(file, vma);
1412 }
1413 
1414 static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
1415 				  struct file_lock *fl)
1416 {
1417 	switch (ffl->type) {
1418 	case F_UNLCK:
1419 		break;
1420 
1421 	case F_RDLCK:
1422 	case F_WRLCK:
1423 		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
1424 		    ffl->end < ffl->start)
1425 			return -EIO;
1426 
1427 		fl->fl_start = ffl->start;
1428 		fl->fl_end = ffl->end;
1429 		fl->fl_pid = ffl->pid;
1430 		break;
1431 
1432 	default:
1433 		return -EIO;
1434 	}
1435 	fl->fl_type = ffl->type;
1436 	return 0;
1437 }
1438 
1439 static void fuse_lk_fill(struct fuse_req *req, struct file *file,
1440 			 const struct file_lock *fl, int opcode, pid_t pid,
1441 			 int flock)
1442 {
1443 	struct inode *inode = file->f_path.dentry->d_inode;
1444 	struct fuse_conn *fc = get_fuse_conn(inode);
1445 	struct fuse_file *ff = file->private_data;
1446 	struct fuse_lk_in *arg = &req->misc.lk_in;
1447 
1448 	arg->fh = ff->fh;
1449 	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
1450 	arg->lk.start = fl->fl_start;
1451 	arg->lk.end = fl->fl_end;
1452 	arg->lk.type = fl->fl_type;
1453 	arg->lk.pid = pid;
1454 	if (flock)
1455 		arg->lk_flags |= FUSE_LK_FLOCK;
1456 	req->in.h.opcode = opcode;
1457 	req->in.h.nodeid = get_node_id(inode);
1458 	req->in.numargs = 1;
1459 	req->in.args[0].size = sizeof(*arg);
1460 	req->in.args[0].value = arg;
1461 }
1462 
1463 static int fuse_getlk(struct file *file, struct file_lock *fl)
1464 {
1465 	struct inode *inode = file->f_path.dentry->d_inode;
1466 	struct fuse_conn *fc = get_fuse_conn(inode);
1467 	struct fuse_req *req;
1468 	struct fuse_lk_out outarg;
1469 	int err;
1470 
1471 	req = fuse_get_req(fc);
1472 	if (IS_ERR(req))
1473 		return PTR_ERR(req);
1474 
1475 	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
1476 	req->out.numargs = 1;
1477 	req->out.args[0].size = sizeof(outarg);
1478 	req->out.args[0].value = &outarg;
1479 	fuse_request_send(fc, req);
1480 	err = req->out.h.error;
1481 	fuse_put_request(fc, req);
1482 	if (!err)
1483 		err = convert_fuse_file_lock(&outarg.lk, fl);
1484 
1485 	return err;
1486 }
1487 
1488 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
1489 {
1490 	struct inode *inode = file->f_path.dentry->d_inode;
1491 	struct fuse_conn *fc = get_fuse_conn(inode);
1492 	struct fuse_req *req;
1493 	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
1494 	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
1495 	int err;
1496 
1497 	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
1498 		/* NLM needs asynchronous locks, which we don't support yet */
1499 		return -ENOLCK;
1500 	}
1501 
1502 	/* Unlock on close is handled by the flush method */
1503 	if (fl->fl_flags & FL_CLOSE)
1504 		return 0;
1505 
1506 	req = fuse_get_req(fc);
1507 	if (IS_ERR(req))
1508 		return PTR_ERR(req);
1509 
1510 	fuse_lk_fill(req, file, fl, opcode, pid, flock);
1511 	fuse_request_send(fc, req);
1512 	err = req->out.h.error;
1513 	/* locking is restartable */
1514 	if (err == -EINTR)
1515 		err = -ERESTARTSYS;
1516 	fuse_put_request(fc, req);
1517 	return err;
1518 }
1519 
1520 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
1521 {
1522 	struct inode *inode = file->f_path.dentry->d_inode;
1523 	struct fuse_conn *fc = get_fuse_conn(inode);
1524 	int err;
1525 
1526 	if (cmd == F_CANCELLK) {
1527 		err = 0;
1528 	} else if (cmd == F_GETLK) {
1529 		if (fc->no_lock) {
1530 			posix_test_lock(file, fl);
1531 			err = 0;
1532 		} else
1533 			err = fuse_getlk(file, fl);
1534 	} else {
1535 		if (fc->no_lock)
1536 			err = posix_lock_file(file, fl, NULL);
1537 		else
1538 			err = fuse_setlk(file, fl, 0);
1539 	}
1540 	return err;
1541 }
1542 
1543 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
1544 {
1545 	struct inode *inode = file->f_path.dentry->d_inode;
1546 	struct fuse_conn *fc = get_fuse_conn(inode);
1547 	int err;
1548 
1549 	if (fc->no_flock) {
1550 		err = flock_lock_file_wait(file, fl);
1551 	} else {
1552 		struct fuse_file *ff = file->private_data;
1553 
1554 		/* emulate flock with POSIX locks */
1555 		fl->fl_owner = (fl_owner_t) file;
1556 		ff->flock = true;
1557 		err = fuse_setlk(file, fl, 1);
1558 	}
1559 
1560 	return err;
1561 }
1562 
1563 static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
1564 {
1565 	struct inode *inode = mapping->host;
1566 	struct fuse_conn *fc = get_fuse_conn(inode);
1567 	struct fuse_req *req;
1568 	struct fuse_bmap_in inarg;
1569 	struct fuse_bmap_out outarg;
1570 	int err;
1571 
1572 	if (!inode->i_sb->s_bdev || fc->no_bmap)
1573 		return 0;
1574 
1575 	req = fuse_get_req(fc);
1576 	if (IS_ERR(req))
1577 		return 0;
1578 
1579 	memset(&inarg, 0, sizeof(inarg));
1580 	inarg.block = block;
1581 	inarg.blocksize = inode->i_sb->s_blocksize;
1582 	req->in.h.opcode = FUSE_BMAP;
1583 	req->in.h.nodeid = get_node_id(inode);
1584 	req->in.numargs = 1;
1585 	req->in.args[0].size = sizeof(inarg);
1586 	req->in.args[0].value = &inarg;
1587 	req->out.numargs = 1;
1588 	req->out.args[0].size = sizeof(outarg);
1589 	req->out.args[0].value = &outarg;
1590 	fuse_request_send(fc, req);
1591 	err = req->out.h.error;
1592 	fuse_put_request(fc, req);
1593 	if (err == -ENOSYS)
1594 		fc->no_bmap = 1;
1595 
1596 	return err ? 0 : outarg.block;
1597 }
1598 
1599 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
1600 {
1601 	loff_t retval;
1602 	struct inode *inode = file->f_path.dentry->d_inode;
1603 
1604 	/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
1605 	if (origin == SEEK_CUR || origin == SEEK_SET)
1606 		return generic_file_llseek(file, offset, origin);
1607 
1608 	mutex_lock(&inode->i_mutex);
1609 	retval = fuse_update_attributes(inode, NULL, file, NULL);
1610 	if (!retval)
1611 		retval = generic_file_llseek(file, offset, origin);
1612 	mutex_unlock(&inode->i_mutex);
1613 
1614 	return retval;
1615 }
1616 
1617 static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
1618 			unsigned int nr_segs, size_t bytes, bool to_user)
1619 {
1620 	struct iov_iter ii;
1621 	int page_idx = 0;
1622 
1623 	if (!bytes)
1624 		return 0;
1625 
1626 	iov_iter_init(&ii, iov, nr_segs, bytes, 0);
1627 
1628 	while (iov_iter_count(&ii)) {
1629 		struct page *page = pages[page_idx++];
1630 		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
1631 		void *kaddr;
1632 
1633 		kaddr = kmap(page);
1634 
1635 		while (todo) {
1636 			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
1637 			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
1638 			size_t copy = min(todo, iov_len);
1639 			size_t left;
1640 
1641 			if (!to_user)
1642 				left = copy_from_user(kaddr, uaddr, copy);
1643 			else
1644 				left = copy_to_user(uaddr, kaddr, copy);
1645 
1646 			if (unlikely(left))
1647 				return -EFAULT;
1648 
1649 			iov_iter_advance(&ii, copy);
1650 			todo -= copy;
1651 			kaddr += copy;
1652 		}
1653 
1654 		kunmap(page);
1655 	}
1656 
1657 	return 0;
1658 }
1659 
1660 /*
1661  * CUSE servers compiled on 32bit broke on 64bit kernels because the
1662  * ABI was defined to be 'struct iovec' which is different on 32bit
1663  * and 64bit.  Fortunately we can determine which structure the server
1664  * used from the size of the reply.
1665  */
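/*
 * Illustration (sizes fixed by the respective ABIs): on a 64-bit
 * kernel sizeof(struct compat_iovec) == 8 (two 32-bit fields) while
 * sizeof(struct iovec) == 16 (two 64-bit fields), so for count >= 1
 * the checks below can match at most one of the two layouts for a
 * given reply length.
 */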
1666 static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
1667 				     size_t transferred, unsigned count,
1668 				     bool is_compat)
1669 {
1670 #ifdef CONFIG_COMPAT
1671 	if (count * sizeof(struct compat_iovec) == transferred) {
1672 		struct compat_iovec *ciov = src;
1673 		unsigned i;
1674 
1675 		/*
1676 		 * With this interface a 32bit server cannot support
1677 		 * non-compat (i.e. ones coming from 64bit apps) ioctl
1678 		 * requests
1679 		 */
1680 		if (!is_compat)
1681 			return -EINVAL;
1682 
1683 		for (i = 0; i < count; i++) {
1684 			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
1685 			dst[i].iov_len = ciov[i].iov_len;
1686 		}
1687 		return 0;
1688 	}
1689 #endif
1690 
1691 	if (count * sizeof(struct iovec) != transferred)
1692 		return -EIO;
1693 
1694 	memcpy(dst, src, transferred);
1695 	return 0;
1696 }
1697 
1698 /* Make sure iov_length() won't overflow */
1699 static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
1700 {
1701 	size_t n;
1702 	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
1703 
1704 	for (n = 0; n < count; n++, iov++) {
1705 		if (iov->iov_len > (size_t) max)
1706 			return -ENOMEM;
1707 		max -= iov->iov_len;
1708 	}
1709 	return 0;
1710 }
1711 
1712 static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
1713 				 void *src, size_t transferred, unsigned count,
1714 				 bool is_compat)
1715 {
1716 	unsigned i;
1717 	struct fuse_ioctl_iovec *fiov = src;
1718 
1719 	if (fc->minor < 16) {
1720 		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
1721 						 count, is_compat);
1722 	}
1723 
1724 	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
1725 		return -EIO;
1726 
1727 	for (i = 0; i < count; i++) {
1728 		/* Did the server supply an inappropriate value? */
1729 		if (fiov[i].base != (unsigned long) fiov[i].base ||
1730 		    fiov[i].len != (unsigned long) fiov[i].len)
1731 			return -EIO;
1732 
1733 		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
1734 		dst[i].iov_len = (size_t) fiov[i].len;
1735 
1736 #ifdef CONFIG_COMPAT
1737 		if (is_compat &&
1738 		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
1739 		     (compat_size_t) dst[i].iov_len != fiov[i].len))
1740 			return -EIO;
1741 #endif
1742 	}
1743 
1744 	return 0;
1745 }
1746 
1747 
1748 /*
1749  * For ioctls, there is no generic way to determine how much memory
1750  * needs to be read and/or written.  Furthermore, ioctls are allowed
1751  * to dereference the passed pointer, so the parameter requires deep
1752  * copying but FUSE has no idea whatsoever about what to copy in or
1753  * out.
1754  *
1755  * This is solved by allowing FUSE server to retry ioctl with
1756  * necessary in/out iovecs.  Let's assume the ioctl implementation
1757  * needs to read in the following structure.
1758  *
1759  * struct a {
1760  *	char	*buf;
1761  *	size_t	buflen;
1762  * }
1763  *
1764  * On the first callout to FUSE server, inarg->in_size and
1765  * inarg->out_size will be NULL; then, the server completes the ioctl
1766  * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
1767  * the actual iov array to
1768  *
1769  * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
1770  *
1771  * which tells FUSE to copy in the requested area and retry the ioctl.
1772  * On the second round, the server has access to the structure and
1773  * from that it can tell what to look for next, so on the invocation,
1774  * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
1775  *
1776  * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
1777  *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
1778  *
1779  * FUSE will copy both struct a and the pointed buffer from the
1780  * process doing the ioctl and retry ioctl with both struct a and the
1781  * buffer.
1782  *
1783  * This time, FUSE server has everything it needs and completes ioctl
1784  * without FUSE_IOCTL_RETRY which finishes the ioctl call.
1785  *
1786  * Copying data out works the same way.
1787  *
1788  * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
1789  * automatically initializes in and out iovs by decoding @cmd with
1790  * _IOC_* macros and the server is not allowed to request RETRY.  This
1791  * limits ioctl data transfers to well-formed ioctls and is the forced
1792  * behavior for all FUSE servers.
1793  */
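/*
 * Server-side sketch of the retry protocol described above
 * (illustrative only; assumes libfuse's low-level ioctl interface,
 * fuse_reply_ioctl_retry() and fuse_reply_ioctl(), whose exact
 * signatures vary between libfuse versions):
 *
 *	static void xmp_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd,
 *			      void *arg, struct fuse_file_info *fi,
 *			      unsigned flags, const void *in_buf,
 *			      size_t in_bufsz, size_t out_bufsz)
 *	{
 *		if (in_bufsz == 0) {
 *			First callout: ask the kernel for struct a itself.
 *
 *			struct iovec iov = { arg, sizeof(struct a) };
 *			fuse_reply_ioctl_retry(req, &iov, 1, NULL, 0);
 *		} else if (in_bufsz == sizeof(struct a)) {
 *			Second callout: also ask for the buffer it points to.
 *
 *			const struct a *a = in_buf;
 *			struct iovec iov[2] = {
 *				{ arg, sizeof(struct a) },
 *				{ a->buf, a->buflen },
 *			};
 *			fuse_reply_ioctl_retry(req, iov, 2, NULL, 0);
 *		} else {
 *			Third callout: everything is here, finish the ioctl.
 *
 *			fuse_reply_ioctl(req, 0, NULL, 0);
 *		}
 *	}
 */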
1794 long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
1795 		   unsigned int flags)
1796 {
1797 	struct fuse_file *ff = file->private_data;
1798 	struct fuse_conn *fc = ff->fc;
1799 	struct fuse_ioctl_in inarg = {
1800 		.fh = ff->fh,
1801 		.cmd = cmd,
1802 		.arg = arg,
1803 		.flags = flags
1804 	};
1805 	struct fuse_ioctl_out outarg;
1806 	struct fuse_req *req = NULL;
1807 	struct page **pages = NULL;
1808 	struct iovec *iov_page = NULL;
1809 	struct iovec *in_iov = NULL, *out_iov = NULL;
1810 	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
1811 	size_t in_size, out_size, transferred;
1812 	int err;
1813 
1814 #if BITS_PER_LONG == 32
1815 	inarg.flags |= FUSE_IOCTL_32BIT;
1816 #else
1817 	if (flags & FUSE_IOCTL_COMPAT)
1818 		inarg.flags |= FUSE_IOCTL_32BIT;
1819 #endif
1820 
1821 	/* assume all the iovs returned by client always fits in a page */
1822 	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
1823 
1824 	err = -ENOMEM;
1825 	pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
1826 	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
1827 	if (!pages || !iov_page)
1828 		goto out;
1829 
1830 	/*
1831 	 * If restricted, initialize IO parameters as encoded in @cmd.
1832 	 * RETRY from server is not allowed.
1833 	 */
1834 	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
1835 		struct iovec *iov = iov_page;
1836 
1837 		iov->iov_base = (void __user *)arg;
1838 		iov->iov_len = _IOC_SIZE(cmd);
1839 
1840 		if (_IOC_DIR(cmd) & _IOC_WRITE) {
1841 			in_iov = iov;
1842 			in_iovs = 1;
1843 		}
1844 
1845 		if (_IOC_DIR(cmd) & _IOC_READ) {
1846 			out_iov = iov;
1847 			out_iovs = 1;
1848 		}
1849 	}
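	/*
	 * For example (illustrative, not from this file):
	 * FS_IOC_GETFLAGS is defined elsewhere in the kernel as
	 * _IOR('f', 1, long), so _IOC_DIR(cmd) is _IOC_READ and
	 * _IOC_SIZE(cmd) is sizeof(long), and the block above sets up
	 * a single out iovec covering a long at @arg.
	 */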

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * The out buffer is used either for actual out data or, on
	 * retry, for the iovec array; make sure there is always at
	 * least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;
		/*
		 * Make sure the counts are within bounds; the separate
		 * checks protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(pages[0]);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	free_page((unsigned long) iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
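
/*
 * Illustrative sketch, not part of this kernel file: roughly what the
 * server side of the RETRY protocol documented above could look like
 * with the libfuse 2.x low-level API.  fuse_reply_ioctl_retry() and
 * fuse_reply_ioctl() are real libfuse helpers; the handler and
 * "struct a" are the hypothetical example from the comment.  Note
 * that RETRY is only honoured for unrestricted ioctls
 * (FUSE_IOCTL_UNRESTRICTED, e.g. CUSE), as enforced in
 * fuse_do_ioctl() above.
 */
#if 0	/* userspace example, never compiled here */
static void a_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd, void *arg,
		    struct fuse_file_info *fi, unsigned flags,
		    const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
	if (in_bufsz < sizeof(struct a)) {
		/* round 1: ask the kernel to copy in struct a itself */
		struct iovec iov = { arg, sizeof(struct a) };

		fuse_reply_ioctl_retry(req, &iov, 1, NULL, 0);
	} else if (in_bufsz == sizeof(struct a)) {
		/* round 2: struct a is here; now ask for the buffer too */
		const struct a *hdr = in_buf;
		struct iovec iov[2] = {
			{ arg, sizeof(struct a) },
			{ hdr->buf, hdr->buflen },
		};

		fuse_reply_ioctl_retry(req, iov, 2, NULL, 0);
	} else {
		/* round 3: everything is here; complete the ioctl */
		fuse_reply_ioctl(req, 0, NULL, 0);
	}
}
#endif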

long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files that have been polled are linked into the RB tree
 * fuse_conn->polled_files, which is indexed by kh.  Walk the tree and
 * find the matching one: if *link is non-NULL on return the file was
 * found, otherwise @parent_out is set up for insertion.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
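
/*
 * Illustrative sketch, not part of this kernel file: the userspace
 * side of the poll protocol, assuming the libfuse 2.x low-level API.
 * fuse_reply_poll(), fuse_lowlevel_notify_poll() and
 * fuse_pollhandle_destroy() are real libfuse helpers; our_poll_handle
 * and our_file_readable() are hypothetical server state.
 */
#if 0	/* userspace example, never compiled here */
static struct fuse_pollhandle *our_poll_handle;

static void xmp_poll(fuse_req_t req, fuse_ino_t ino,
		     struct fuse_file_info *fi, struct fuse_pollhandle *ph)
{
	/* ph is non-NULL iff the kernel set FUSE_POLL_SCHEDULE_NOTIFY */
	if (ph) {
		if (our_poll_handle)
			fuse_pollhandle_destroy(our_poll_handle);
		our_poll_handle = ph;
	}
	fuse_reply_poll(req, our_file_readable() ? POLLIN : 0);
}

/* called when new data arrives; ends up in fuse_notify_poll_wakeup() */
static void xmp_data_ready(void)
{
	if (our_poll_handle)
		fuse_lowlevel_notify_poll(our_poll_handle);
}
#endif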
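/*
 * Issue one synchronous direct read or write per iovec segment,
 * stopping at the first error or short transfer.  An error is
 * returned only if nothing has been transferred yet.
 */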
static ssize_t fuse_loop_dio(struct file *filp, const struct iovec *iov,
			     unsigned long nr_segs, loff_t *ppos, int rw)
{
	const struct iovec *vector = iov;
	ssize_t ret = 0;

	while (nr_segs > 0) {
		void __user *base;
		size_t len;
		ssize_t nr;

		base = vector->iov_base;
		len = vector->iov_len;
		vector++;
		nr_segs--;

		if (rw == WRITE)
			nr = __fuse_direct_write(filp, base, len, ppos);
		else
			nr = fuse_direct_read(filp, base, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != len)
			break;
	}

	return ret;
}
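/*
 * ->direct_IO() for the page-cache-backed file operations: O_DIRECT
 * requests are simply looped through the direct I/O helpers one iovec
 * segment at a time.
 */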
static ssize_t
fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	       loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = offset;

	return fuse_loop_dio(file, iov, nr_segs, &pos, rw);
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	/* no splice_read */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}