1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4 
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8 
9 #include "fuse_i.h"
10 
11 #include <linux/fdtable.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/filter.h>
15 #include <linux/fs_context.h>
16 #include <linux/moduleparam.h>
17 #include <linux/sched.h>
18 #include <linux/namei.h>
19 #include <linux/slab.h>
20 #include <linux/xattr.h>
21 #include <linux/iversion.h>
22 #include <linux/posix_acl.h>
23 #include <linux/security.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 
27 static bool __read_mostly allow_sys_admin_access;
28 module_param(allow_sys_admin_access, bool, 0644);
29 MODULE_PARM_DESC(allow_sys_admin_access,
30 		 "Allow users with CAP_SYS_ADMIN in initial userns to bypass allow_other access check");
31 
32 #include "../internal.h"
33 
34 static void fuse_advise_use_readdirplus(struct inode *dir)
35 {
36 	struct fuse_inode *fi = get_fuse_inode(dir);
37 
38 	set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
39 }
40 
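/*
 * Storage for the dentry timeout: on 64-bit builds without CONFIG_FUSE_BPF
 * the u64 jiffies value is stashed directly in the d_fsdata pointer;
 * otherwise it lives in a separately allocated struct fuse_dentry
 * (see fuse_dentry_init() below).
 */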
41 #if BITS_PER_LONG >= 64 && !defined(CONFIG_FUSE_BPF)
42 static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
43 {
44 	entry->d_fsdata = (void *) time;
45 }
46 
47 static inline u64 fuse_dentry_time(const struct dentry *entry)
48 {
49 	return (u64)entry->d_fsdata;
50 }
51 
52 #else
53 
54 static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
55 {
56 	((struct fuse_dentry *) dentry->d_fsdata)->time = time;
57 }
58 
59 static inline u64 fuse_dentry_time(const struct dentry *entry)
60 {
61 	return ((struct fuse_dentry *) entry->d_fsdata)->time;
62 }
63 #endif
64 
65 static void fuse_dentry_settime(struct dentry *dentry, u64 time)
66 {
67 	struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
68 	bool delete = !time && fc->delete_stale;
69 	/*
70 	 * Mess with DCACHE_OP_DELETE because dput() will be faster without it.
71 	 * Don't care about races, either way it's just an optimization
72 	 */
73 	if ((!delete && (dentry->d_flags & DCACHE_OP_DELETE)) ||
74 	    (delete && !(dentry->d_flags & DCACHE_OP_DELETE))) {
75 		spin_lock(&dentry->d_lock);
76 		if (!delete)
77 			dentry->d_flags &= ~DCACHE_OP_DELETE;
78 		else
79 			dentry->d_flags |= DCACHE_OP_DELETE;
80 		spin_unlock(&dentry->d_lock);
81 	}
82 
83 	__fuse_dentry_settime(dentry, time);
84 }
85 
86 void fuse_init_dentry_root(struct dentry *root, struct file *backing_dir)
87 {
88 #ifdef CONFIG_FUSE_BPF
89 	struct fuse_dentry *fuse_dentry = root->d_fsdata;
90 
91 	if (backing_dir) {
92 		fuse_dentry->backing_path = backing_dir->f_path;
93 		path_get(&fuse_dentry->backing_path);
94 	}
95 #endif
96 }
97 
98 /*
99  * Set dentry and possibly attribute timeouts from the lookup/mk*
100  * replies
101  */
102 void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
103 {
104 	fuse_dentry_settime(entry,
105 		fuse_time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
106 }
107 
108 void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
109 {
110 	set_mask_bits(&get_fuse_inode(inode)->inval_mask, 0, mask);
111 }
112 
113 /*
114  * Mark the attributes as stale, so that at the next call to
115  * ->getattr() they will be fetched from userspace
116  */
117 void fuse_invalidate_attr(struct inode *inode)
118 {
119 	fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS);
120 }
121 
122 static void fuse_dir_changed(struct inode *dir)
123 {
124 	fuse_invalidate_attr(dir);
125 	inode_maybe_inc_iversion(dir, false);
126 }
127 
128 /*
129  * Mark the attributes as stale due to an atime change.  Avoid the invalidate if
130  * atime is not used.
131  */
132 void fuse_invalidate_atime(struct inode *inode)
133 {
134 	if (!IS_RDONLY(inode))
135 		fuse_invalidate_attr_mask(inode, STATX_ATIME);
136 }
137 
138 /*
139  * Just mark the entry as stale, so that a next attempt to look it up
140  * will result in a new lookup call to userspace
141  *
142  * This is called when a dentry is about to become negative and the
143  * timeout is unknown (unlink, rmdir, rename and in some cases
144  * lookup)
145  */
146 void fuse_invalidate_entry_cache(struct dentry *entry)
147 {
148 	fuse_dentry_settime(entry, 0);
149 }
150 
151 /*
152  * Same as fuse_invalidate_entry_cache(), but also try to remove the
153  * dentry from the hash
154  */
155 static void fuse_invalidate_entry(struct dentry *entry)
156 {
157 	d_invalidate(entry);
158 	fuse_invalidate_entry_cache(entry);
159 }
160 
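/*
 * Prepare a FUSE_LOOKUP request.  The reply is variable sized: a plain
 * struct fuse_entry_out, optionally followed by a struct fuse_entry_bpf_out;
 * callers detect the extended form when fuse_simple_request() returns
 * sizeof(struct fuse_entry_bpf_out).
 */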
161 static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
162 			     u64 nodeid, const struct qstr *name,
163 			     struct fuse_entry_out *outarg,
164 			     struct fuse_entry_bpf_out *bpf_outarg)
165 {
166 	memset(outarg, 0, sizeof(struct fuse_entry_out));
167 	args->opcode = FUSE_LOOKUP;
168 	args->nodeid = nodeid;
169 	args->in_numargs = 1;
170 	args->in_args[0].size = name->len + 1;
171 	args->in_args[0].value = name->name;
172 	args->out_argvar = true;
173 	args->out_numargs = 2;
174 	args->out_args[0].size = sizeof(struct fuse_entry_out);
175 	args->out_args[0].value = outarg;
176 	args->out_args[1].size = sizeof(struct fuse_entry_bpf_out);
177 	args->out_args[1].value = bpf_outarg;
178 }
179 
180 #ifdef CONFIG_FUSE_BPF
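/*
 * Compare the cached backing inode, backing path and attached bpf program
 * against what the latest lookup reply describes.  Returns true when any
 * of them differ (or can no longer be resolved), meaning the dentry has
 * to be invalidated.
 */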
181 static bool backing_data_changed(struct fuse_inode *fi, struct dentry *entry,
182 				 struct fuse_entry_bpf *bpf_arg)
183 {
184 	struct path new_backing_path;
185 	struct inode *new_backing_inode;
186 	struct bpf_prog *bpf = NULL;
187 	int err;
188 	bool ret = true;
189 
190 	if (!entry || !fi->backing_inode) {
191 		ret = false;
192 		goto put_backing_file;
193 	}
194 
195 	get_fuse_backing_path(entry, &new_backing_path);
196 	new_backing_inode = fi->backing_inode;
197 	ihold(new_backing_inode);
198 
199 	err = fuse_handle_backing(bpf_arg, &new_backing_inode, &new_backing_path);
200 
201 	if (err)
202 		goto put_inode;
203 
204 	err = fuse_handle_bpf_prog(bpf_arg, entry->d_parent->d_inode, &bpf);
205 	if (err)
206 		goto put_bpf;
207 
208 	ret = (bpf != fi->bpf || fi->backing_inode != new_backing_inode ||
209 			!path_equal(&get_fuse_dentry(entry)->backing_path, &new_backing_path));
210 put_bpf:
211 	if (bpf)
212 		bpf_prog_put(bpf);
213 put_inode:
214 	iput(new_backing_inode);
215 	path_put(&new_backing_path);
216 put_backing_file:
217 	if (bpf_arg->backing_file)
218 		fput(bpf_arg->backing_file);
219 	return ret;
220 }
221 #endif
222 
223 /*
224  * Check whether the dentry is still valid
225  *
226  * If the entry validity timeout has expired and the dentry is
227  * positive, try to redo the lookup.  If the lookup results in a
228  * different inode, then let the VFS invalidate the dentry and redo
229  * the lookup once more.  If the lookup results in the same inode,
230  * then refresh the attributes, timeouts and mark the dentry valid.
231  */
232 static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
233 {
234 	struct inode *inode;
235 	struct dentry *parent;
236 	struct fuse_mount *fm;
237 	struct fuse_inode *fi;
238 	int ret;
239 
240 	inode = d_inode_rcu(entry);
241 	if (inode && fuse_is_bad(inode))
242 		goto invalid;
243 
244 #ifdef CONFIG_FUSE_BPF
245 	/* TODO: Do we need bpf support for revalidate?
246 	 * If the lower filesystem says the entry is invalid, FUSE probably shouldn't
247 	 * try to fix that without going through the normal lookup path...
248 	 */
249 	if (get_fuse_dentry(entry)->backing_path.dentry) {
250 		ret = fuse_revalidate_backing(entry, flags);
251 		if (ret <= 0) {
252 			goto out;
253 		}
254 	}
255 #endif
256 	if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
257 		 (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) {
258 		struct fuse_entry_out outarg;
259 		struct fuse_entry_bpf bpf_arg;
260 		FUSE_ARGS(args);
261 		struct fuse_forget_link *forget;
262 		u64 attr_version;
263 
264 		/* For negative dentries, always do a fresh lookup */
265 		if (!inode)
266 			goto invalid;
267 
268 		ret = -ECHILD;
269 		if (flags & LOOKUP_RCU)
270 			goto out;
271 		fm = get_fuse_mount(inode);
272 
273 		parent = dget_parent(entry);
274 
275 #ifdef CONFIG_FUSE_BPF
276 		/* TODO: Once we're handling timeouts for backing inodes, do a
277 		 * bpf based lookup_revalidate here.
278 		 */
279 		if (get_fuse_inode(parent->d_inode)->backing_inode) {
280 			dput(parent);
281 			ret = 1;
282 			goto out;
283 		}
284 #endif
285 		forget = fuse_alloc_forget();
286 		ret = -ENOMEM;
287 		if (!forget) {
288 			dput(parent);
289 			goto out;
290 		}
291 
292 		attr_version = fuse_get_attr_version(fm->fc);
293 
294 		fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)),
295 				 &entry->d_name, &outarg, &bpf_arg.out);
296 		ret = fuse_simple_request(fm, &args);
297 		dput(parent);
298 
299 		/* Zero nodeid is same as -ENOENT */
300 		if (!ret && !outarg.nodeid)
301 			ret = -ENOENT;
302 		if (!ret || ret == sizeof(bpf_arg.out)) {
303 			fi = get_fuse_inode(inode);
304 			if (outarg.nodeid != get_node_id(inode) ||
305 #ifdef CONFIG_FUSE_BPF
306 			    (ret == sizeof(bpf_arg.out) &&
307 					    backing_data_changed(fi, entry, &bpf_arg)) ||
308 #endif
309 			    (bool) IS_AUTOMOUNT(inode) != (bool) (outarg.attr.flags & FUSE_ATTR_SUBMOUNT)) {
310 				fuse_queue_forget(fm->fc, forget,
311 						  outarg.nodeid, 1);
312 				goto invalid;
313 			}
314 			spin_lock(&fi->lock);
315 			fi->nlookup++;
316 			spin_unlock(&fi->lock);
317 		}
318 		kfree(forget);
319 		if (ret == -ENOMEM || ret == -EINTR)
320 			goto out;
321 		if (ret || fuse_invalid_attr(&outarg.attr) ||
322 		    fuse_stale_inode(inode, outarg.generation, &outarg.attr))
323 			goto invalid;
324 
325 		forget_all_cached_acls(inode);
326 		fuse_change_attributes(inode, &outarg.attr, NULL,
327 				       ATTR_TIMEOUT(&outarg),
328 				       attr_version);
329 		fuse_change_entry_timeout(entry, &outarg);
330 	} else if (inode) {
331 		fi = get_fuse_inode(inode);
332 		if (flags & LOOKUP_RCU) {
333 			if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
334 				return -ECHILD;
335 		} else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
336 			parent = dget_parent(entry);
337 			fuse_advise_use_readdirplus(d_inode(parent));
338 			dput(parent);
339 		}
340 	}
341 	ret = 1;
342 out:
343 	return ret;
344 
345 invalid:
346 	ret = 0;
347 	goto out;
348 }
349 
350 #if BITS_PER_LONG < 64 || defined(CONFIG_FUSE_BPF)
351 static int fuse_dentry_init(struct dentry *dentry)
352 {
353 	dentry->d_fsdata = kzalloc(sizeof(struct fuse_dentry),
354 				   GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
355 
356 	return dentry->d_fsdata ? 0 : -ENOMEM;
357 }
358 static void fuse_dentry_release(struct dentry *dentry)
359 {
360 	struct fuse_dentry *fd = dentry->d_fsdata;
361 
362 #ifdef CONFIG_FUSE_BPF
363 	if (fd && fd->backing_path.dentry)
364 		path_put(&fd->backing_path);
365 
366 	if (fd && fd->bpf)
367 		bpf_prog_put(fd->bpf);
368 #endif
369 
370 	kfree_rcu(fd, rcu);
371 }
372 #endif
373 
374 static int fuse_dentry_delete(const struct dentry *dentry)
375 {
376 	return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
377 }
378 
379 /*
380  * Create a fuse_mount object with a new superblock (with path->dentry
381  * as the root), and return that mount so it can be auto-mounted on
382  * @path.
383  */
384 static struct vfsmount *fuse_dentry_automount(struct path *path)
385 {
386 	struct fs_context *fsc;
387 	struct vfsmount *mnt;
388 	struct fuse_inode *mp_fi = get_fuse_inode(d_inode(path->dentry));
389 
390 	fsc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
391 	if (IS_ERR(fsc))
392 		return ERR_CAST(fsc);
393 
394 	/* Pass the FUSE inode of the mount for fuse_get_tree_submount() */
395 	fsc->fs_private = mp_fi;
396 
397 	/* Create the submount */
398 	mnt = fc_mount(fsc);
399 	if (!IS_ERR(mnt))
400 		mntget(mnt);
401 
402 	put_fs_context(fsc);
403 	return mnt;
404 }
405 
406 /*
407  * Get the canonical path. Since we must translate to a path, this must be done
408  * in the context of the userspace daemon; however, the userspace daemon cannot
409  * look up paths on its own. Instead, we handle the lookup as a special case
410  * inside of the write request.
411  */
412 static int fuse_dentry_canonical_path(const struct path *path,
413 				       struct path *canonical_path)
414 {
415 	struct inode *inode = d_inode(path->dentry);
416 	//struct fuse_conn *fc = get_fuse_conn(inode);
417 	struct fuse_mount *fm = get_fuse_mount_super(path->mnt->mnt_sb);
418 	FUSE_ARGS(args);
419 	char *path_name;
420 	int err;
421 
422 #ifdef CONFIG_FUSE_BPF
423 	struct fuse_err_ret fer;
424 
425 	fer = fuse_bpf_backing(inode, struct fuse_dummy_io,
426 			       fuse_canonical_path_initialize,
427 			       fuse_canonical_path_backing,
428 			       fuse_canonical_path_finalize, path,
429 			       canonical_path);
430 	if (fer.ret)
431 		return PTR_ERR(fer.result);
432 #endif
433 
434 	path_name = (char *)get_zeroed_page(GFP_KERNEL);
435 	if (!path_name)
436 		return -ENOMEM;
437 
438 	args.opcode = FUSE_CANONICAL_PATH;
439 	args.nodeid = get_node_id(inode);
440 	args.in_numargs = 0;
441 	args.out_numargs = 1;
442 	args.out_args[0].size = PATH_MAX;
443 	args.out_args[0].value = path_name;
444 	args.canonical_path = canonical_path;
445 	args.out_argvar = 1;
446 
447 	err = fuse_simple_request(fm, &args);
448 	free_page((unsigned long)path_name);
449 	if (err > 0)
450 		return 0;
451 	if (err < 0)
452 		return err;
453 
454 	canonical_path->dentry = path->dentry;
455 	canonical_path->mnt = path->mnt;
456 	path_get(canonical_path);
457 	return 0;
458 }
459 
460 const struct dentry_operations fuse_dentry_operations = {
461 	.d_revalidate	= fuse_dentry_revalidate,
462 	.d_delete	= fuse_dentry_delete,
463 #if BITS_PER_LONG < 64 || defined(CONFIG_FUSE_BPF)
464 	.d_init		= fuse_dentry_init,
465 	.d_release	= fuse_dentry_release,
466 #endif
467 	.d_automount	= fuse_dentry_automount,
468 	.d_canonical_path = fuse_dentry_canonical_path,
469 };
470 
471 const struct dentry_operations fuse_root_dentry_operations = {
472 #if BITS_PER_LONG < 64 || defined(CONFIG_FUSE_BPF)
473 	.d_init		= fuse_dentry_init,
474 	.d_release	= fuse_dentry_release,
475 #endif
476 };
477 
478 int fuse_valid_type(int m)
479 {
480 	return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
481 		S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
482 }
483 
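/* i_size is a signed loff_t, so anything above LLONG_MAX would overflow. */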
484 static bool fuse_valid_size(u64 size)
485 {
486 	return size <= LLONG_MAX;
487 }
488 
489 bool fuse_invalid_attr(struct fuse_attr *attr)
490 {
491 	return !fuse_valid_type(attr->mode) || !fuse_valid_size(attr->size);
492 }
493 
494 int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
495 		     struct fuse_entry_out *outarg,
496 		     struct dentry *entry,
497 		     struct inode **inode)
498 {
499 	struct fuse_mount *fm = get_fuse_mount_super(sb);
500 	FUSE_ARGS(args);
501 	struct fuse_entry_bpf bpf_arg = {0};
502 	struct fuse_forget_link *forget;
503 	u64 attr_version;
504 	int err;
505 
506 	*inode = NULL;
507 	err = -ENAMETOOLONG;
508 	if (name->len > FUSE_NAME_MAX)
509 		goto out;
510 
511 	forget = fuse_alloc_forget();
512 	err = -ENOMEM;
513 	if (!forget)
514 		goto out;
515 
516 	attr_version = fuse_get_attr_version(fm->fc);
517 
518 	fuse_lookup_init(fm->fc, &args, nodeid, name, outarg, &bpf_arg.out);
519 	err = fuse_simple_request(fm, &args);
520 
521 #ifdef CONFIG_FUSE_BPF
522 	if (err == sizeof(bpf_arg.out)) {
523 		/* TODO Make sure this handles invalid handles */
524 		struct file *backing_file;
525 		struct inode *backing_inode;
526 
527 		err = -ENOENT;
528 		if (!entry)
529 			goto out_put_forget;
530 
531 		err = -EINVAL;
532 		backing_file = bpf_arg.backing_file;
533 		if (!backing_file)
534 			goto out_put_forget;
535 
536 		if (IS_ERR(backing_file)) {
537 			err = PTR_ERR(backing_file);
538 			goto out_put_forget;
539 		}
540 
541 		backing_inode = backing_file->f_inode;
542 		*inode = fuse_iget_backing(sb, outarg->nodeid, backing_inode);
543 		if (!*inode)
544 			goto out_put_forget;
545 
546 		err = fuse_handle_backing(&bpf_arg,
547 				&get_fuse_inode(*inode)->backing_inode,
548 				&get_fuse_dentry(entry)->backing_path);
549 		if (!err)
550 			err = fuse_handle_bpf_prog(&bpf_arg, NULL,
551 					   &get_fuse_inode(*inode)->bpf);
552 		if (err) {
553 			iput(*inode);
554 			*inode = NULL;
555 			goto out_put_forget;
556 		}
557 	} else
558 #endif
559 	{
560 		/* Zero nodeid is same as -ENOENT, but with valid timeout */
561 		if (err || !outarg->nodeid)
562 			goto out_put_forget;
563 
564 		err = -EIO;
565 		if (fuse_invalid_attr(&outarg->attr))
566 			goto out_put_forget;
567 
568 		if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
569 			pr_warn_once("root generation should be zero\n");
570 			outarg->generation = 0;
571 		}
572 
573 		*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
574 				&outarg->attr, ATTR_TIMEOUT(outarg),
575 				attr_version);
576 
577 	}
578 
579 	err = -ENOMEM;
580 	if (!*inode && outarg->nodeid) {
581 		fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1);
582 		goto out;
583 	}
584 	err = 0;
585 
586  out_put_forget:
587 	kfree(forget);
588  out:
589 	if (bpf_arg.backing_file)
590 		fput(bpf_arg.backing_file);
591 	return err;
592 }
593 
594 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
595 				  unsigned int flags)
596 {
597 	int err;
598 	struct fuse_entry_out outarg;
599 	struct inode *inode;
600 	struct dentry *newent;
601 	bool outarg_valid = true;
602 	bool locked;
603 
604 #ifdef CONFIG_FUSE_BPF
605 	struct fuse_err_ret fer;
606 
607 	fer = fuse_bpf_backing(dir, struct fuse_lookup_io,
608 			       fuse_lookup_initialize, fuse_lookup_backing,
609 			       fuse_lookup_finalize,
610 			       dir, entry, flags);
611 	if (fer.ret)
612 		return fer.result;
613 #endif
614 
615 	if (fuse_is_bad(dir))
616 		return ERR_PTR(-EIO);
617 
618 	locked = fuse_lock_inode(dir);
619 	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
620 			       &outarg, entry, &inode);
621 	fuse_unlock_inode(dir, locked);
622 	if (err == -ENOENT) {
623 		outarg_valid = false;
624 		err = 0;
625 	}
626 	if (err)
627 		goto out_err;
628 
629 	err = -EIO;
630 	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
631 		goto out_iput;
632 
633 	newent = d_splice_alias(inode, entry);
634 	err = PTR_ERR(newent);
635 	if (IS_ERR(newent))
636 		goto out_err;
637 
638 	entry = newent ? newent : entry;
639 	if (outarg_valid)
640 		fuse_change_entry_timeout(entry, &outarg);
641 	else
642 		fuse_invalidate_entry_cache(entry);
643 
644 	if (inode)
645 		fuse_advise_use_readdirplus(dir);
646 	return newent;
647 
648  out_iput:
649 	iput(inode);
650  out_err:
651 	return ERR_PTR(err);
652 }
653 
654 static int get_security_context(struct dentry *entry, umode_t mode,
655 				struct fuse_in_arg *ext)
656 {
657 	struct fuse_secctx *fctx;
658 	struct fuse_secctx_header *header;
659 	void *ctx = NULL, *ptr;
660 	u32 ctxlen, total_len = sizeof(*header);
661 	int err, nr_ctx = 0;
662 	const char *name;
663 	size_t namelen;
664 
665 	err = security_dentry_init_security(entry, mode, &entry->d_name,
666 					    &name, &ctx, &ctxlen);
667 	if (err) {
668 		if (err != -EOPNOTSUPP)
669 			goto out_err;
670 		/* No LSM supports this security hook; ignore the error */
671 		ctxlen = 0;
672 		ctx = NULL;
673 	}
674 
675 	if (ctxlen) {
676 		nr_ctx = 1;
677 		namelen = strlen(name) + 1;
678 		err = -EIO;
679 		if (WARN_ON(namelen > XATTR_NAME_MAX + 1 || ctxlen > S32_MAX))
680 			goto out_err;
681 		total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namelen + ctxlen);
682 	}
683 
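	/*
	 * The extension payload is laid out as:
	 * [fuse_secctx_header][fuse_secctx][xattr name + NUL][security context]
	 * with the per-context record padded to FUSE_REC_ALIGN above.
	 */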
684 	err = -ENOMEM;
685 	header = ptr = kzalloc(total_len, GFP_KERNEL);
686 	if (!ptr)
687 		goto out_err;
688 
689 	header->nr_secctx = nr_ctx;
690 	header->size = total_len;
691 	ptr += sizeof(*header);
692 	if (nr_ctx) {
693 		fctx = ptr;
694 		fctx->size = ctxlen;
695 		ptr += sizeof(*fctx);
696 
697 		strcpy(ptr, name);
698 		ptr += namelen;
699 
700 		memcpy(ptr, ctx, ctxlen);
701 	}
702 	ext->size = total_len;
703 	ext->value = header;
704 	err = 0;
705 out_err:
706 	kfree(ctx);
707 	return err;
708 }
709 
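/*
 * Grow the extension buffer by @bytes and return a pointer to the new,
 * zeroed tail.  On allocation failure the old buffer is freed and the
 * argument reset, so callers only need to check for NULL.
 */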
710 static void *extend_arg(struct fuse_in_arg *buf, u32 bytes)
711 {
712 	void *p;
713 	u32 newlen = buf->size + bytes;
714 
715 	p = krealloc(buf->value, newlen, GFP_KERNEL);
716 	if (!p) {
717 		kfree(buf->value);
718 		buf->size = 0;
719 		buf->value = NULL;
720 		return NULL;
721 	}
722 
723 	memset(p + buf->size, 0, bytes);
724 	buf->value = p;
725 	buf->size = newlen;
726 
727 	return p + newlen - bytes;
728 }
729 
730 static u32 fuse_ext_size(size_t size)
731 {
732 	return FUSE_REC_ALIGN(sizeof(struct fuse_ext_header) + size);
733 }
734 
735 /*
736  * This adds just a single supplementary group that matches the parent's group.
737  */
738 static int get_create_supp_group(struct inode *dir, struct fuse_in_arg *ext)
739 {
740 	struct fuse_conn *fc = get_fuse_conn(dir);
741 	struct fuse_ext_header *xh;
742 	struct fuse_supp_groups *sg;
743 	kgid_t kgid = dir->i_gid;
744 	gid_t parent_gid = from_kgid(fc->user_ns, kgid);
745 	u32 sg_len = fuse_ext_size(sizeof(*sg) + sizeof(sg->groups[0]));
746 
747 	if (parent_gid == (gid_t) -1 || gid_eq(kgid, current_fsgid()) ||
748 	    !in_group_p(kgid))
749 		return 0;
750 
751 	xh = extend_arg(ext, sg_len);
752 	if (!xh)
753 		return -ENOMEM;
754 
755 	xh->size = sg_len;
756 	xh->type = FUSE_EXT_GROUPS;
757 
758 	sg = (struct fuse_supp_groups *) &xh[1];
759 	sg->nr_groups = 1;
760 	sg->groups[0] = parent_gid;
761 
762 	return 0;
763 }
764 
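/*
 * Collect the optional request extensions (security context and/or the
 * parent's supplementary group) into one extra input argument.  If nothing
 * was produced, or an error occurred, the buffer is freed here; otherwise
 * the caller releases it with free_ext_value() after the request completes.
 */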
765 static int get_create_ext(struct fuse_args *args,
766 			  struct inode *dir, struct dentry *dentry,
767 			  umode_t mode)
768 {
769 	struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
770 	struct fuse_in_arg ext = { .size = 0, .value = NULL };
771 	int err = 0;
772 
773 	if (fc->init_security)
774 		err = get_security_context(dentry, mode, &ext);
775 	if (!err && fc->create_supp_group)
776 		err = get_create_supp_group(dir, &ext);
777 
778 	if (!err && ext.size) {
779 		WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args));
780 		args->is_ext = true;
781 		args->ext_idx = args->in_numargs++;
782 		args->in_args[args->ext_idx] = ext;
783 	} else {
784 		kfree(ext.value);
785 	}
786 
787 	return err;
788 }
789 
790 static void free_ext_value(struct fuse_args *args)
791 {
792 	if (args->is_ext)
793 		kfree(args->in_args[args->ext_idx].value);
794 }
795 
796 /*
797  * Atomic create+open operation
798  *
799  * If the filesystem doesn't support this, then fall back to separate
800  * 'mknod' + 'open' requests.
801  */
802 static int fuse_create_open(struct inode *dir, struct dentry *entry,
803 			    struct file *file, unsigned int flags,
804 			    umode_t mode, u32 opcode)
805 {
806 	int err;
807 	struct inode *inode;
808 	struct fuse_conn *fc = get_fuse_conn(dir);
809 	struct fuse_mount *fm = get_fuse_mount(dir);
810 	FUSE_ARGS(args);
811 	struct fuse_forget_link *forget;
812 	struct fuse_create_in inarg;
813 	struct fuse_open_out outopen;
814 	struct fuse_entry_out outentry;
815 	struct fuse_inode *fi;
816 	struct fuse_file *ff;
817 	bool trunc = flags & O_TRUNC;
818 
819 	/* Userspace expects S_IFREG in create mode */
820 	BUG_ON((mode & S_IFMT) != S_IFREG);
821 
822 #ifdef CONFIG_FUSE_BPF
823 	{
824 		struct fuse_err_ret fer;
825 
826 		fer = fuse_bpf_backing(dir, struct fuse_create_open_io,
827 				       fuse_create_open_initialize,
828 				       fuse_create_open_backing,
829 				       fuse_create_open_finalize,
830 				       dir, entry, file, flags, mode);
831 		if (fer.ret)
832 			return PTR_ERR(fer.result);
833 	}
834 #endif
835 
836 	forget = fuse_alloc_forget();
837 	err = -ENOMEM;
838 	if (!forget)
839 		goto out_err;
840 
841 	err = -ENOMEM;
842 	ff = fuse_file_alloc(fm);
843 	if (!ff)
844 		goto out_put_forget_req;
845 
846 	if (!fm->fc->dont_mask)
847 		mode &= ~current_umask();
848 
849 	flags &= ~O_NOCTTY;
850 	memset(&inarg, 0, sizeof(inarg));
851 	memset(&outentry, 0, sizeof(outentry));
852 	inarg.flags = flags;
853 	inarg.mode = mode;
854 	inarg.umask = current_umask();
855 
856 	if (fm->fc->handle_killpriv_v2 && trunc &&
857 	    !(flags & O_EXCL) && !capable(CAP_FSETID)) {
858 		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
859 	}
860 
861 	args.opcode = opcode;
862 	args.nodeid = get_node_id(dir);
863 	args.in_numargs = 2;
864 	args.in_args[0].size = sizeof(inarg);
865 	args.in_args[0].value = &inarg;
866 	args.in_args[1].size = entry->d_name.len + 1;
867 	args.in_args[1].value = entry->d_name.name;
868 	args.out_numargs = 2;
869 	args.out_args[0].size = sizeof(outentry);
870 	args.out_args[0].value = &outentry;
871 	args.out_args[1].size = sizeof(outopen);
872 	args.out_args[1].value = &outopen;
873 
874 	err = get_create_ext(&args, dir, entry, mode);
875 	if (err)
876 		goto out_put_forget_req;
877 
878 	err = fuse_simple_request(fm, &args);
879 	free_ext_value(&args);
880 	if (err)
881 		goto out_free_ff;
882 
883 	err = -EIO;
884 	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
885 	    fuse_invalid_attr(&outentry.attr))
886 		goto out_free_ff;
887 
888 	ff->fh = outopen.fh;
889 	ff->nodeid = outentry.nodeid;
890 	ff->open_flags = outopen.open_flags;
891 	fuse_passthrough_setup(fc, ff, &outopen);
892 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
893 			  &outentry.attr, ATTR_TIMEOUT(&outentry), 0);
894 	if (!inode) {
895 		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
896 		fuse_sync_release(NULL, ff, flags);
897 		fuse_queue_forget(fm->fc, forget, outentry.nodeid, 1);
898 		err = -ENOMEM;
899 		goto out_err;
900 	}
901 	kfree(forget);
902 	d_instantiate(entry, inode);
903 	fuse_change_entry_timeout(entry, &outentry);
904 	fuse_dir_changed(dir);
905 	err = finish_open(file, entry, generic_file_open);
906 	if (err) {
907 		fi = get_fuse_inode(inode);
908 		fuse_sync_release(fi, ff, flags);
909 	} else {
910 		file->private_data = ff;
911 		fuse_finish_open(inode, file);
912 		if (fm->fc->atomic_o_trunc && trunc)
913 			truncate_pagecache(inode, 0);
914 		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
915 			invalidate_inode_pages2(inode->i_mapping);
916 	}
917 	return err;
918 
919 out_free_ff:
920 	fuse_file_free(ff);
921 out_put_forget_req:
922 	kfree(forget);
923 out_err:
924 	return err;
925 }
926 
927 static int fuse_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
928 		      umode_t, dev_t);
929 static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
930 			    struct file *file, unsigned flags,
931 			    umode_t mode)
932 {
933 	int err;
934 	struct fuse_conn *fc = get_fuse_conn(dir);
935 	struct dentry *res = NULL;
936 
937 	if (fuse_is_bad(dir))
938 		return -EIO;
939 
940 	if (d_in_lookup(entry)) {
941 		res = fuse_lookup(dir, entry, 0);
942 		if (IS_ERR(res))
943 			return PTR_ERR(res);
944 
945 		if (res)
946 			entry = res;
947 	}
948 
949 	if (!(flags & O_CREAT) || d_really_is_positive(entry))
950 		goto no_open;
951 
952 	/* Only creates */
953 	file->f_mode |= FMODE_CREATED;
954 
955 	if (fc->no_create)
956 		goto mknod;
957 
958 	err = fuse_create_open(dir, entry, file, flags, mode, FUSE_CREATE);
959 	if (err == -ENOSYS) {
960 		fc->no_create = 1;
961 		goto mknod;
962 	} else if (err == -EEXIST)
963 		fuse_invalidate_entry(entry);
964 out_dput:
965 	dput(res);
966 	return err;
967 
968 mknod:
969 	err = fuse_mknod(&nop_mnt_idmap, dir, entry, mode, 0);
970 	if (err)
971 		goto out_dput;
972 no_open:
973 	return finish_no_open(file, res);
974 }
975 
976 /*
977  * Code shared between mknod, mkdir, symlink and link
978  */
979 static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
980 			    struct inode *dir, struct dentry *entry,
981 			    umode_t mode)
982 {
983 	struct fuse_entry_out outarg;
984 	struct inode *inode;
985 	struct dentry *d;
986 	int err;
987 	struct fuse_forget_link *forget;
988 
989 	if (fuse_is_bad(dir))
990 		return -EIO;
991 
992 	forget = fuse_alloc_forget();
993 	if (!forget)
994 		return -ENOMEM;
995 
996 	memset(&outarg, 0, sizeof(outarg));
997 	args->nodeid = get_node_id(dir);
998 	args->out_numargs = 1;
999 	args->out_args[0].size = sizeof(outarg);
1000 	args->out_args[0].value = &outarg;
1001 
1002 	if (args->opcode != FUSE_LINK) {
1003 		err = get_create_ext(args, dir, entry, mode);
1004 		if (err)
1005 			goto out_put_forget_req;
1006 	}
1007 
1008 	err = fuse_simple_request(fm, args);
1009 	free_ext_value(args);
1010 	if (err)
1011 		goto out_put_forget_req;
1012 
1013 	err = -EIO;
1014 	if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
1015 		goto out_put_forget_req;
1016 
1017 	if ((outarg.attr.mode ^ mode) & S_IFMT)
1018 		goto out_put_forget_req;
1019 
1020 	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
1021 			  &outarg.attr, ATTR_TIMEOUT(&outarg), 0);
1022 	if (!inode) {
1023 		fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
1024 		return -ENOMEM;
1025 	}
1026 	kfree(forget);
1027 
1028 	d_drop(entry);
1029 	d = d_splice_alias(inode, entry);
1030 	if (IS_ERR(d))
1031 		return PTR_ERR(d);
1032 
1033 	if (d) {
1034 		fuse_change_entry_timeout(d, &outarg);
1035 		dput(d);
1036 	} else {
1037 		fuse_change_entry_timeout(entry, &outarg);
1038 	}
1039 	fuse_dir_changed(dir);
1040 	return 0;
1041 
1042  out_put_forget_req:
1043 	if (err == -EEXIST)
1044 		fuse_invalidate_entry(entry);
1045 	kfree(forget);
1046 	return err;
1047 }
1048 
1049 static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
1050 		      struct dentry *entry, umode_t mode, dev_t rdev)
1051 {
1052 	struct fuse_mknod_in inarg;
1053 	struct fuse_mount *fm = get_fuse_mount(dir);
1054 	FUSE_ARGS(args);
1055 
1056 #ifdef CONFIG_FUSE_BPF
1057 	struct fuse_err_ret fer;
1058 
1059 	fer = fuse_bpf_backing(dir, struct fuse_mknod_in,
1060 			fuse_mknod_initialize, fuse_mknod_backing,
1061 			fuse_mknod_finalize,
1062 			dir, entry, mode, rdev);
1063 	if (fer.ret)
1064 		return PTR_ERR(fer.result);
1065 #endif
1066 
1067 	if (!fm->fc->dont_mask)
1068 		mode &= ~current_umask();
1069 
1070 	memset(&inarg, 0, sizeof(inarg));
1071 	inarg.mode = mode;
1072 	inarg.rdev = new_encode_dev(rdev);
1073 	inarg.umask = current_umask();
1074 	args.opcode = FUSE_MKNOD;
1075 	args.in_numargs = 2;
1076 	args.in_args[0].size = sizeof(inarg);
1077 	args.in_args[0].value = &inarg;
1078 	args.in_args[1].size = entry->d_name.len + 1;
1079 	args.in_args[1].value = entry->d_name.name;
1080 	return create_new_entry(fm, &args, dir, entry, mode);
1081 }
1082 
1083 static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
1084 		       struct dentry *entry, umode_t mode, bool excl)
1085 {
1086 	return fuse_mknod(&nop_mnt_idmap, dir, entry, mode, 0);
1087 }
1088 
1089 static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
1090 			struct file *file, umode_t mode)
1091 {
1092 	struct fuse_conn *fc = get_fuse_conn(dir);
1093 	int err;
1094 
1095 	if (fc->no_tmpfile)
1096 		return -EOPNOTSUPP;
1097 
1098 	err = fuse_create_open(dir, file->f_path.dentry, file, file->f_flags, mode, FUSE_TMPFILE);
1099 	if (err == -ENOSYS) {
1100 		fc->no_tmpfile = 1;
1101 		err = -EOPNOTSUPP;
1102 	}
1103 	return err;
1104 }
1105 
1106 static int fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
1107 		      struct dentry *entry, umode_t mode)
1108 {
1109 	struct fuse_mkdir_in inarg;
1110 	struct fuse_mount *fm = get_fuse_mount(dir);
1111 	FUSE_ARGS(args);
1112 
1113 #ifdef CONFIG_FUSE_BPF
1114 	struct fuse_err_ret fer;
1115 
1116 	fer = fuse_bpf_backing(dir, struct fuse_mkdir_in,
1117 			fuse_mkdir_initialize, fuse_mkdir_backing,
1118 			fuse_mkdir_finalize,
1119 			dir, entry, mode);
1120 	if (fer.ret)
1121 		return PTR_ERR(fer.result);
1122 #endif
1123 
1124 	if (!fm->fc->dont_mask)
1125 		mode &= ~current_umask();
1126 
1127 	memset(&inarg, 0, sizeof(inarg));
1128 	inarg.mode = mode;
1129 	inarg.umask = current_umask();
1130 	args.opcode = FUSE_MKDIR;
1131 	args.in_numargs = 2;
1132 	args.in_args[0].size = sizeof(inarg);
1133 	args.in_args[0].value = &inarg;
1134 	args.in_args[1].size = entry->d_name.len + 1;
1135 	args.in_args[1].value = entry->d_name.name;
1136 	return create_new_entry(fm, &args, dir, entry, S_IFDIR);
1137 }
1138 
1139 static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
1140 			struct dentry *entry, const char *link)
1141 {
1142 	struct fuse_mount *fm = get_fuse_mount(dir);
1143 	unsigned len = strlen(link) + 1;
1144 	FUSE_ARGS(args);
1145 
1146 #ifdef CONFIG_FUSE_BPF
1147 	struct fuse_err_ret fer;
1148 
1149 	fer = fuse_bpf_backing(dir, struct fuse_dummy_io,
1150 			fuse_symlink_initialize, fuse_symlink_backing,
1151 			fuse_symlink_finalize,
1152 			dir, entry, link, len);
1153 	if (fer.ret)
1154 		return PTR_ERR(fer.result);
1155 #endif
1156 
1157 	args.opcode = FUSE_SYMLINK;
1158 	args.in_numargs = 2;
1159 	args.in_args[0].size = entry->d_name.len + 1;
1160 	args.in_args[0].value = entry->d_name.name;
1161 	args.in_args[1].size = len;
1162 	args.in_args[1].value = link;
1163 	return create_new_entry(fm, &args, dir, entry, S_IFLNK);
1164 }
1165 
1166 void fuse_flush_time_update(struct inode *inode)
1167 {
1168 	int err = sync_inode_metadata(inode, 1);
1169 
1170 	mapping_set_error(inode->i_mapping, err);
1171 }
1172 
1173 static void fuse_update_ctime_in_cache(struct inode *inode)
1174 {
1175 	if (!IS_NOCMTIME(inode)) {
1176 		inode_set_ctime_current(inode);
1177 		mark_inode_dirty_sync(inode);
1178 		fuse_flush_time_update(inode);
1179 	}
1180 }
1181 
1182 void fuse_update_ctime(struct inode *inode)
1183 {
1184 	fuse_invalidate_attr_mask(inode, STATX_CTIME);
1185 	fuse_update_ctime_in_cache(inode);
1186 }
1187 
1188 static void fuse_entry_unlinked(struct dentry *entry)
1189 {
1190 	struct inode *inode = d_inode(entry);
1191 	struct fuse_conn *fc = get_fuse_conn(inode);
1192 	struct fuse_inode *fi = get_fuse_inode(inode);
1193 
1194 	spin_lock(&fi->lock);
1195 	fi->attr_version = atomic64_inc_return(&fc->attr_version);
1196 	/*
1197 	 * If i_nlink == 0 then unlink doesn't make sense, yet this can
1198 	 * happen if userspace filesystem is careless.  It would be
1199 	 * difficult to enforce correct nlink usage so just ignore this
1200 	 * condition here
1201 	 */
1202 	if (S_ISDIR(inode->i_mode))
1203 		clear_nlink(inode);
1204 	else if (inode->i_nlink > 0)
1205 		drop_nlink(inode);
1206 	spin_unlock(&fi->lock);
1207 	fuse_invalidate_entry_cache(entry);
1208 	fuse_update_ctime(inode);
1209 }
1210 
1211 static int fuse_unlink(struct inode *dir, struct dentry *entry)
1212 {
1213 	int err;
1214 	struct fuse_mount *fm = get_fuse_mount(dir);
1215 	FUSE_ARGS(args);
1216 
1217 	if (fuse_is_bad(dir))
1218 		return -EIO;
1219 
1220 #ifdef CONFIG_FUSE_BPF
1221 	{
1222 		struct fuse_err_ret fer;
1223 
1224 		fer = fuse_bpf_backing(dir, struct fuse_dummy_io,
1225 					fuse_unlink_initialize,
1226 					fuse_unlink_backing,
1227 					fuse_unlink_finalize,
1228 					dir, entry);
1229 		if (fer.ret)
1230 			return PTR_ERR(fer.result);
1231 	}
1232 #endif
1233 
1234 	args.opcode = FUSE_UNLINK;
1235 	args.nodeid = get_node_id(dir);
1236 	args.in_numargs = 1;
1237 	args.in_args[0].size = entry->d_name.len + 1;
1238 	args.in_args[0].value = entry->d_name.name;
1239 	err = fuse_simple_request(fm, &args);
1240 	if (!err) {
1241 		fuse_dir_changed(dir);
1242 		fuse_entry_unlinked(entry);
1243 	} else if (err == -EINTR || err == -ENOENT)
1244 		fuse_invalidate_entry(entry);
1245 	return err;
1246 }
1247 
1248 static int fuse_rmdir(struct inode *dir, struct dentry *entry)
1249 {
1250 	int err;
1251 	struct fuse_mount *fm = get_fuse_mount(dir);
1252 	FUSE_ARGS(args);
1253 
1254 	if (fuse_is_bad(dir))
1255 		return -EIO;
1256 
1257 #ifdef CONFIG_FUSE_BPF
1258 	{
1259 		struct fuse_err_ret fer;
1260 
1261 		fer = fuse_bpf_backing(dir, struct fuse_dummy_io,
1262 					fuse_rmdir_initialize,
1263 					fuse_rmdir_backing,
1264 					fuse_rmdir_finalize,
1265 					dir, entry);
1266 		if (fer.ret)
1267 			return PTR_ERR(fer.result);
1268 	}
1269 #endif
1270 
1271 	args.opcode = FUSE_RMDIR;
1272 	args.nodeid = get_node_id(dir);
1273 	args.in_numargs = 1;
1274 	args.in_args[0].size = entry->d_name.len + 1;
1275 	args.in_args[0].value = entry->d_name.name;
1276 	err = fuse_simple_request(fm, &args);
1277 	if (!err) {
1278 		fuse_dir_changed(dir);
1279 		fuse_entry_unlinked(entry);
1280 	} else if (err == -EINTR || err == -ENOENT)
1281 		fuse_invalidate_entry(entry);
1282 	return err;
1283 }
1284 
1285 static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
1286 			      struct inode *newdir, struct dentry *newent,
1287 			      unsigned int flags, int opcode, size_t argsize)
1288 {
1289 	int err;
1290 	struct fuse_rename2_in inarg;
1291 	struct fuse_mount *fm = get_fuse_mount(olddir);
1292 	FUSE_ARGS(args);
1293 
1294 	memset(&inarg, 0, argsize);
1295 	inarg.newdir = get_node_id(newdir);
1296 	inarg.flags = flags;
1297 	args.opcode = opcode;
1298 	args.nodeid = get_node_id(olddir);
1299 	args.in_numargs = 3;
1300 	args.in_args[0].size = argsize;
1301 	args.in_args[0].value = &inarg;
1302 	args.in_args[1].size = oldent->d_name.len + 1;
1303 	args.in_args[1].value = oldent->d_name.name;
1304 	args.in_args[2].size = newent->d_name.len + 1;
1305 	args.in_args[2].value = newent->d_name.name;
1306 	err = fuse_simple_request(fm, &args);
1307 	if (!err) {
1308 		/* ctime changes */
1309 		fuse_update_ctime(d_inode(oldent));
1310 
1311 		if (flags & RENAME_EXCHANGE)
1312 			fuse_update_ctime(d_inode(newent));
1313 
1314 		fuse_dir_changed(olddir);
1315 		if (olddir != newdir)
1316 			fuse_dir_changed(newdir);
1317 
1318 		/* newent will end up negative */
1319 		if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent))
1320 			fuse_entry_unlinked(newent);
1321 	} else if (err == -EINTR || err == -ENOENT) {
1322 		/* If request was interrupted, DEITY only knows if the
1323 		   rename actually took place.  If the invalidation
1324 		   fails (e.g. some process has CWD under the renamed
1325 		   directory), then there can be inconsistency between
1326 		   the dcache and the real filesystem.  Tough luck. */
1327 		fuse_invalidate_entry(oldent);
1328 		if (d_really_is_positive(newent))
1329 			fuse_invalidate_entry(newent);
1330 	}
1331 
1332 	return err;
1333 }
1334 
1335 static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
1336 			struct dentry *oldent, struct inode *newdir,
1337 			struct dentry *newent, unsigned int flags)
1338 {
1339 	struct fuse_conn *fc = get_fuse_conn(olddir);
1340 	int err;
1341 
1342 	if (fuse_is_bad(olddir))
1343 		return -EIO;
1344 
1345 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
1346 		return -EINVAL;
1347 
1348 	if (flags) {
1349 #ifdef CONFIG_FUSE_BPF
1350 		struct fuse_err_ret fer;
1351 
1352 		fer = fuse_bpf_backing(olddir, struct fuse_rename2_in,
1353 						fuse_rename2_initialize, fuse_rename2_backing,
1354 						fuse_rename2_finalize,
1355 						olddir, oldent, newdir, newent, flags);
1356 		if (fer.ret)
1357 			return PTR_ERR(fer.result);
1358 #endif
1359 
1360 		/* TODO: how should this go with bpfs involved? */
1361 		if (fc->no_rename2 || fc->minor < 23)
1362 			return -EINVAL;
1363 
1364 		err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
1365 					 FUSE_RENAME2,
1366 					 sizeof(struct fuse_rename2_in));
1367 		if (err == -ENOSYS) {
1368 			fc->no_rename2 = 1;
1369 			err = -EINVAL;
1370 		}
1371 	} else {
1372 #ifdef CONFIG_FUSE_BPF
1373 		struct fuse_err_ret fer;
1374 
1375 		fer = fuse_bpf_backing(olddir, struct fuse_rename_in,
1376 						fuse_rename_initialize, fuse_rename_backing,
1377 						fuse_rename_finalize,
1378 						olddir, oldent, newdir, newent);
1379 		if (fer.ret)
1380 			return PTR_ERR(fer.result);
1381 #endif
1382 
1383 		err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
1384 					 FUSE_RENAME,
1385 					 sizeof(struct fuse_rename_in));
1386 	}
1387 
1388 	return err;
1389 }
1390 
1391 static int fuse_link(struct dentry *entry, struct inode *newdir,
1392 		     struct dentry *newent)
1393 {
1394 	int err;
1395 	struct fuse_link_in inarg;
1396 	struct inode *inode = d_inode(entry);
1397 	struct fuse_mount *fm = get_fuse_mount(inode);
1398 	FUSE_ARGS(args);
1399 
1400 #ifdef CONFIG_FUSE_BPF
1401 	struct fuse_err_ret fer;
1402 
1403 	fer = fuse_bpf_backing(inode, struct fuse_link_in, fuse_link_initialize,
1404 			       fuse_link_backing, fuse_link_finalize, entry,
1405 			       newdir, newent);
1406 	if (fer.ret)
1407 		return PTR_ERR(fer.result);
1408 #endif
1409 
1410 	memset(&inarg, 0, sizeof(inarg));
1411 	inarg.oldnodeid = get_node_id(inode);
1412 	args.opcode = FUSE_LINK;
1413 	args.in_numargs = 2;
1414 	args.in_args[0].size = sizeof(inarg);
1415 	args.in_args[0].value = &inarg;
1416 	args.in_args[1].size = newent->d_name.len + 1;
1417 	args.in_args[1].value = newent->d_name.name;
1418 	err = create_new_entry(fm, &args, newdir, newent, inode->i_mode);
1419 	if (!err)
1420 		fuse_update_ctime_in_cache(inode);
1421 	else if (err == -EINTR)
1422 		fuse_invalidate_attr(inode);
1423 
1424 	return err;
1425 }
1426 
1427 void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
1428 			  struct kstat *stat)
1429 {
1430 	unsigned int blkbits;
1431 	struct fuse_conn *fc = get_fuse_conn(inode);
1432 
1433 	stat->dev = inode->i_sb->s_dev;
1434 	stat->ino = attr->ino;
1435 	stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
1436 	stat->nlink = attr->nlink;
1437 	stat->uid = make_kuid(fc->user_ns, attr->uid);
1438 	stat->gid = make_kgid(fc->user_ns, attr->gid);
1439 	stat->rdev = inode->i_rdev;
1440 	stat->atime.tv_sec = attr->atime;
1441 	stat->atime.tv_nsec = attr->atimensec;
1442 	stat->mtime.tv_sec = attr->mtime;
1443 	stat->mtime.tv_nsec = attr->mtimensec;
1444 	stat->ctime.tv_sec = attr->ctime;
1445 	stat->ctime.tv_nsec = attr->ctimensec;
1446 	stat->size = attr->size;
1447 	stat->blocks = attr->blocks;
1448 
1449 	if (attr->blksize != 0)
1450 		blkbits = ilog2(attr->blksize);
1451 	else
1452 		blkbits = inode->i_sb->s_blocksize_bits;
1453 
1454 	stat->blksize = 1 << blkbits;
1455 }
1456 
1457 static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr)
1458 {
1459 	memset(attr, 0, sizeof(*attr));
1460 	attr->ino = sx->ino;
1461 	attr->size = sx->size;
1462 	attr->blocks = sx->blocks;
1463 	attr->atime = sx->atime.tv_sec;
1464 	attr->mtime = sx->mtime.tv_sec;
1465 	attr->ctime = sx->ctime.tv_sec;
1466 	attr->atimensec = sx->atime.tv_nsec;
1467 	attr->mtimensec = sx->mtime.tv_nsec;
1468 	attr->ctimensec = sx->ctime.tv_nsec;
1469 	attr->mode = sx->mode;
1470 	attr->nlink = sx->nlink;
1471 	attr->uid = sx->uid;
1472 	attr->gid = sx->gid;
1473 	attr->rdev = new_encode_dev(MKDEV(sx->rdev_major, sx->rdev_minor));
1474 	attr->blksize = sx->blksize;
1475 }
1476 
1477 static int fuse_do_statx(struct inode *inode, struct file *file,
1478 			 struct kstat *stat)
1479 {
1480 	int err;
1481 	struct fuse_attr attr;
1482 	struct fuse_statx *sx;
1483 	struct fuse_statx_in inarg;
1484 	struct fuse_statx_out outarg;
1485 	struct fuse_mount *fm = get_fuse_mount(inode);
1486 	u64 attr_version = fuse_get_attr_version(fm->fc);
1487 	FUSE_ARGS(args);
1488 
1489 	memset(&inarg, 0, sizeof(inarg));
1490 	memset(&outarg, 0, sizeof(outarg));
1491 	/* Directories have separate file-handle space */
1492 	if (file && S_ISREG(inode->i_mode)) {
1493 		struct fuse_file *ff = file->private_data;
1494 
1495 		inarg.getattr_flags |= FUSE_GETATTR_FH;
1496 		inarg.fh = ff->fh;
1497 	}
1498 	/* For now leave sync hints as the default, request all stats. */
1499 	inarg.sx_flags = 0;
1500 	inarg.sx_mask = STATX_BASIC_STATS | STATX_BTIME;
1501 	args.opcode = FUSE_STATX;
1502 	args.nodeid = get_node_id(inode);
1503 	args.in_numargs = 1;
1504 	args.in_args[0].size = sizeof(inarg);
1505 	args.in_args[0].value = &inarg;
1506 	args.out_numargs = 1;
1507 	args.out_args[0].size = sizeof(outarg);
1508 	args.out_args[0].value = &outarg;
1509 	err = fuse_simple_request(fm, &args);
1510 	if (err)
1511 		return err;
1512 
1513 	sx = &outarg.stat;
1514 	if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) ||
1515 	    ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) ||
1516 					 inode_wrong_type(inode, sx->mode)))) {
1517 		fuse_make_bad(inode);
1518 		return -EIO;
1519 	}
1520 
1521 	fuse_statx_to_attr(&outarg.stat, &attr);
1522 	if ((sx->mask & STATX_BASIC_STATS) == STATX_BASIC_STATS) {
1523 		fuse_change_attributes(inode, &attr, &outarg.stat,
1524 				       ATTR_TIMEOUT(&outarg), attr_version);
1525 	}
1526 
1527 	if (stat) {
1528 		stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME);
1529 		stat->btime.tv_sec = sx->btime.tv_sec;
1530 		stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
1531 		fuse_fillattr(inode, &attr, stat);
1532 		stat->result_mask |= STATX_TYPE;
1533 	}
1534 
1535 	return 0;
1536 }
1537 
1538 static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
1539 			   struct file *file)
1540 {
1541 	int err;
1542 	struct fuse_getattr_in inarg;
1543 	struct fuse_attr_out outarg;
1544 	struct fuse_mount *fm = get_fuse_mount(inode);
1545 	FUSE_ARGS(args);
1546 	u64 attr_version;
1547 
1548 	attr_version = fuse_get_attr_version(fm->fc);
1549 
1550 	memset(&inarg, 0, sizeof(inarg));
1551 	memset(&outarg, 0, sizeof(outarg));
1552 	/* Directories have separate file-handle space */
1553 	if (file && S_ISREG(inode->i_mode)) {
1554 		struct fuse_file *ff = file->private_data;
1555 
1556 		inarg.getattr_flags |= FUSE_GETATTR_FH;
1557 		inarg.fh = ff->fh;
1558 	}
1559 	args.opcode = FUSE_GETATTR;
1560 	args.nodeid = get_node_id(inode);
1561 	args.in_numargs = 1;
1562 	args.in_args[0].size = sizeof(inarg);
1563 	args.in_args[0].value = &inarg;
1564 	args.out_numargs = 1;
1565 	args.out_args[0].size = sizeof(outarg);
1566 	args.out_args[0].value = &outarg;
1567 	err = fuse_simple_request(fm, &args);
1568 	if (!err)
1569 		err = finalize_attr(inode, &outarg, attr_version, stat);
1570 	return err;
1571 }
1572 
1573 static int fuse_update_get_attr(struct inode *inode, struct file *file,
1574 				const struct path *path,
1575 				struct kstat *stat, u32 request_mask,
1576 				unsigned int flags)
1577 {
1578 	struct fuse_inode *fi = get_fuse_inode(inode);
1579 	struct fuse_conn *fc = get_fuse_conn(inode);
1580 	int err = 0;
1581 	bool sync;
1582 	u32 inval_mask = READ_ONCE(fi->inval_mask);
1583 	u32 cache_mask = fuse_get_cache_mask(inode);
1584 
1585 #ifdef CONFIG_FUSE_BPF
1586 	struct fuse_err_ret fer;
1587 
1588 	fer = fuse_bpf_backing(inode, struct fuse_getattr_io,
1589 			       fuse_getattr_initialize,	fuse_getattr_backing,
1590 			       fuse_getattr_finalize,
1591 			       path->dentry, stat, request_mask, flags);
1592 	if (fer.ret)
1593 		return PTR_ERR(fer.result);
1594 #endif
1595 
1596 	/* FUSE only supports basic stats and possibly btime */
1597 	request_mask &= STATX_BASIC_STATS | STATX_BTIME;
1598 retry:
1599 	if (fc->no_statx)
1600 		request_mask &= STATX_BASIC_STATS;
1601 
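	/*
	 * Decide whether to ask the server: forced by AT_STATX_FORCE_SYNC,
	 * suppressed by AT_STATX_DONT_SYNC; otherwise sync if a requested
	 * attribute is invalidated and not covered by the writeback cache,
	 * or if the attribute timeout has expired.
	 */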
1602 	if (!request_mask)
1603 		sync = false;
1604 	else if (flags & AT_STATX_FORCE_SYNC)
1605 		sync = true;
1606 	else if (flags & AT_STATX_DONT_SYNC)
1607 		sync = false;
1608 	else if (request_mask & inval_mask & ~cache_mask)
1609 		sync = true;
1610 	else
1611 		sync = time_before64(fi->i_time, get_jiffies_64());
1612 
1613 	if (sync) {
1614 		forget_all_cached_acls(inode);
1615 		/* Try statx if BTIME is requested */
1616 		if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) {
1617 			err = fuse_do_statx(inode, file, stat);
1618 			if (err == -ENOSYS) {
1619 				fc->no_statx = 1;
1620 				err = 0;
1621 				goto retry;
1622 			}
1623 		} else {
1624 			err = fuse_do_getattr(inode, stat, file);
1625 		}
1626 	} else if (stat) {
1627 		generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
1628 		stat->mode = fi->orig_i_mode;
1629 		stat->ino = fi->orig_ino;
1630 		if (test_bit(FUSE_I_BTIME, &fi->state)) {
1631 			stat->btime = fi->i_btime;
1632 			stat->result_mask |= STATX_BTIME;
1633 		}
1634 	}
1635 
1636 	return err;
1637 }
1638 
1639 int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask)
1640 {
1641 	/* Do *not* need to get atime for internal purposes */
1642 	return fuse_update_get_attr(inode, file, &file->f_path, NULL,
1643 				    mask & ~STATX_ATIME, 0);
1644 }
1645 
1646 int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
1647 			     u64 child_nodeid, struct qstr *name, u32 flags)
1648 {
1649 	int err = -ENOTDIR;
1650 	struct inode *parent;
1651 	struct dentry *dir;
1652 	struct dentry *entry;
1653 
1654 	parent = fuse_ilookup(fc, parent_nodeid, NULL);
1655 	if (!parent)
1656 		return -ENOENT;
1657 
1658 	inode_lock_nested(parent, I_MUTEX_PARENT);
1659 	if (!S_ISDIR(parent->i_mode))
1660 		goto unlock;
1661 
1662 	err = -ENOENT;
1663 	dir = d_find_alias(parent);
1664 	if (!dir)
1665 		goto unlock;
1666 
1667 	name->hash = full_name_hash(dir, name->name, name->len);
1668 	entry = d_lookup(dir, name);
1669 	dput(dir);
1670 	if (!entry)
1671 		goto unlock;
1672 
1673 	fuse_dir_changed(parent);
1674 	if (!(flags & FUSE_EXPIRE_ONLY))
1675 		d_invalidate(entry);
1676 	fuse_invalidate_entry_cache(entry);
1677 
1678 	if (child_nodeid != 0 && d_really_is_positive(entry)) {
1679 		inode_lock(d_inode(entry));
1680 		if (get_node_id(d_inode(entry)) != child_nodeid) {
1681 			err = -ENOENT;
1682 			goto badentry;
1683 		}
1684 		if (d_mountpoint(entry)) {
1685 			err = -EBUSY;
1686 			goto badentry;
1687 		}
1688 		if (d_is_dir(entry)) {
1689 			shrink_dcache_parent(entry);
1690 			if (!simple_empty(entry)) {
1691 				err = -ENOTEMPTY;
1692 				goto badentry;
1693 			}
1694 			d_inode(entry)->i_flags |= S_DEAD;
1695 		}
1696 		dont_mount(entry);
1697 		clear_nlink(d_inode(entry));
1698 		err = 0;
1699  badentry:
1700 		inode_unlock(d_inode(entry));
1701 		if (!err)
1702 			d_delete(entry);
1703 	} else {
1704 		err = 0;
1705 	}
1706 	dput(entry);
1707 
1708  unlock:
1709 	inode_unlock(parent);
1710 	iput(parent);
1711 	return err;
1712 }
1713 
1714 static inline bool fuse_permissible_uidgid(struct fuse_conn *fc)
1715 {
1716 	const struct cred *cred = current_cred();
1717 
1718 	return (uid_eq(cred->euid, fc->user_id) &&
1719 		uid_eq(cred->suid, fc->user_id) &&
1720 		uid_eq(cred->uid,  fc->user_id) &&
1721 		gid_eq(cred->egid, fc->group_id) &&
1722 		gid_eq(cred->sgid, fc->group_id) &&
1723 		gid_eq(cred->gid,  fc->group_id));
1724 }
1725 
1726 /*
1727  * Calling into a user-controlled filesystem gives the filesystem
1728  * daemon ptrace-like capabilities over the current process.  This
1729  * means that the filesystem daemon is able to record the exact
1730  * filesystem operations performed, and can also control the behavior
1731  * of the requester process in otherwise impossible ways.  For example
1732  * it can delay the operation for an arbitrary length of time, allowing
1733  * DoS against the requester.
1734  *
1735  * For this reason only those processes can call into the filesystem,
1736  * for which the owner of the mount has ptrace privilege.  This
1737  * excludes processes started by other users, suid or sgid processes.
1738  */
1739 bool fuse_allow_current_process(struct fuse_conn *fc)
1740 {
1741 	bool allow;
1742 
1743 	if (fc->allow_other)
1744 		allow = current_in_userns(fc->user_ns);
1745 	else
1746 		allow = fuse_permissible_uidgid(fc);
1747 
1748 	if (!allow && allow_sys_admin_access && capable(CAP_SYS_ADMIN))
1749 		allow = true;
1750 
1751 	return allow;
1752 }
1753 
1754 static int fuse_access(struct inode *inode, int mask)
1755 {
1756 	struct fuse_mount *fm = get_fuse_mount(inode);
1757 	FUSE_ARGS(args);
1758 	struct fuse_access_in inarg;
1759 	int err;
1760 
1761 #ifdef CONFIG_FUSE_BPF
1762 	struct fuse_err_ret fer;
1763 
1764 	fer = fuse_bpf_backing(inode, struct fuse_access_in,
1765 			       fuse_access_initialize, fuse_access_backing,
1766 			       fuse_access_finalize, inode, mask);
1767 	if (fer.ret)
1768 		return PTR_ERR(fer.result);
1769 #endif
1770 
1771 	BUG_ON(mask & MAY_NOT_BLOCK);
1772 
1773 	if (fm->fc->no_access)
1774 		return 0;
1775 
1776 	memset(&inarg, 0, sizeof(inarg));
1777 	inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
1778 	args.opcode = FUSE_ACCESS;
1779 	args.nodeid = get_node_id(inode);
1780 	args.in_numargs = 1;
1781 	args.in_args[0].size = sizeof(inarg);
1782 	args.in_args[0].value = &inarg;
1783 	err = fuse_simple_request(fm, &args);
1784 	if (err == -ENOSYS) {
1785 		fm->fc->no_access = 1;
1786 		err = 0;
1787 	}
1788 	return err;
1789 }
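
/*
 * The userspace side of FUSE_ACCESS, shown as a minimal sketch using
 * the libfuse low-level API (not part of this file; my_check_access()
 * is a hypothetical helper).  A single -ENOSYS reply makes the kernel
 * set fc->no_access above and stop sending FUSE_ACCESS for the rest of
 * the connection's lifetime.
 *
 *	#include <fuse_lowlevel.h>
 *
 *	static void my_access(fuse_req_t req, fuse_ino_t ino, int mask)
 *	{
 *		// mask is a combination of R_OK, W_OK and X_OK
 *		int err = my_check_access(ino, mask);
 *
 *		fuse_reply_err(req, err);	// 0 means "allowed"
 *	}
 *
 *	static const struct fuse_lowlevel_ops my_ops = {
 *		.access	= my_access,
 *	};
 */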
1790 
1791 static int fuse_perm_getattr(struct inode *inode, int mask)
1792 {
1793 	if (mask & MAY_NOT_BLOCK)
1794 		return -ECHILD;
1795 
1796 	forget_all_cached_acls(inode);
1797 	return fuse_do_getattr(inode, NULL, NULL);
1798 }
1799 
1800 /*
1801  * Check permission.  The two basic access models of FUSE are:
1802  *
1803  * 1) Local access checking ('default_permissions' mount option) based
1804  * on file mode.  This is the plain old disk filesystem permission
1805  * model.
1806  *
1807  * 2) "Remote" access checking, where the server is responsible for
1808  * checking permission in each inode operation.  An exception to this
1809  * is if ->permission() was invoked from sys_access(), in which case an
1810  * access request is sent.  Execute permission is still checked
1811  * locally based on file mode.
1812  */
1813 static int fuse_permission(struct mnt_idmap *idmap,
1814 			   struct inode *inode, int mask)
1815 {
1816 	struct fuse_conn *fc = get_fuse_conn(inode);
1817 	bool refreshed = false;
1818 	int err = 0;
1819 	struct fuse_inode *fi = get_fuse_inode(inode);
1820 #ifdef CONFIG_FUSE_BPF
1821 	struct fuse_err_ret fer;
1822 #endif
1823 
1824 	if (fuse_is_bad(inode))
1825 		return -EIO;
1826 
1827 	if (!fuse_allow_current_process(fc))
1828 		return -EACCES;
1829 
1830 #ifdef CONFIG_FUSE_BPF
1831 	fer = fuse_bpf_backing(inode, struct fuse_access_in,
1832 			       fuse_access_initialize, fuse_access_backing,
1833 			       fuse_access_finalize, inode, mask);
1834 	if (fer.ret)
1835 		return PTR_ERR(fer.result);
1836 #endif
1837 
1838 	/*
1839 	 * If attributes are needed, refresh them before proceeding
1840 	 */
1841 	if (fc->default_permissions ||
1842 	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1843 		u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;
1844 
1845 		if (perm_mask & READ_ONCE(fi->inval_mask) ||
1846 		    time_before64(fi->i_time, get_jiffies_64())) {
1847 			refreshed = true;
1848 
1849 			err = fuse_perm_getattr(inode, mask);
1850 			if (err)
1851 				return err;
1852 		}
1853 	}
1854 
1855 	if (fc->default_permissions) {
1856 		err = generic_permission(&nop_mnt_idmap, inode, mask);
1857 
1858 		/* If permission is denied, try to refresh the file
1859 		   attributes.  This is also needed because the root
1860 		   node will at first have no permissions */
1861 		if (err == -EACCES && !refreshed) {
1862 			err = fuse_perm_getattr(inode, mask);
1863 			if (!err)
1864 				err = generic_permission(&nop_mnt_idmap,
1865 							 inode, mask);
1866 		}
1867 
1868 		/* Note: the opposite of the above test does not
1869 		   exist.  So if permissions are revoked, this won't be
1870 		   noticed immediately, but only after the attribute
1871 		   timeout has expired */
1872 	} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1873 		err = fuse_access(inode, mask);
1874 	} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
1875 		if (!(inode->i_mode & S_IXUGO)) {
1876 			if (refreshed)
1877 				return -EACCES;
1878 
1879 			err = fuse_perm_getattr(inode, mask);
1880 			if (!err && !(inode->i_mode & S_IXUGO))
1881 				return -EACCES;
1882 		}
1883 	}
1884 	return err;
1885 }
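
/*
 * Rough mapping from the two access models above to observable
 * behaviour (a sketch, with a hypothetical mount point):
 *
 *	// Without 'default_permissions': most checks are left to the
 *	// server; only access(2)/faccessat(2) and chdir(2) generate a
 *	// FUSE_ACCESS request, and exec still needs an execute bit in
 *	// the cached i_mode.
 *	access("/mnt/fuse/file", R_OK);		// -> FUSE_ACCESS
 *
 *	// With 'default_permissions': the kernel checks mode/uid/gid
 *	// via generic_permission() against (possibly refreshed) cached
 *	// attributes and never sends FUSE_ACCESS.
 *	open("/mnt/fuse/file", O_RDONLY);	// checked locally
 */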
1886 
1887 static int fuse_readlink_page(struct inode *inode, struct page *page)
1888 {
1889 	struct fuse_mount *fm = get_fuse_mount(inode);
1890 	struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 };
1891 	struct fuse_args_pages ap = {
1892 		.num_pages = 1,
1893 		.pages = &page,
1894 		.descs = &desc,
1895 	};
1896 	char *link;
1897 	ssize_t res;
1898 
1899 	ap.args.opcode = FUSE_READLINK;
1900 	ap.args.nodeid = get_node_id(inode);
1901 	ap.args.out_pages = true;
1902 	ap.args.out_argvar = true;
1903 	ap.args.page_zeroing = true;
1904 	ap.args.out_numargs = 1;
1905 	ap.args.out_args[0].size = desc.length;
1906 	res = fuse_simple_request(fm, &ap.args);
1907 
1908 	fuse_invalidate_atime(inode);
1909 
1910 	if (res < 0)
1911 		return res;
1912 
1913 	if (WARN_ON(res >= PAGE_SIZE))
1914 		return -EIO;
1915 
1916 	link = page_address(page);
1917 	link[res] = '\0';
1918 
1919 	return 0;
1920 }
1921 
1922 static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
1923 				 struct delayed_call *callback)
1924 {
1925 	struct fuse_conn *fc = get_fuse_conn(inode);
1926 	struct page *page;
1927 	int err;
1928 
1929 	err = -EIO;
1930 	if (fuse_is_bad(inode))
1931 		goto out_err;
1932 
1933 #ifdef CONFIG_FUSE_BPF
1934 	{
1935 		struct fuse_err_ret fer;
1936 		const char *out = NULL;
1937 
1938 		fer = fuse_bpf_backing(inode, struct fuse_dummy_io,
1939 				       fuse_get_link_initialize,
1940 				       fuse_get_link_backing,
1941 				       fuse_get_link_finalize,
1942 				       inode, dentry, callback, &out);
1943 		if (fer.ret)
1944 			return fer.result ?: out;
1945 	}
1946 #endif
1947 
1948 	if (fc->cache_symlinks)
1949 		return page_get_link(dentry, inode, callback);
1950 
1951 	err = -ECHILD;
1952 	if (!dentry)
1953 		goto out_err;
1954 
1955 	page = alloc_page(GFP_KERNEL);
1956 	err = -ENOMEM;
1957 	if (!page)
1958 		goto out_err;
1959 
1960 	err = fuse_readlink_page(inode, page);
1961 	if (err) {
1962 		__free_page(page);
1963 		goto out_err;
1964 	}
1965 
1966 	set_delayed_call(callback, page_put_link, page);
1967 
1968 	return page_address(page);
1969 
1970 out_err:
1971 	return ERR_PTR(err);
1972 }
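
/*
 * The userspace side of FUSE_READLINK, as a minimal libfuse low-level
 * sketch (not part of this file; my_lookup_target() is a hypothetical
 * helper).  The target has to fit in a single page, matching the
 * PAGE_SIZE - 1 limit in fuse_readlink_page():
 *
 *	static void my_readlink(fuse_req_t req, fuse_ino_t ino)
 *	{
 *		const char *target = my_lookup_target(ino);
 *
 *		if (!target)
 *			fuse_reply_err(req, EIO);
 *		else
 *			fuse_reply_readlink(req, target);
 *	}
 *
 * If the daemon negotiates FUSE_CACHE_SYMLINKS, fc->cache_symlinks is
 * set and the result is served from the page cache via page_get_link()
 * until the cached page is invalidated.
 */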
1973 
1974 static int fuse_dir_open(struct inode *inode, struct file *file)
1975 {
1976 	return fuse_open_common(inode, file, true);
1977 }
1978 
1979 static int fuse_dir_release(struct inode *inode, struct file *file)
1980 {
1981 	fuse_release_common(file, true);
1982 	return 0;
1983 }
1984 
1985 static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
1986 			  int datasync)
1987 {
1988 	struct inode *inode = file->f_mapping->host;
1989 	struct fuse_conn *fc = get_fuse_conn(inode);
1990 	int err;
1991 
1992 	if (fuse_is_bad(inode))
1993 		return -EIO;
1994 
1995 #ifdef CONFIG_FUSE_BPF
1996 	{
1997 		struct fuse_err_ret fer;
1998 
1999 		fer = fuse_bpf_backing(inode, struct fuse_fsync_in,
2000 				fuse_dir_fsync_initialize, fuse_fsync_backing,
2001 				fuse_fsync_finalize,
2002 				file, start, end, datasync);
2003 		if (fer.ret)
2004 			return PTR_ERR(fer.result);
2005 	}
2006 #endif
2007 
2008 	if (fc->no_fsyncdir)
2009 		return 0;
2010 
2011 	inode_lock(inode);
2012 	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
2013 	if (err == -ENOSYS) {
2014 		fc->no_fsyncdir = 1;
2015 		err = 0;
2016 	}
2017 	inode_unlock(inode);
2018 
2019 	return err;
2020 }
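
/*
 * The userspace side of FUSE_FSYNCDIR, as a minimal libfuse low-level
 * sketch (not part of this file; my_sync_dir() is a hypothetical
 * helper).  If the daemon does not implement it, the first -ENOSYS
 * reply sets fc->no_fsyncdir and later fsync(2) calls on directories
 * succeed without a request:
 *
 *	static void my_fsyncdir(fuse_req_t req, fuse_ino_t ino,
 *				int datasync, struct fuse_file_info *fi)
 *	{
 *		fuse_reply_err(req, my_sync_dir(ino, datasync));
 *	}
 */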
2021 
2022 static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
2023 			    unsigned long arg)
2024 {
2025 	struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
2026 
2027 	/* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
2028 	if (fc->minor < 18)
2029 		return -ENOTTY;
2030 
2031 	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
2032 }
2033 
2034 static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
2035 				   unsigned long arg)
2036 {
2037 	struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
2038 
2039 	if (fc->minor < 18)
2040 		return -ENOTTY;
2041 
2042 	return fuse_ioctl_common(file, cmd, arg,
2043 				 FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
2044 }
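
/*
 * Directory ioctls are only forwarded on protocol 7.18 or later; on
 * older connections the caller simply sees ENOTTY.  For example
 * (hypothetical mount point):
 *
 *	int fd = open("/mnt/fuse/dir", O_RDONLY | O_DIRECTORY);
 *	int attrs;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &attrs);	// FUSE_IOCTL_DIR request,
 *						// or ENOTTY if minor < 18
 */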
2045 
2046 /*
2047  * Prevent concurrent writepages on inode
2048  *
2049  * This is done by adding a negative bias to the inode write counter
2050  * and waiting for all pending writes to finish.
2051  */
2052 void fuse_set_nowrite(struct inode *inode)
2053 {
2054 	struct fuse_inode *fi = get_fuse_inode(inode);
2055 
2056 	BUG_ON(!inode_is_locked(inode));
2057 
2058 	spin_lock(&fi->lock);
2059 	BUG_ON(fi->writectr < 0);
2060 	fi->writectr += FUSE_NOWRITE;
2061 	spin_unlock(&fi->lock);
2062 	wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
2063 }
2064 
2065 /*
2066  * Allow writepages on inode
2067  *
2068  * Remove the bias from the write counter and send any queued
2069  * writepages.
2070  */
2071 static void __fuse_release_nowrite(struct inode *inode)
2072 {
2073 	struct fuse_inode *fi = get_fuse_inode(inode);
2074 
2075 	BUG_ON(fi->writectr != FUSE_NOWRITE);
2076 	fi->writectr = 0;
2077 	fuse_flush_writepages(inode);
2078 }
2079 
2080 void fuse_release_nowrite(struct inode *inode)
2081 {
2082 	struct fuse_inode *fi = get_fuse_inode(inode);
2083 
2084 	spin_lock(&fi->lock);
2085 	__fuse_release_nowrite(inode);
2086 	spin_unlock(&fi->lock);
2087 }
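
/*
 * Worked example of the nowrite protocol (FUSE_NOWRITE is a large
 * negative bias defined in fuse_i.h; two writepage requests assumed
 * to be in flight when the bias is applied):
 *
 *	writectr == 2			writes in flight
 *	fuse_set_nowrite():
 *	  writectr == FUSE_NOWRITE + 2	new writepages are queued, not sent
 *	  ...both in-flight writes complete, decrementing writectr...
 *	  writectr == FUSE_NOWRITE	wait_event() returns
 *	fuse_release_nowrite():
 *	  writectr == 0			queued writepages are flushed
 */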
2088 
2089 static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
2090 			      struct inode *inode,
2091 			      struct fuse_setattr_in *inarg_p,
2092 			      struct fuse_attr_out *outarg_p)
2093 {
2094 	args->opcode = FUSE_SETATTR;
2095 	args->nodeid = get_node_id(inode);
2096 	args->in_numargs = 1;
2097 	args->in_args[0].size = sizeof(*inarg_p);
2098 	args->in_args[0].value = inarg_p;
2099 	args->out_numargs = 1;
2100 	args->out_args[0].size = sizeof(*outarg_p);
2101 	args->out_args[0].value = outarg_p;
2102 }
2103 
2104 /*
2105  * Flush inode->i_mtime (and ctime, if supported) to the server
2106  */
2107 int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
2108 {
2109 	struct fuse_mount *fm = get_fuse_mount(inode);
2110 	FUSE_ARGS(args);
2111 	struct fuse_setattr_in inarg;
2112 	struct fuse_attr_out outarg;
2113 
2114 	memset(&inarg, 0, sizeof(inarg));
2115 	memset(&outarg, 0, sizeof(outarg));
2116 
2117 	inarg.valid = FATTR_MTIME;
2118 	inarg.mtime = inode->i_mtime.tv_sec;
2119 	inarg.mtimensec = inode->i_mtime.tv_nsec;
2120 	if (fm->fc->minor >= 23) {
2121 		inarg.valid |= FATTR_CTIME;
2122 		inarg.ctime = inode_get_ctime(inode).tv_sec;
2123 		inarg.ctimensec = inode_get_ctime(inode).tv_nsec;
2124 	}
2125 	if (ff) {
2126 		inarg.valid |= FATTR_FH;
2127 		inarg.fh = ff->fh;
2128 	}
2129 	fuse_setattr_fill(fm->fc, &args, inode, &inarg, &outarg);
2130 
2131 	return fuse_simple_request(fm, &args);
2132 }
2133 
2134 /*
2135  * Set attributes, and at the same time refresh them.
2136  *
2137  * Truncation is slightly complicated, because the 'truncate' request
2138  * may fail, in which case we don't want to touch the mapping.
2139  * vmtruncate() doesn't allow for this case, so do the rlimit checking
2140  * and the actual truncation by hand.
2141  */
2142 int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
2143 		    struct file *file)
2144 {
2145 	struct inode *inode = d_inode(dentry);
2146 	struct fuse_mount *fm = get_fuse_mount(inode);
2147 	struct fuse_conn *fc = fm->fc;
2148 	struct fuse_inode *fi = get_fuse_inode(inode);
2149 	struct address_space *mapping = inode->i_mapping;
2150 	FUSE_ARGS(args);
2151 	struct fuse_setattr_in inarg;
2152 	struct fuse_attr_out outarg;
2153 	bool is_truncate = false;
2154 	bool is_wb = fc->writeback_cache && S_ISREG(inode->i_mode);
2155 	loff_t oldsize;
2156 	int err;
2157 	bool trust_local_cmtime = is_wb;
2158 	bool fault_blocked = false;
2159 
2160 #ifdef CONFIG_FUSE_BPF
2161 	struct fuse_err_ret fer;
2162 
2163 	fer = fuse_bpf_backing(inode, struct fuse_setattr_io,
2164 			       fuse_setattr_initialize, fuse_setattr_backing,
2165 			       fuse_setattr_finalize, dentry, attr, file);
2166 	if (fer.ret)
2167 		return PTR_ERR(fer.result);
2168 #endif
2169 
2170 	if (!fc->default_permissions)
2171 		attr->ia_valid |= ATTR_FORCE;
2172 
2173 	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
2174 	if (err)
2175 		return err;
2176 
2177 	if (attr->ia_valid & ATTR_SIZE) {
2178 		if (WARN_ON(!S_ISREG(inode->i_mode)))
2179 			return -EIO;
2180 		is_truncate = true;
2181 	}
2182 
2183 	if (FUSE_IS_DAX(inode) && is_truncate) {
2184 		filemap_invalidate_lock(mapping);
2185 		fault_blocked = true;
2186 		err = fuse_dax_break_layouts(inode, 0, 0);
2187 		if (err) {
2188 			filemap_invalidate_unlock(mapping);
2189 			return err;
2190 		}
2191 	}
2192 
2193 	if (attr->ia_valid & ATTR_OPEN) {
2194 		/* This is coming from open(..., ... | O_TRUNC); */
2195 		WARN_ON(!(attr->ia_valid & ATTR_SIZE));
2196 		WARN_ON(attr->ia_size != 0);
2197 		if (fc->atomic_o_trunc) {
2198 			/*
2199 			 * No need to send request to userspace, since actual
2200 			 * truncation has already been done by OPEN.  But still
2201 			 * need to truncate page cache.
2202 			 */
2203 			i_size_write(inode, 0);
2204 			truncate_pagecache(inode, 0);
2205 			goto out;
2206 		}
2207 		file = NULL;
2208 	}
2209 
2210 	/* Flush dirty data/metadata before non-truncate SETATTR */
2211 	if (is_wb &&
2212 	    attr->ia_valid &
2213 			(ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
2214 			 ATTR_TIMES_SET)) {
2215 		err = write_inode_now(inode, true);
2216 		if (err)
2217 			return err;
2218 
2219 		fuse_set_nowrite(inode);
2220 		fuse_release_nowrite(inode);
2221 	}
2222 
2223 	if (is_truncate) {
2224 		fuse_set_nowrite(inode);
2225 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2226 		if (trust_local_cmtime && attr->ia_size != inode->i_size)
2227 			attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2228 	}
2229 
2230 	memset(&inarg, 0, sizeof(inarg));
2231 	memset(&outarg, 0, sizeof(outarg));
2232 	iattr_to_fattr(fc, attr, &inarg, trust_local_cmtime);
2233 	if (file) {
2234 		struct fuse_file *ff = file->private_data;
2235 		inarg.valid |= FATTR_FH;
2236 		inarg.fh = ff->fh;
2237 	}
2238 
2239 	/* Kill suid/sgid for non-directory chown unconditionally */
2240 	if (fc->handle_killpriv_v2 && !S_ISDIR(inode->i_mode) &&
2241 	    attr->ia_valid & (ATTR_UID | ATTR_GID))
2242 		inarg.valid |= FATTR_KILL_SUIDGID;
2243 
2244 	if (attr->ia_valid & ATTR_SIZE) {
2245 		/* For mandatory locking in truncate */
2246 		inarg.valid |= FATTR_LOCKOWNER;
2247 		inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
2248 
2249 		/* Kill suid/sgid for truncate only if no CAP_FSETID */
2250 		if (fc->handle_killpriv_v2 && !capable(CAP_FSETID))
2251 			inarg.valid |= FATTR_KILL_SUIDGID;
2252 	}
2253 	fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
2254 	err = fuse_simple_request(fm, &args);
2255 	if (err) {
2256 		if (err == -EINTR)
2257 			fuse_invalidate_attr(inode);
2258 		goto error;
2259 	}
2260 
2261 	if (fuse_invalid_attr(&outarg.attr) ||
2262 	    inode_wrong_type(inode, outarg.attr.mode)) {
2263 		fuse_make_bad(inode);
2264 		err = -EIO;
2265 		goto error;
2266 	}
2267 
2268 	spin_lock(&fi->lock);
2269 	/* the kernel maintains i_mtime locally */
2270 	if (trust_local_cmtime) {
2271 		if (attr->ia_valid & ATTR_MTIME)
2272 			inode->i_mtime = attr->ia_mtime;
2273 		if (attr->ia_valid & ATTR_CTIME)
2274 			inode_set_ctime_to_ts(inode, attr->ia_ctime);
2275 		/* FIXME: clear I_DIRTY_SYNC? */
2276 	}
2277 
2278 	fuse_change_attributes_common(inode, &outarg.attr, NULL,
2279 				      ATTR_TIMEOUT(&outarg),
2280 				      fuse_get_cache_mask(inode));
2281 	oldsize = inode->i_size;
2282 	/* see the comment in fuse_change_attributes() */
2283 	if (!is_wb || is_truncate)
2284 		i_size_write(inode, outarg.attr.size);
2285 
2286 	if (is_truncate) {
2287 		/* NOTE: this may release/reacquire fi->lock */
2288 		__fuse_release_nowrite(inode);
2289 	}
2290 	spin_unlock(&fi->lock);
2291 
2292 	/*
2293 	 * Only call invalidate_inode_pages2() after removing
2294 	 * FUSE_NOWRITE, otherwise fuse_launder_folio() would deadlock.
2295 	 */
2296 	if ((is_truncate || !is_wb) &&
2297 	    S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
2298 		truncate_pagecache(inode, outarg.attr.size);
2299 		invalidate_inode_pages2(mapping);
2300 	}
2301 
2302 	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2303 out:
2304 	if (fault_blocked)
2305 		filemap_invalidate_unlock(mapping);
2306 
2307 	return 0;
2308 
2309 error:
2310 	if (is_truncate)
2311 		fuse_release_nowrite(inode);
2312 
2313 	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2314 
2315 	if (fault_blocked)
2316 		filemap_invalidate_unlock(mapping);
2317 	return err;
2318 }
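
/*
 * The userspace side of FUSE_SETATTR, as a minimal libfuse low-level
 * sketch (not part of this file; my_truncate(), my_chmod() and
 * my_getattr() are hypothetical helpers).  The FATTR_* bits built
 * above arrive as FUSE_SET_ATTR_* bits in 'to_set', and the attributes
 * returned by fuse_reply_attr() feed fuse_change_attributes_common():
 *
 *	static void my_setattr(fuse_req_t req, fuse_ino_t ino,
 *			       struct stat *attr, int to_set,
 *			       struct fuse_file_info *fi)
 *	{
 *		struct stat st;
 *
 *		if (to_set & FUSE_SET_ATTR_SIZE)
 *			my_truncate(ino, attr->st_size);
 *		if (to_set & FUSE_SET_ATTR_MODE)
 *			my_chmod(ino, attr->st_mode);
 *
 *		my_getattr(ino, &st);
 *		fuse_reply_attr(req, &st, 1.0);	// 1s attribute timeout
 *	}
 */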
2319 
2320 static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
2321 			struct iattr *attr)
2322 {
2323 	struct inode *inode = d_inode(entry);
2324 	struct fuse_conn *fc = get_fuse_conn(inode);
2325 	struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
2326 	int ret;
2327 
2328 	if (fuse_is_bad(inode))
2329 		return -EIO;
2330 
2331 	if (!fuse_allow_current_process(get_fuse_conn(inode)))
2332 		return -EACCES;
2333 
2334 	if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
2335 		attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
2336 				    ATTR_MODE);
2337 
2338 		/*
2339 		 * The only sane way to reliably kill suid/sgid is to do it in
2340 		 * the userspace filesystem.
2341 		 *
2342 		 * This should be done on write(), truncate() and chown().
2343 		 */
2344 		if (!fc->handle_killpriv && !fc->handle_killpriv_v2) {
2345 #ifdef CONFIG_FUSE_BPF
2346 			struct fuse_err_ret fer;
2347 
2348 			/*
2349 			 * ia_mode calculation may have used stale i_mode.
2350 			 * Refresh and recalculate.
2351 			 */
2352 			fer = fuse_bpf_backing(inode, struct fuse_getattr_io,
2353 					       fuse_getattr_initialize,	fuse_getattr_backing,
2354 					       fuse_getattr_finalize,
2355 					       entry, NULL, 0, 0);
2356 			if (fer.ret)
2357 				ret = PTR_ERR(fer.result);
2358 			else
2359 #endif
2360 				ret = fuse_do_getattr(inode, NULL, file);
2361 			if (ret)
2362 				return ret;
2363 
2364 			attr->ia_mode = inode->i_mode;
2365 			if (inode->i_mode & S_ISUID) {
2366 				attr->ia_valid |= ATTR_MODE;
2367 				attr->ia_mode &= ~S_ISUID;
2368 			}
2369 			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
2370 				attr->ia_valid |= ATTR_MODE;
2371 				attr->ia_mode &= ~S_ISGID;
2372 			}
2373 		}
2374 	}
2375 	if (!attr->ia_valid)
2376 		return 0;
2377 
2378 	ret = fuse_do_setattr(entry, attr, file);
2379 	if (!ret) {
2380 		/*
2381 		 * If the filesystem supports acls, it may have updated acl xattrs in
2382 		 * the filesystem, so forget cached acls for the inode.
2383 		 */
2384 		if (fc->posix_acl)
2385 			forget_all_cached_acls(inode);
2386 
2387 		/* Directory mode changed, may need to revalidate access */
2388 		if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE))
2389 			fuse_invalidate_entry_cache(entry);
2390 	}
2391 	return ret;
2392 }
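
/*
 * Observable effect of the suid/sgid handling above when the daemon
 * advertises neither handle_killpriv nor handle_killpriv_v2
 * (hypothetical path): chown(2) on a setuid executable must drop the
 * setuid bit, so the kernel converts the implicit ATTR_KILL_SUID into
 * an explicit mode change.
 *
 *	// before: -rwsr-xr-x  (mode 04755)
 *	chown("/mnt/fuse/prog", 1000, 1000);
 *	// after:  -rwxr-xr-x  (mode 0755), sent as FATTR_MODE
 *	//         alongside FATTR_UID/FATTR_GID in FUSE_SETATTR
 */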
2393 
2394 static int fuse_getattr(struct mnt_idmap *idmap,
2395 			const struct path *path, struct kstat *stat,
2396 			u32 request_mask, unsigned int flags)
2397 {
2398 	struct inode *inode = d_inode(path->dentry);
2399 	struct fuse_conn *fc = get_fuse_conn(inode);
2400 
2401 	if (fuse_is_bad(inode))
2402 		return -EIO;
2403 
2404 	if (!fuse_allow_current_process(fc)) {
2405 		if (!request_mask) {
2406 			/*
2407 			 * If the user explicitly requested *nothing*, don't
2408 			 * error out, but return st_dev only.
2409 			 */
2410 			stat->result_mask = 0;
2411 			stat->dev = inode->i_sb->s_dev;
2412 			return 0;
2413 		}
2414 		return -EACCES;
2415 	}
2416 
2417 	return fuse_update_get_attr(inode, NULL, path, stat, request_mask,
2418 				    flags);
2419 }
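
/*
 * The request_mask == 0 special case above matches statx(2) with an
 * empty mask: a process that fails fuse_allow_current_process() can
 * still learn the device number of the mount, but nothing else
 * (hypothetical mount point):
 *
 *	struct statx stx;
 *
 *	statx(AT_FDCWD, "/mnt/fuse", 0, 0, &stx);
 *	// stx_dev_major/minor are filled in, stx.stx_mask == 0;
 *	// any non-zero mask fails with EACCES instead.
 */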
2420 
2421 static const struct inode_operations fuse_dir_inode_operations = {
2422 	.lookup		= fuse_lookup,
2423 	.mkdir		= fuse_mkdir,
2424 	.symlink	= fuse_symlink,
2425 	.unlink		= fuse_unlink,
2426 	.rmdir		= fuse_rmdir,
2427 	.rename		= fuse_rename2,
2428 	.link		= fuse_link,
2429 	.setattr	= fuse_setattr,
2430 	.create		= fuse_create,
2431 	.atomic_open	= fuse_atomic_open,
2432 	.tmpfile	= fuse_tmpfile,
2433 	.mknod		= fuse_mknod,
2434 	.permission	= fuse_permission,
2435 	.getattr	= fuse_getattr,
2436 	.listxattr	= fuse_listxattr,
2437 	.get_inode_acl	= fuse_get_inode_acl,
2438 	.get_acl	= fuse_get_acl,
2439 	.set_acl	= fuse_set_acl,
2440 	.fileattr_get	= fuse_fileattr_get,
2441 	.fileattr_set	= fuse_fileattr_set,
2442 };
2443 
2444 static const struct file_operations fuse_dir_operations = {
2445 	.llseek		= generic_file_llseek,
2446 	.read		= generic_read_dir,
2447 	.iterate_shared	= fuse_readdir,
2448 	.open		= fuse_dir_open,
2449 	.release	= fuse_dir_release,
2450 	.fsync		= fuse_dir_fsync,
2451 	.unlocked_ioctl	= fuse_dir_ioctl,
2452 	.compat_ioctl	= fuse_dir_compat_ioctl,
2453 };
2454 
2455 static const struct inode_operations fuse_common_inode_operations = {
2456 	.setattr	= fuse_setattr,
2457 	.permission	= fuse_permission,
2458 	.getattr	= fuse_getattr,
2459 	.listxattr	= fuse_listxattr,
2460 	.get_inode_acl	= fuse_get_inode_acl,
2461 	.get_acl	= fuse_get_acl,
2462 	.set_acl	= fuse_set_acl,
2463 	.fileattr_get	= fuse_fileattr_get,
2464 	.fileattr_set	= fuse_fileattr_set,
2465 };
2466 
2467 static const struct inode_operations fuse_symlink_inode_operations = {
2468 	.setattr	= fuse_setattr,
2469 	.get_link	= fuse_get_link,
2470 	.getattr	= fuse_getattr,
2471 	.listxattr	= fuse_listxattr,
2472 };
2473 
2474 void fuse_init_common(struct inode *inode)
2475 {
2476 	inode->i_op = &fuse_common_inode_operations;
2477 }
2478 
2479 void fuse_init_dir(struct inode *inode)
2480 {
2481 	struct fuse_inode *fi = get_fuse_inode(inode);
2482 
2483 	inode->i_op = &fuse_dir_inode_operations;
2484 	inode->i_fop = &fuse_dir_operations;
2485 
2486 	spin_lock_init(&fi->rdc.lock);
2487 	fi->rdc.cached = false;
2488 	fi->rdc.size = 0;
2489 	fi->rdc.pos = 0;
2490 	fi->rdc.version = 0;
2491 }
2492 
2493 static int fuse_symlink_read_folio(struct file *null, struct folio *folio)
2494 {
2495 	int err = fuse_readlink_page(folio->mapping->host, &folio->page);
2496 
2497 	if (!err)
2498 		folio_mark_uptodate(folio);
2499 
2500 	folio_unlock(folio);
2501 
2502 	return err;
2503 }
2504 
2505 static const struct address_space_operations fuse_symlink_aops = {
2506 	.read_folio	= fuse_symlink_read_folio,
2507 };
2508 
2509 void fuse_init_symlink(struct inode *inode)
2510 {
2511 	inode->i_op = &fuse_symlink_inode_operations;
2512 	inode->i_data.a_ops = &fuse_symlink_aops;
2513 	inode_nohighmem(inode);
2514 }
2515