1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/fdtable.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/filter.h>
15 #include <linux/fs_context.h>
16 #include <linux/moduleparam.h>
17 #include <linux/sched.h>
18 #include <linux/namei.h>
19 #include <linux/slab.h>
20 #include <linux/xattr.h>
21 #include <linux/iversion.h>
22 #include <linux/posix_acl.h>
23 #include <linux/security.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26
27 static bool __read_mostly allow_sys_admin_access;
28 module_param(allow_sys_admin_access, bool, 0644);
29 MODULE_PARM_DESC(allow_sys_admin_access,
30 "Allow users with CAP_SYS_ADMIN in initial userns to bypass allow_other access check");
31
32 #include "../internal.h"
33
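/*
 * Hint that the next readdir on this directory should use READDIRPLUS.
 */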
34 static void fuse_advise_use_readdirplus(struct inode *dir)
35 {
36 struct fuse_inode *fi = get_fuse_inode(dir);
37
38 set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
39 }
40
41 #if BITS_PER_LONG >= 64 && !defined(CONFIG_FUSE_BPF)
42 static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
43 {
44 entry->d_fsdata = (void *) time;
45 }
46
47 static inline u64 fuse_dentry_time(const struct dentry *entry)
48 {
49 return (u64)entry->d_fsdata;
50 }
51
52 #else
53
54 static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
55 {
56 ((struct fuse_dentry *) dentry->d_fsdata)->time = time;
57 }
58
59 static inline u64 fuse_dentry_time(const struct dentry *entry)
60 {
61 return ((struct fuse_dentry *) entry->d_fsdata)->time;
62 }
63 #endif
64
65 static void fuse_dentry_settime(struct dentry *dentry, u64 time)
66 {
67 struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
68 bool delete = !time && fc->delete_stale;
69 /*
70 * Mess with DCACHE_OP_DELETE because dput() will be faster without it.
71 * Don't care about races, either way it's just an optimization
72 */
73 if ((!delete && (dentry->d_flags & DCACHE_OP_DELETE)) ||
74 (delete && !(dentry->d_flags & DCACHE_OP_DELETE))) {
75 spin_lock(&dentry->d_lock);
76 if (!delete)
77 dentry->d_flags &= ~DCACHE_OP_DELETE;
78 else
79 dentry->d_flags |= DCACHE_OP_DELETE;
80 spin_unlock(&dentry->d_lock);
81 }
82
83 __fuse_dentry_settime(dentry, time);
84 }
85
86 void fuse_init_dentry_root(struct dentry *root, struct file *backing_dir)
87 {
88 #ifdef CONFIG_FUSE_BPF
89 struct fuse_dentry *fuse_dentry = root->d_fsdata;
90
91 if (backing_dir) {
92 fuse_dentry->backing_path = backing_dir->f_path;
93 path_get(&fuse_dentry->backing_path);
94 }
95 #endif
96 }
97
98 /*
99 * Set dentry and possibly attribute timeouts from the lookup/mk*
100 * replies
101 */
102 void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
103 {
104 fuse_dentry_settime(entry,
105 fuse_time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
106 }
107
108 void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
109 {
110 set_mask_bits(&get_fuse_inode(inode)->inval_mask, 0, mask);
111 }
112
113 /*
114 * Mark the attributes as stale, so that at the next call to
115 * ->getattr() they will be fetched from userspace
116 */
117 void fuse_invalidate_attr(struct inode *inode)
118 {
119 fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS);
120 }
121
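/*
 * Mark the directory's attributes stale and bump its i_version after an
 * operation that changed its contents.
 */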
122 static void fuse_dir_changed(struct inode *dir)
123 {
124 fuse_invalidate_attr(dir);
125 inode_maybe_inc_iversion(dir, false);
126 }
127
128 /*
129 * Mark the attributes as stale due to an atime change. Avoid the invalidate if
130 * atime is not used.
131 */
132 void fuse_invalidate_atime(struct inode *inode)
133 {
134 if (!IS_RDONLY(inode))
135 fuse_invalidate_attr_mask(inode, STATX_ATIME);
136 }
137
138 /*
139 * Just mark the entry as stale, so that a next attempt to look it up
140 * will result in a new lookup call to userspace
141 *
142 * This is called when a dentry is about to become negative and the
143 * timeout is unknown (unlink, rmdir, rename and in some cases
144 * lookup)
145 */
146 void fuse_invalidate_entry_cache(struct dentry *entry)
147 {
148 fuse_dentry_settime(entry, 0);
149 }
150
151 /*
152 * Same as fuse_invalidate_entry_cache(), but also try to remove the
153 * dentry from the hash
154 */
155 static void fuse_invalidate_entry(struct dentry *entry)
156 {
157 d_invalidate(entry);
158 fuse_invalidate_entry_cache(entry);
159 }
160
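/*
 * Fill in a FUSE_LOOKUP request: the name is sent, a fuse_entry_out and
 * (optionally, as the reply size indicates) a fuse_entry_bpf_out are
 * expected back.
 */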
161 static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
162 u64 nodeid, const struct qstr *name,
163 struct fuse_entry_out *outarg,
164 struct fuse_entry_bpf_out *bpf_outarg)
165 {
166 memset(outarg, 0, sizeof(struct fuse_entry_out));
167 args->opcode = FUSE_LOOKUP;
168 args->nodeid = nodeid;
169 args->in_numargs = 1;
170 args->in_args[0].size = name->len + 1;
171 args->in_args[0].value = name->name;
172 args->out_argvar = true;
173 args->out_numargs = 2;
174 args->out_args[0].size = sizeof(struct fuse_entry_out);
175 args->out_args[0].value = outarg;
176 args->out_args[1].size = sizeof(struct fuse_entry_bpf_out);
177 args->out_args[1].value = bpf_outarg;
178 }
179
180 #ifdef CONFIG_FUSE_BPF
181 static bool backing_data_changed(struct fuse_inode *fi, struct dentry *entry,
182 struct fuse_entry_bpf *bpf_arg)
183 {
184 struct path new_backing_path;
185 struct inode *new_backing_inode;
186 struct bpf_prog *bpf = NULL;
187 int err;
188 bool ret = true;
189
190 if (!entry || !fi->backing_inode) {
191 ret = false;
192 goto put_backing_file;
193 }
194
195 get_fuse_backing_path(entry, &new_backing_path);
196 new_backing_inode = fi->backing_inode;
197 ihold(new_backing_inode);
198
199 err = fuse_handle_backing(bpf_arg, &new_backing_inode, &new_backing_path);
200
201 if (err)
202 goto put_inode;
203
204 err = fuse_handle_bpf_prog(bpf_arg, entry->d_parent->d_inode, &bpf);
205 if (err)
206 goto put_bpf;
207
208 ret = (bpf != fi->bpf || fi->backing_inode != new_backing_inode ||
209 !path_equal(&get_fuse_dentry(entry)->backing_path, &new_backing_path));
210 put_bpf:
211 if (bpf)
212 bpf_prog_put(bpf);
213 put_inode:
214 iput(new_backing_inode);
215 path_put(&new_backing_path);
216 put_backing_file:
217 if (bpf_arg->backing_file)
218 fput(bpf_arg->backing_file);
219 return ret;
220 }
221 #endif
222
223 /*
224 * Check whether the dentry is still valid
225 *
226 * If the entry validity timeout has expired and the dentry is
227 * positive, try to redo the lookup. If the lookup results in a
228 * different inode, then let the VFS invalidate the dentry and redo
229 * the lookup once more. If the lookup results in the same inode,
230 * then refresh the attributes, timeouts and mark the dentry valid.
231 */
232 static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
233 {
234 struct inode *inode;
235 struct dentry *parent;
236 struct fuse_mount *fm;
237 struct fuse_inode *fi;
238 int ret;
239
240 inode = d_inode_rcu(entry);
241 if (inode && fuse_is_bad(inode))
242 goto invalid;
243
244 #ifdef CONFIG_FUSE_BPF
245 /* TODO: Do we need bpf support for revalidate?
246 * If the lower filesystem says the entry is invalid, FUSE probably shouldn't
247 * try to fix that without going through the normal lookup path...
248 */
249 if (get_fuse_dentry(entry)->backing_path.dentry) {
250 ret = fuse_revalidate_backing(entry, flags);
251 if (ret <= 0) {
252 goto out;
253 }
254 }
255 #endif
256 if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
257 (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) {
258 struct fuse_entry_out outarg;
259 struct fuse_entry_bpf bpf_arg;
260 FUSE_ARGS(args);
261 struct fuse_forget_link *forget;
262 u64 attr_version;
263
264 /* For negative dentries, always do a fresh lookup */
265 if (!inode)
266 goto invalid;
267
268 ret = -ECHILD;
269 if (flags & LOOKUP_RCU)
270 goto out;
271 fm = get_fuse_mount(inode);
272
273 parent = dget_parent(entry);
274
275 #ifdef CONFIG_FUSE_BPF
276 /* TODO: Once we're handling timeouts for backing inodes, do a
277 * bpf based lookup_revalidate here.
278 */
279 if (get_fuse_inode(parent->d_inode)->backing_inode) {
280 dput(parent);
281 ret = 1;
282 goto out;
283 }
284 #endif
285 forget = fuse_alloc_forget();
286 ret = -ENOMEM;
287 if (!forget) {
288 dput(parent);
289 goto out;
290 }
291
292 attr_version = fuse_get_attr_version(fm->fc);
293
294 fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)),
295 &entry->d_name, &outarg, &bpf_arg.out);
296 ret = fuse_simple_request(fm, &args);
297 dput(parent);
298
299 /* Zero nodeid is same as -ENOENT */
300 if (!ret && !outarg.nodeid)
301 ret = -ENOENT;
302 if (!ret || ret == sizeof(bpf_arg.out)) {
303 fi = get_fuse_inode(inode);
304 if (outarg.nodeid != get_node_id(inode) ||
305 #ifdef CONFIG_FUSE_BPF
306 (ret == sizeof(bpf_arg.out) &&
307 backing_data_changed(fi, entry, &bpf_arg)) ||
308 #endif
309 (bool) IS_AUTOMOUNT(inode) != (bool) (outarg.attr.flags & FUSE_ATTR_SUBMOUNT)) {
310 fuse_queue_forget(fm->fc, forget,
311 outarg.nodeid, 1);
312 goto invalid;
313 }
314 spin_lock(&fi->lock);
315 fi->nlookup++;
316 spin_unlock(&fi->lock);
317 }
318 kfree(forget);
319 if (ret == -ENOMEM || ret == -EINTR)
320 goto out;
321 if (ret || fuse_invalid_attr(&outarg.attr) ||
322 fuse_stale_inode(inode, outarg.generation, &outarg.attr))
323 goto invalid;
324
325 forget_all_cached_acls(inode);
326 fuse_change_attributes(inode, &outarg.attr, NULL,
327 ATTR_TIMEOUT(&outarg),
328 attr_version);
329 fuse_change_entry_timeout(entry, &outarg);
330 } else if (inode) {
331 fi = get_fuse_inode(inode);
332 if (flags & LOOKUP_RCU) {
333 if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
334 return -ECHILD;
335 } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
336 parent = dget_parent(entry);
337 fuse_advise_use_readdirplus(d_inode(parent));
338 dput(parent);
339 }
340 }
341 ret = 1;
342 out:
343 return ret;
344
345 invalid:
346 ret = 0;
347 goto out;
348 }
349
350 #if BITS_PER_LONG < 64 || defined(CONFIG_FUSE_BPF)
351 static int fuse_dentry_init(struct dentry *dentry)
352 {
353 dentry->d_fsdata = kzalloc(sizeof(struct fuse_dentry),
354 GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
355
356 return dentry->d_fsdata ? 0 : -ENOMEM;
357 }
358 static void fuse_dentry_release(struct dentry *dentry)
359 {
360 struct fuse_dentry *fd = dentry->d_fsdata;
361
362 #ifdef CONFIG_FUSE_BPF
363 if (fd && fd->backing_path.dentry)
364 path_put(&fd->backing_path);
365
366 if (fd && fd->bpf)
367 bpf_prog_put(fd->bpf);
368 #endif
369
370 kfree_rcu(fd, rcu);
371 }
372 #endif
373
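/*
 * Let the dcache drop the dentry on dput() once its entry timeout has
 * expired.
 */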
374 static int fuse_dentry_delete(const struct dentry *dentry)
375 {
376 return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
377 }
378
379 /*
380 * Create a fuse_mount object with a new superblock (with path->dentry
381 * as the root), and return that mount so it can be auto-mounted on
382 * @path.
383 */
384 static struct vfsmount *fuse_dentry_automount(struct path *path)
385 {
386 struct fs_context *fsc;
387 struct vfsmount *mnt;
388 struct fuse_inode *mp_fi = get_fuse_inode(d_inode(path->dentry));
389
390 fsc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
391 if (IS_ERR(fsc))
392 return ERR_CAST(fsc);
393
394 /* Pass the FUSE inode of the mount for fuse_get_tree_submount() */
395 fsc->fs_private = mp_fi;
396
397 /* Create the submount */
398 mnt = fc_mount(fsc);
399 if (!IS_ERR(mnt))
400 mntget(mnt);
401
402 put_fs_context(fsc);
403 return mnt;
404 }
405
406 /*
407 * Get the canonical path. Since we must translate to a path, this must be done
408  * in the context of the userspace daemon; however, the userspace daemon cannot
409 * look up paths on its own. Instead, we handle the lookup as a special case
410 * inside of the write request.
411 */
412 static int fuse_dentry_canonical_path(const struct path *path,
413 struct path *canonical_path)
414 {
415 struct inode *inode = d_inode(path->dentry);
416 //struct fuse_conn *fc = get_fuse_conn(inode);
417 struct fuse_mount *fm = get_fuse_mount_super(path->mnt->mnt_sb);
418 FUSE_ARGS(args);
419 char *path_name;
420 int err;
421
422 #ifdef CONFIG_FUSE_BPF
423 struct fuse_err_ret fer;
424
425 fer = fuse_bpf_backing(inode, struct fuse_dummy_io,
426 fuse_canonical_path_initialize,
427 fuse_canonical_path_backing,
428 fuse_canonical_path_finalize, path,
429 canonical_path);
430 if (fer.ret)
431 return PTR_ERR(fer.result);
432 #endif
433
434 if (fm->fc->no_dentry_canonical_path)
435 goto out;
436
437 path_name = (char *)get_zeroed_page(GFP_KERNEL);
438 if (!path_name)
439 return -ENOMEM;
440
441 args.opcode = FUSE_CANONICAL_PATH;
442 args.nodeid = get_node_id(inode);
443 args.in_numargs = 0;
444 args.out_numargs = 1;
445 args.out_args[0].size = PATH_MAX;
446 args.out_args[0].value = path_name;
447 args.canonical_path = canonical_path;
448 args.out_argvar = 1;
449
450 err = fuse_simple_request(fm, &args);
451 free_page((unsigned long)path_name);
452 if (err > 0)
453 return 0;
454 if (err < 0 && err != -ENOSYS)
455 return err;
456
457 if (err == -ENOSYS)
458 fm->fc->no_dentry_canonical_path = 1;
459
460 out:
461 canonical_path->dentry = path->dentry;
462 canonical_path->mnt = path->mnt;
463 path_get(canonical_path);
464 return 0;
465 }
466
467 const struct dentry_operations fuse_dentry_operations = {
468 .d_revalidate = fuse_dentry_revalidate,
469 .d_delete = fuse_dentry_delete,
470 #if BITS_PER_LONG < 64 || defined(CONFIG_FUSE_BPF)
471 .d_init = fuse_dentry_init,
472 .d_release = fuse_dentry_release,
473 #endif
474 .d_automount = fuse_dentry_automount,
475 .d_canonical_path = fuse_dentry_canonical_path,
476 };
477
478 const struct dentry_operations fuse_root_dentry_operations = {
479 #if BITS_PER_LONG < 64 || defined(CONFIG_FUSE_BPF)
480 .d_init = fuse_dentry_init,
481 .d_release = fuse_dentry_release,
482 #endif
483 };
484
485 int fuse_valid_type(int m)
486 {
487 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
488 S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
489 }
490
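/* Reject sizes that cannot be represented in a signed loff_t */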
491 static bool fuse_valid_size(u64 size)
492 {
493 return size <= LLONG_MAX;
494 }
495
496 bool fuse_invalid_attr(struct fuse_attr *attr)
497 {
498 return !fuse_valid_type(attr->mode) || !fuse_valid_size(attr->size);
499 }
500
501 int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
502 struct fuse_entry_out *outarg,
503 struct dentry *entry,
504 struct inode **inode)
505 {
506 struct fuse_mount *fm = get_fuse_mount_super(sb);
507 FUSE_ARGS(args);
508 struct fuse_entry_bpf bpf_arg = {0};
509 struct fuse_forget_link *forget;
510 u64 attr_version;
511 int err;
512
513 *inode = NULL;
514 err = -ENAMETOOLONG;
515 if (name->len > FUSE_NAME_MAX)
516 goto out;
517
518 forget = fuse_alloc_forget();
519 err = -ENOMEM;
520 if (!forget)
521 goto out;
522
523 attr_version = fuse_get_attr_version(fm->fc);
524
525 fuse_lookup_init(fm->fc, &args, nodeid, name, outarg, &bpf_arg.out);
526 err = fuse_simple_request(fm, &args);
527
528 #ifdef CONFIG_FUSE_BPF
529 if (err == sizeof(bpf_arg.out)) {
530 /* TODO Make sure this handles invalid handles */
531 struct file *backing_file;
532 struct inode *backing_inode;
533
534 err = -ENOENT;
535 if (!entry)
536 goto out_put_forget;
537
538 err = -EINVAL;
539 backing_file = bpf_arg.backing_file;
540 if (!backing_file)
541 goto out_put_forget;
542
543 if (IS_ERR(backing_file)) {
544 err = PTR_ERR(backing_file);
545 goto out_put_forget;
546 }
547
548 backing_inode = backing_file->f_inode;
549 *inode = fuse_iget_backing(sb, outarg->nodeid, backing_inode);
550 if (!*inode)
551 goto out_put_forget;
552
553 err = fuse_handle_backing(&bpf_arg,
554 &get_fuse_inode(*inode)->backing_inode,
555 &get_fuse_dentry(entry)->backing_path);
556 if (!err)
557 err = fuse_handle_bpf_prog(&bpf_arg, NULL,
558 &get_fuse_inode(*inode)->bpf);
559 if (err) {
560 iput(*inode);
561 *inode = NULL;
562 goto out_put_forget;
563 }
564 } else
565 #endif
566 {
567 /* Zero nodeid is same as -ENOENT, but with valid timeout */
568 if (err || !outarg->nodeid)
569 goto out_put_forget;
570
571 err = -EIO;
572 if (fuse_invalid_attr(&outarg->attr))
573 goto out_put_forget;
574 if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
575 pr_warn_once("root generation should be zero\n");
576 outarg->generation = 0;
577 }
578
579 *inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
580 &outarg->attr, ATTR_TIMEOUT(outarg),
581 attr_version);
582 }
583
584 err = -ENOMEM;
585 if (!*inode && outarg->nodeid) {
586 fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1);
587 goto out;
588 }
589 err = 0;
590
591 out_put_forget:
592 kfree(forget);
593 out:
594 if (bpf_arg.backing_file)
595 fput(bpf_arg.backing_file);
596 return err;
597 }
598
599 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
600 unsigned int flags)
601 {
602 int err;
603 struct fuse_entry_out outarg;
604 struct inode *inode;
605 struct dentry *newent;
606 bool outarg_valid = true;
607 bool locked;
608
609 #ifdef CONFIG_FUSE_BPF
610 struct fuse_err_ret fer;
611
612 fer = fuse_bpf_backing(dir, struct fuse_lookup_io,
613 fuse_lookup_initialize, fuse_lookup_backing,
614 fuse_lookup_finalize,
615 dir, entry, flags);
616 if (fer.ret)
617 return fer.result;
618 #endif
619
620 if (fuse_is_bad(dir))
621 return ERR_PTR(-EIO);
622
623 locked = fuse_lock_inode(dir);
624 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
625 &outarg, entry, &inode);
626 fuse_unlock_inode(dir, locked);
627 if (err == -ENOENT) {
628 outarg_valid = false;
629 err = 0;
630 }
631 if (err)
632 goto out_err;
633
634 err = -EIO;
635 if (inode && get_node_id(inode) == FUSE_ROOT_ID)
636 goto out_iput;
637
638 newent = d_splice_alias(inode, entry);
639 err = PTR_ERR(newent);
640 if (IS_ERR(newent))
641 goto out_err;
642
643 entry = newent ? newent : entry;
644 if (outarg_valid)
645 fuse_change_entry_timeout(entry, &outarg);
646 else
647 fuse_invalidate_entry_cache(entry);
648
649 if (inode)
650 fuse_advise_use_readdirplus(dir);
651 return newent;
652
653 out_iput:
654 iput(inode);
655 out_err:
656 return ERR_PTR(err);
657 }
658
659 static int get_security_context(struct dentry *entry, umode_t mode,
660 struct fuse_in_arg *ext)
661 {
662 struct fuse_secctx *fctx;
663 struct fuse_secctx_header *header;
664 void *ctx = NULL, *ptr;
665 u32 ctxlen, total_len = sizeof(*header);
666 int err, nr_ctx = 0;
667 const char *name;
668 size_t namelen;
669
670 err = security_dentry_init_security(entry, mode, &entry->d_name,
671 &name, &ctx, &ctxlen);
672 if (err) {
673 if (err != -EOPNOTSUPP)
674 goto out_err;
675 /* No LSM is supporting this security hook. Ignore error */
676 ctxlen = 0;
677 ctx = NULL;
678 }
679
680 if (ctxlen) {
681 nr_ctx = 1;
682 namelen = strlen(name) + 1;
683 err = -EIO;
684 if (WARN_ON(namelen > XATTR_NAME_MAX + 1 || ctxlen > S32_MAX))
685 goto out_err;
686 total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namelen + ctxlen);
687 }
688
689 err = -ENOMEM;
690 header = ptr = kzalloc(total_len, GFP_KERNEL);
691 if (!ptr)
692 goto out_err;
693
694 header->nr_secctx = nr_ctx;
695 header->size = total_len;
696 ptr += sizeof(*header);
697 if (nr_ctx) {
698 fctx = ptr;
699 fctx->size = ctxlen;
700 ptr += sizeof(*fctx);
701
702 strcpy(ptr, name);
703 ptr += namelen;
704
705 memcpy(ptr, ctx, ctxlen);
706 }
707 ext->size = total_len;
708 ext->value = header;
709 err = 0;
710 out_err:
711 kfree(ctx);
712 return err;
713 }
714
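/*
 * Grow the in-argument buffer by @bytes, zeroing the new space.  Returns a
 * pointer to the newly added region, or NULL (with the buffer freed) on
 * allocation failure.
 */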
715 static void *extend_arg(struct fuse_in_arg *buf, u32 bytes)
716 {
717 void *p;
718 u32 newlen = buf->size + bytes;
719
720 p = krealloc(buf->value, newlen, GFP_KERNEL);
721 if (!p) {
722 kfree(buf->value);
723 buf->size = 0;
724 buf->value = NULL;
725 return NULL;
726 }
727
728 memset(p + buf->size, 0, bytes);
729 buf->value = p;
730 buf->size = newlen;
731
732 return p + newlen - bytes;
733 }
734
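/* Size of one extension record: header plus payload, FUSE_REC_ALIGNed */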
735 static u32 fuse_ext_size(size_t size)
736 {
737 return FUSE_REC_ALIGN(sizeof(struct fuse_ext_header) + size);
738 }
739
740 /*
741 * This adds just a single supplementary group that matches the parent's group.
742 */
743 static int get_create_supp_group(struct mnt_idmap *idmap,
744 struct inode *dir,
745 struct fuse_in_arg *ext)
746 {
747 struct fuse_conn *fc = get_fuse_conn(dir);
748 struct fuse_ext_header *xh;
749 struct fuse_supp_groups *sg;
750 kgid_t kgid = dir->i_gid;
751 vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns, kgid);
752 gid_t parent_gid = from_kgid(fc->user_ns, kgid);
753
754 u32 sg_len = fuse_ext_size(sizeof(*sg) + sizeof(sg->groups[0]));
755
756 if (parent_gid == (gid_t) -1 || vfsgid_eq_kgid(vfsgid, current_fsgid()) ||
757 !vfsgid_in_group_p(vfsgid))
758 return 0;
759
760 xh = extend_arg(ext, sg_len);
761 if (!xh)
762 return -ENOMEM;
763
764 xh->size = sg_len;
765 xh->type = FUSE_EXT_GROUPS;
766
767 sg = (struct fuse_supp_groups *) &xh[1];
768 sg->nr_groups = 1;
769 sg->groups[0] = parent_gid;
770
771 return 0;
772 }
773
774 static int get_create_ext(struct mnt_idmap *idmap,
775 struct fuse_args *args,
776 struct inode *dir, struct dentry *dentry,
777 umode_t mode)
778 {
779 struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
780 struct fuse_in_arg ext = { .size = 0, .value = NULL };
781 int err = 0;
782
783 if (fc->init_security)
784 err = get_security_context(dentry, mode, &ext);
785 if (!err && fc->create_supp_group)
786 err = get_create_supp_group(idmap, dir, &ext);
787
788 if (!err && ext.size) {
789 WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args));
790 args->is_ext = true;
791 args->ext_idx = args->in_numargs++;
792 args->in_args[args->ext_idx] = ext;
793 } else {
794 kfree(ext.value);
795 }
796
797 return err;
798 }
799
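/* Free the extension argument attached by get_create_ext(), if any */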
800 static void free_ext_value(struct fuse_args *args)
801 {
802 if (args->is_ext)
803 kfree(args->in_args[args->ext_idx].value);
804 }
805
806 /*
807 * Atomic create+open operation
808 *
809 * If the filesystem doesn't support this, then fall back to separate
810 * 'mknod' + 'open' requests.
811 */
812 static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
813 struct dentry *entry, struct file *file,
814 unsigned int flags, umode_t mode, u32 opcode)
815 {
816 int err;
817 struct inode *inode;
818 struct fuse_mount *fm = get_fuse_mount(dir);
819 FUSE_ARGS(args);
820 struct fuse_forget_link *forget;
821 struct fuse_create_in inarg;
822 struct fuse_open_out *outopenp;
823 struct fuse_entry_out outentry;
824 struct fuse_inode *fi;
825 struct fuse_file *ff;
826 bool trunc = flags & O_TRUNC;
827
828 /* Userspace expects S_IFREG in create mode */
829 BUG_ON((mode & S_IFMT) != S_IFREG);
830
831 #ifdef CONFIG_FUSE_BPF
832 {
833 struct fuse_err_ret fer;
834
835 fer = fuse_bpf_backing(dir, struct fuse_create_open_io,
836 fuse_create_open_initialize,
837 fuse_create_open_backing,
838 fuse_create_open_finalize,
839 dir, entry, file, flags, mode);
840 if (fer.ret)
841 return PTR_ERR(fer.result);
842 }
843 #endif
844
845 forget = fuse_alloc_forget();
846 err = -ENOMEM;
847 if (!forget)
848 goto out_err;
849
850 err = -ENOMEM;
851 ff = fuse_file_alloc(fm, true);
852 if (!ff)
853 goto out_put_forget_req;
854
855 if (!fm->fc->dont_mask)
856 mode &= ~current_umask();
857
858 flags &= ~O_NOCTTY;
859 memset(&inarg, 0, sizeof(inarg));
860 memset(&outentry, 0, sizeof(outentry));
861 inarg.flags = flags;
862 inarg.mode = mode;
863 inarg.umask = current_umask();
864
865 if (fm->fc->handle_killpriv_v2 && trunc &&
866 !(flags & O_EXCL) && !capable(CAP_FSETID)) {
867 inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
868 }
869
870 args.opcode = opcode;
871 args.nodeid = get_node_id(dir);
872 args.in_numargs = 2;
873 args.in_args[0].size = sizeof(inarg);
874 args.in_args[0].value = &inarg;
875 args.in_args[1].size = entry->d_name.len + 1;
876 args.in_args[1].value = entry->d_name.name;
877 args.out_numargs = 2;
878 args.out_args[0].size = sizeof(outentry);
879 args.out_args[0].value = &outentry;
880 /* Store outarg for fuse_finish_open() */
881 outopenp = &ff->args->open_outarg;
882 args.out_args[1].size = sizeof(*outopenp);
883 args.out_args[1].value = outopenp;
884
885 err = get_create_ext(idmap, &args, dir, entry, mode);
886 if (err)
887 goto out_free_ff;
888
889 err = fuse_simple_idmap_request(idmap, fm, &args);
890 free_ext_value(&args);
891 if (err)
892 goto out_free_ff;
893
894 err = -EIO;
895 if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
896 fuse_invalid_attr(&outentry.attr))
897 goto out_free_ff;
898
899 ff->fh = outopenp->fh;
900 ff->nodeid = outentry.nodeid;
901 ff->open_flags = outopenp->open_flags;
902 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
903 &outentry.attr, ATTR_TIMEOUT(&outentry), 0);
904 if (!inode) {
905 flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
906 fuse_sync_release(NULL, ff, flags);
907 fuse_queue_forget(fm->fc, forget, outentry.nodeid, 1);
908 err = -ENOMEM;
909 goto out_err;
910 }
911 kfree(forget);
912 d_instantiate(entry, inode);
913 fuse_change_entry_timeout(entry, &outentry);
914 fuse_dir_changed(dir);
915 err = generic_file_open(inode, file);
916 if (!err) {
917 file->private_data = ff;
918 err = finish_open(file, entry, fuse_finish_open);
919 }
920 if (err) {
921 fi = get_fuse_inode(inode);
922 fuse_sync_release(fi, ff, flags);
923 } else {
924 if (fm->fc->atomic_o_trunc && trunc)
925 truncate_pagecache(inode, 0);
926 else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
927 invalidate_inode_pages2(inode->i_mapping);
928 }
929 return err;
930
931 out_free_ff:
932 fuse_file_free(ff);
933 out_put_forget_req:
934 kfree(forget);
935 out_err:
936 return err;
937 }
938
939 static int fuse_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
940 umode_t, dev_t);
941 static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
942 struct file *file, unsigned flags,
943 umode_t mode)
944 {
945 int err;
946 struct mnt_idmap *idmap = file_mnt_idmap(file);
947 struct fuse_conn *fc = get_fuse_conn(dir);
948 struct dentry *res = NULL;
949
950 if (fuse_is_bad(dir))
951 return -EIO;
952
953 if (d_in_lookup(entry)) {
954 res = fuse_lookup(dir, entry, 0);
955 if (IS_ERR(res))
956 return PTR_ERR(res);
957
958 if (res)
959 entry = res;
960 }
961
962 if (!(flags & O_CREAT) || d_really_is_positive(entry))
963 goto no_open;
964
965 /* Only creates */
966 file->f_mode |= FMODE_CREATED;
967
968 if (fc->no_create)
969 goto mknod;
970
971 err = fuse_create_open(idmap, dir, entry, file, flags, mode, FUSE_CREATE);
972 if (err == -ENOSYS) {
973 fc->no_create = 1;
974 goto mknod;
975 } else if (err == -EEXIST)
976 fuse_invalidate_entry(entry);
977 out_dput:
978 dput(res);
979 return err;
980
981 mknod:
982 err = fuse_mknod(idmap, dir, entry, mode, 0);
983 if (err)
984 goto out_dput;
985 no_open:
986 return finish_no_open(file, res);
987 }
988
989 /*
990 * Code shared between mknod, mkdir, symlink and link
991 */
992 static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
993 struct fuse_args *args, struct inode *dir,
994 struct dentry *entry, umode_t mode)
995 {
996 struct fuse_entry_out outarg;
997 struct inode *inode;
998 struct dentry *d;
999 int err;
1000 struct fuse_forget_link *forget;
1001
1002 if (fuse_is_bad(dir))
1003 return -EIO;
1004
1005 forget = fuse_alloc_forget();
1006 if (!forget)
1007 return -ENOMEM;
1008
1009 memset(&outarg, 0, sizeof(outarg));
1010 args->nodeid = get_node_id(dir);
1011 args->out_numargs = 1;
1012 args->out_args[0].size = sizeof(outarg);
1013 args->out_args[0].value = &outarg;
1014
1015 if (args->opcode != FUSE_LINK) {
1016 err = get_create_ext(idmap, args, dir, entry, mode);
1017 if (err)
1018 goto out_put_forget_req;
1019 }
1020
1021 err = fuse_simple_idmap_request(idmap, fm, args);
1022 free_ext_value(args);
1023 if (err)
1024 goto out_put_forget_req;
1025
1026 err = -EIO;
1027 if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
1028 goto out_put_forget_req;
1029
1030 if ((outarg.attr.mode ^ mode) & S_IFMT)
1031 goto out_put_forget_req;
1032
1033 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
1034 &outarg.attr, ATTR_TIMEOUT(&outarg), 0);
1035 if (!inode) {
1036 fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
1037 return -ENOMEM;
1038 }
1039 kfree(forget);
1040
1041 d_drop(entry);
1042 d = d_splice_alias(inode, entry);
1043 if (IS_ERR(d))
1044 return PTR_ERR(d);
1045
1046 if (d) {
1047 fuse_change_entry_timeout(d, &outarg);
1048 dput(d);
1049 } else {
1050 fuse_change_entry_timeout(entry, &outarg);
1051 }
1052 fuse_dir_changed(dir);
1053 return 0;
1054
1055 out_put_forget_req:
1056 if (err == -EEXIST)
1057 fuse_invalidate_entry(entry);
1058 kfree(forget);
1059 return err;
1060 }
1061
1062 static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
1063 struct dentry *entry, umode_t mode, dev_t rdev)
1064 {
1065 struct fuse_mknod_in inarg;
1066 struct fuse_mount *fm = get_fuse_mount(dir);
1067 FUSE_ARGS(args);
1068
1069 #ifdef CONFIG_FUSE_BPF
1070 struct fuse_err_ret fer;
1071
1072 fer = fuse_bpf_backing(dir, struct fuse_mknod_in,
1073 fuse_mknod_initialize, fuse_mknod_backing,
1074 fuse_mknod_finalize,
1075 dir, entry, mode, rdev);
1076 if (fer.ret)
1077 return PTR_ERR(fer.result);
1078 #endif
1079
1080 if (!fm->fc->dont_mask)
1081 mode &= ~current_umask();
1082
1083 memset(&inarg, 0, sizeof(inarg));
1084 inarg.mode = mode;
1085 inarg.rdev = new_encode_dev(rdev);
1086 inarg.umask = current_umask();
1087 args.opcode = FUSE_MKNOD;
1088 args.in_numargs = 2;
1089 args.in_args[0].size = sizeof(inarg);
1090 args.in_args[0].value = &inarg;
1091 args.in_args[1].size = entry->d_name.len + 1;
1092 args.in_args[1].value = entry->d_name.name;
1093 return create_new_entry(idmap, fm, &args, dir, entry, mode);
1094 }
1095
1096 static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
1097 struct dentry *entry, umode_t mode, bool excl)
1098 {
1099 return fuse_mknod(idmap, dir, entry, mode, 0);
1100 }
1101
1102 static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
1103 struct file *file, umode_t mode)
1104 {
1105 struct fuse_conn *fc = get_fuse_conn(dir);
1106 int err;
1107
1108 if (fc->no_tmpfile)
1109 return -EOPNOTSUPP;
1110
1111 err = fuse_create_open(idmap, dir, file->f_path.dentry, file,
1112 file->f_flags, mode, FUSE_TMPFILE);
1113 if (err == -ENOSYS) {
1114 fc->no_tmpfile = 1;
1115 err = -EOPNOTSUPP;
1116 }
1117 return err;
1118 }
1119
1120 static int fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
1121 struct dentry *entry, umode_t mode)
1122 {
1123 struct fuse_mkdir_in inarg;
1124 struct fuse_mount *fm = get_fuse_mount(dir);
1125 FUSE_ARGS(args);
1126
1127 #ifdef CONFIG_FUSE_BPF
1128 struct fuse_err_ret fer;
1129
1130 fer = fuse_bpf_backing(dir, struct fuse_mkdir_in,
1131 fuse_mkdir_initialize, fuse_mkdir_backing,
1132 fuse_mkdir_finalize,
1133 dir, entry, mode);
1134 if (fer.ret)
1135 return PTR_ERR(fer.result);
1136 #endif
1137
1138 if (!fm->fc->dont_mask)
1139 mode &= ~current_umask();
1140
1141 memset(&inarg, 0, sizeof(inarg));
1142 inarg.mode = mode;
1143 inarg.umask = current_umask();
1144 args.opcode = FUSE_MKDIR;
1145 args.in_numargs = 2;
1146 args.in_args[0].size = sizeof(inarg);
1147 args.in_args[0].value = &inarg;
1148 args.in_args[1].size = entry->d_name.len + 1;
1149 args.in_args[1].value = entry->d_name.name;
1150 return create_new_entry(idmap, fm, &args, dir, entry, S_IFDIR);
1151 }
1152
1153 static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
1154 struct dentry *entry, const char *link)
1155 {
1156 struct fuse_mount *fm = get_fuse_mount(dir);
1157 unsigned len = strlen(link) + 1;
1158 FUSE_ARGS(args);
1159
1160 #ifdef CONFIG_FUSE_BPF
1161 struct fuse_err_ret fer;
1162
1163 fer = fuse_bpf_backing(dir, struct fuse_dummy_io,
1164 fuse_symlink_initialize, fuse_symlink_backing,
1165 fuse_symlink_finalize,
1166 dir, entry, link, len);
1167 if (fer.ret)
1168 return PTR_ERR(fer.result);
1169 #endif
1170
1171 args.opcode = FUSE_SYMLINK;
1172 args.in_numargs = 2;
1173 args.in_args[0].size = entry->d_name.len + 1;
1174 args.in_args[0].value = entry->d_name.name;
1175 args.in_args[1].size = len;
1176 args.in_args[1].value = link;
1177 return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK);
1178 }
1179
1180 void fuse_flush_time_update(struct inode *inode)
1181 {
1182 int err = sync_inode_metadata(inode, 1);
1183
1184 mapping_set_error(inode->i_mapping, err);
1185 }
1186
1187 static void fuse_update_ctime_in_cache(struct inode *inode)
1188 {
1189 if (!IS_NOCMTIME(inode)) {
1190 inode_set_ctime_current(inode);
1191 mark_inode_dirty_sync(inode);
1192 fuse_flush_time_update(inode);
1193 }
1194 }
1195
1196 void fuse_update_ctime(struct inode *inode)
1197 {
1198 fuse_invalidate_attr_mask(inode, STATX_CTIME);
1199 fuse_update_ctime_in_cache(inode);
1200 }
1201
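/*
 * The entry was unlinked on the server side: bump the attribute version,
 * drop the cached link count, invalidate the entry cache and update ctime.
 */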
1202 static void fuse_entry_unlinked(struct dentry *entry)
1203 {
1204 struct inode *inode = d_inode(entry);
1205 struct fuse_conn *fc = get_fuse_conn(inode);
1206 struct fuse_inode *fi = get_fuse_inode(inode);
1207
1208 spin_lock(&fi->lock);
1209 fi->attr_version = atomic64_inc_return(&fc->attr_version);
1210 /*
1211 * If i_nlink == 0 then unlink doesn't make sense, yet this can
1212 * happen if userspace filesystem is careless. It would be
1213 * difficult to enforce correct nlink usage so just ignore this
1214 * condition here
1215 */
1216 if (S_ISDIR(inode->i_mode))
1217 clear_nlink(inode);
1218 else if (inode->i_nlink > 0)
1219 drop_nlink(inode);
1220 spin_unlock(&fi->lock);
1221 fuse_invalidate_entry_cache(entry);
1222 fuse_update_ctime(inode);
1223 }
1224
1225 static int fuse_unlink(struct inode *dir, struct dentry *entry)
1226 {
1227 int err;
1228 struct fuse_mount *fm = get_fuse_mount(dir);
1229 FUSE_ARGS(args);
1230
1231 if (fuse_is_bad(dir))
1232 return -EIO;
1233
1234 #ifdef CONFIG_FUSE_BPF
1235 {
1236 struct fuse_err_ret fer;
1237
1238 fer = fuse_bpf_backing(dir, struct fuse_dummy_io,
1239 fuse_unlink_initialize,
1240 fuse_unlink_backing,
1241 fuse_unlink_finalize,
1242 dir, entry);
1243 if (fer.ret)
1244 return PTR_ERR(fer.result);
1245 }
1246 #endif
1247
1248 args.opcode = FUSE_UNLINK;
1249 args.nodeid = get_node_id(dir);
1250 args.in_numargs = 1;
1251 args.in_args[0].size = entry->d_name.len + 1;
1252 args.in_args[0].value = entry->d_name.name;
1253 err = fuse_simple_request(fm, &args);
1254 if (!err) {
1255 fuse_dir_changed(dir);
1256 fuse_entry_unlinked(entry);
1257 } else if (err == -EINTR || err == -ENOENT)
1258 fuse_invalidate_entry(entry);
1259 return err;
1260 }
1261
1262 static int fuse_rmdir(struct inode *dir, struct dentry *entry)
1263 {
1264 int err;
1265 struct fuse_mount *fm = get_fuse_mount(dir);
1266 FUSE_ARGS(args);
1267
1268 if (fuse_is_bad(dir))
1269 return -EIO;
1270
1271 #ifdef CONFIG_FUSE_BPF
1272 {
1273 struct fuse_err_ret fer;
1274
1275 fer = fuse_bpf_backing(dir, struct fuse_dummy_io,
1276 fuse_rmdir_initialize,
1277 fuse_rmdir_backing,
1278 fuse_rmdir_finalize,
1279 dir, entry);
1280 if (fer.ret)
1281 return PTR_ERR(fer.result);
1282 }
1283 #endif
1284
1285 args.opcode = FUSE_RMDIR;
1286 args.nodeid = get_node_id(dir);
1287 args.in_numargs = 1;
1288 args.in_args[0].size = entry->d_name.len + 1;
1289 args.in_args[0].value = entry->d_name.name;
1290 err = fuse_simple_request(fm, &args);
1291 if (!err) {
1292 fuse_dir_changed(dir);
1293 fuse_entry_unlinked(entry);
1294 } else if (err == -EINTR || err == -ENOENT)
1295 fuse_invalidate_entry(entry);
1296 return err;
1297 }
1298
1299 static int fuse_rename_common(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *oldent,
1300 struct inode *newdir, struct dentry *newent,
1301 unsigned int flags, int opcode, size_t argsize)
1302 {
1303 int err;
1304 struct fuse_rename2_in inarg;
1305 struct fuse_mount *fm = get_fuse_mount(olddir);
1306 FUSE_ARGS(args);
1307
1308 memset(&inarg, 0, argsize);
1309 inarg.newdir = get_node_id(newdir);
1310 inarg.flags = flags;
1311 args.opcode = opcode;
1312 args.nodeid = get_node_id(olddir);
1313 args.in_numargs = 3;
1314 args.in_args[0].size = argsize;
1315 args.in_args[0].value = &inarg;
1316 args.in_args[1].size = oldent->d_name.len + 1;
1317 args.in_args[1].value = oldent->d_name.name;
1318 args.in_args[2].size = newent->d_name.len + 1;
1319 args.in_args[2].value = newent->d_name.name;
1320 err = fuse_simple_idmap_request(idmap, fm, &args);
1321 if (!err) {
1322 /* ctime changes */
1323 fuse_update_ctime(d_inode(oldent));
1324
1325 if (flags & RENAME_EXCHANGE)
1326 fuse_update_ctime(d_inode(newent));
1327
1328 fuse_dir_changed(olddir);
1329 if (olddir != newdir)
1330 fuse_dir_changed(newdir);
1331
1332 /* newent will end up negative */
1333 if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent))
1334 fuse_entry_unlinked(newent);
1335 } else if (err == -EINTR || err == -ENOENT) {
1336 /* If request was interrupted, DEITY only knows if the
1337 rename actually took place. If the invalidation
1338 fails (e.g. some process has CWD under the renamed
1339 directory), then there can be inconsistency between
1340 the dcache and the real filesystem. Tough luck. */
1341 fuse_invalidate_entry(oldent);
1342 if (d_really_is_positive(newent))
1343 fuse_invalidate_entry(newent);
1344 }
1345
1346 return err;
1347 }
1348
1349 static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
1350 struct dentry *oldent, struct inode *newdir,
1351 struct dentry *newent, unsigned int flags)
1352 {
1353 struct fuse_conn *fc = get_fuse_conn(olddir);
1354 int err;
1355
1356 if (fuse_is_bad(olddir))
1357 return -EIO;
1358
1359 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
1360 return -EINVAL;
1361
1362 if (flags) {
1363 #ifdef CONFIG_FUSE_BPF
1364 struct fuse_err_ret fer;
1365
1366 fer = fuse_bpf_backing(olddir, struct fuse_rename2_in,
1367 fuse_rename2_initialize, fuse_rename2_backing,
1368 fuse_rename2_finalize,
1369 olddir, oldent, newdir, newent, flags);
1370 if (fer.ret)
1371 return PTR_ERR(fer.result);
1372 #endif
1373
1374 /* TODO: how should this go with bpfs involved? */
1375 if (fc->no_rename2 || fc->minor < 23)
1376 return -EINVAL;
1377
1378 err = fuse_rename_common((flags & RENAME_WHITEOUT) ? idmap : &invalid_mnt_idmap,
1379 olddir, oldent, newdir, newent, flags,
1380 FUSE_RENAME2,
1381 sizeof(struct fuse_rename2_in));
1382 if (err == -ENOSYS) {
1383 fc->no_rename2 = 1;
1384 err = -EINVAL;
1385 }
1386 } else {
1387 #ifdef CONFIG_FUSE_BPF
1388 struct fuse_err_ret fer;
1389
1390 fer = fuse_bpf_backing(olddir, struct fuse_rename_in,
1391 fuse_rename_initialize, fuse_rename_backing,
1392 fuse_rename_finalize,
1393 olddir, oldent, newdir, newent);
1394 if (fer.ret)
1395 return PTR_ERR(fer.result);
1396 #endif
1397
1398 err = fuse_rename_common(&invalid_mnt_idmap, olddir, oldent, newdir, newent, 0,
1399 FUSE_RENAME,
1400 sizeof(struct fuse_rename_in));
1401 }
1402
1403 return err;
1404 }
1405
1406 static int fuse_link(struct dentry *entry, struct inode *newdir,
1407 struct dentry *newent)
1408 {
1409 int err;
1410 struct fuse_link_in inarg;
1411 struct inode *inode = d_inode(entry);
1412 struct fuse_mount *fm = get_fuse_mount(inode);
1413 FUSE_ARGS(args);
1414
1415 #ifdef CONFIG_FUSE_BPF
1416 struct fuse_err_ret fer;
1417
1418 fer = fuse_bpf_backing(inode, struct fuse_link_in, fuse_link_initialize,
1419 fuse_link_backing, fuse_link_finalize, entry,
1420 newdir, newent);
1421 if (fer.ret)
1422 return PTR_ERR(fer.result);
1423 #endif
1424
1425 memset(&inarg, 0, sizeof(inarg));
1426 inarg.oldnodeid = get_node_id(inode);
1427 args.opcode = FUSE_LINK;
1428 args.in_numargs = 2;
1429 args.in_args[0].size = sizeof(inarg);
1430 args.in_args[0].value = &inarg;
1431 args.in_args[1].size = newent->d_name.len + 1;
1432 args.in_args[1].value = newent->d_name.name;
1433 err = create_new_entry(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
1434 if (!err)
1435 fuse_update_ctime_in_cache(inode);
1436 else if (err == -EINTR)
1437 fuse_invalidate_attr(inode);
1438
1439 if (err == -ENOSYS)
1440 err = -EPERM;
1441 return err;
1442 }
1443
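/*
 * Translate a fuse_attr reply into a kstat, mapping uid/gid through the
 * connection's user namespace and the mount's idmapping.
 */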
1444 void fuse_fillattr(struct mnt_idmap *idmap, struct inode *inode,
1445 struct fuse_attr *attr, struct kstat *stat)
1446 {
1447 unsigned int blkbits;
1448 struct fuse_conn *fc = get_fuse_conn(inode);
1449 vfsuid_t vfsuid = make_vfsuid(idmap, fc->user_ns,
1450 make_kuid(fc->user_ns, attr->uid));
1451 vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns,
1452 make_kgid(fc->user_ns, attr->gid));
1453
1454 stat->dev = inode->i_sb->s_dev;
1455 stat->ino = attr->ino;
1456 stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
1457 stat->nlink = attr->nlink;
1458 stat->uid = vfsuid_into_kuid(vfsuid);
1459 stat->gid = vfsgid_into_kgid(vfsgid);
1460 stat->rdev = inode->i_rdev;
1461 stat->atime.tv_sec = attr->atime;
1462 stat->atime.tv_nsec = attr->atimensec;
1463 stat->mtime.tv_sec = attr->mtime;
1464 stat->mtime.tv_nsec = attr->mtimensec;
1465 stat->ctime.tv_sec = attr->ctime;
1466 stat->ctime.tv_nsec = attr->ctimensec;
1467 stat->size = attr->size;
1468 stat->blocks = attr->blocks;
1469
1470 if (attr->blksize != 0)
1471 blkbits = ilog2(attr->blksize);
1472 else
1473 blkbits = inode->i_sb->s_blocksize_bits;
1474
1475 stat->blksize = 1 << blkbits;
1476 }
1477
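/* Convert a fuse_statx reply into the legacy fuse_attr layout */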
1478 static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr)
1479 {
1480 memset(attr, 0, sizeof(*attr));
1481 attr->ino = sx->ino;
1482 attr->size = sx->size;
1483 attr->blocks = sx->blocks;
1484 attr->atime = sx->atime.tv_sec;
1485 attr->mtime = sx->mtime.tv_sec;
1486 attr->ctime = sx->ctime.tv_sec;
1487 attr->atimensec = sx->atime.tv_nsec;
1488 attr->mtimensec = sx->mtime.tv_nsec;
1489 attr->ctimensec = sx->ctime.tv_nsec;
1490 attr->mode = sx->mode;
1491 attr->nlink = sx->nlink;
1492 attr->uid = sx->uid;
1493 attr->gid = sx->gid;
1494 attr->rdev = new_encode_dev(MKDEV(sx->rdev_major, sx->rdev_minor));
1495 attr->blksize = sx->blksize;
1496 }
1497
1498 static int fuse_do_statx(struct mnt_idmap *idmap, struct inode *inode,
1499 struct file *file, struct kstat *stat)
1500 {
1501 int err;
1502 struct fuse_attr attr;
1503 struct fuse_statx *sx;
1504 struct fuse_statx_in inarg;
1505 struct fuse_statx_out outarg;
1506 struct fuse_mount *fm = get_fuse_mount(inode);
1507 u64 attr_version = fuse_get_attr_version(fm->fc);
1508 FUSE_ARGS(args);
1509
1510 memset(&inarg, 0, sizeof(inarg));
1511 memset(&outarg, 0, sizeof(outarg));
1512 /* Directories have separate file-handle space */
1513 if (file && S_ISREG(inode->i_mode)) {
1514 struct fuse_file *ff = file->private_data;
1515
1516 inarg.getattr_flags |= FUSE_GETATTR_FH;
1517 inarg.fh = ff->fh;
1518 }
1519 /* For now leave sync hints as the default, request all stats. */
1520 inarg.sx_flags = 0;
1521 inarg.sx_mask = STATX_BASIC_STATS | STATX_BTIME;
1522 args.opcode = FUSE_STATX;
1523 args.nodeid = get_node_id(inode);
1524 args.in_numargs = 1;
1525 args.in_args[0].size = sizeof(inarg);
1526 args.in_args[0].value = &inarg;
1527 args.out_numargs = 1;
1528 args.out_args[0].size = sizeof(outarg);
1529 args.out_args[0].value = &outarg;
1530 err = fuse_simple_request(fm, &args);
1531 if (err)
1532 return err;
1533
1534 sx = &outarg.stat;
1535 if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) ||
1536 ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) ||
1537 inode_wrong_type(inode, sx->mode)))) {
1538 fuse_make_bad(inode);
1539 return -EIO;
1540 }
1541
1542 fuse_statx_to_attr(&outarg.stat, &attr);
1543 if ((sx->mask & STATX_BASIC_STATS) == STATX_BASIC_STATS) {
1544 fuse_change_attributes(inode, &attr, &outarg.stat,
1545 ATTR_TIMEOUT(&outarg), attr_version);
1546 }
1547
1548 if (stat) {
1549 stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME);
1550 stat->btime.tv_sec = sx->btime.tv_sec;
1551 stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
1552 fuse_fillattr(idmap, inode, &attr, stat);
1553 stat->result_mask |= STATX_TYPE;
1554 }
1555
1556 return 0;
1557 }
1558
1559 static int fuse_do_getattr(struct mnt_idmap *idmap, struct inode *inode,
1560 struct kstat *stat, struct file *file)
1561 {
1562 int err;
1563 struct fuse_getattr_in inarg;
1564 struct fuse_attr_out outarg;
1565 struct fuse_mount *fm = get_fuse_mount(inode);
1566 FUSE_ARGS(args);
1567 u64 attr_version;
1568
1569 attr_version = fuse_get_attr_version(fm->fc);
1570
1571 memset(&inarg, 0, sizeof(inarg));
1572 memset(&outarg, 0, sizeof(outarg));
1573 /* Directories have separate file-handle space */
1574 if (file && S_ISREG(inode->i_mode)) {
1575 struct fuse_file *ff = file->private_data;
1576
1577 inarg.getattr_flags |= FUSE_GETATTR_FH;
1578 inarg.fh = ff->fh;
1579 }
1580 args.opcode = FUSE_GETATTR;
1581 args.nodeid = get_node_id(inode);
1582 args.in_numargs = 1;
1583 args.in_args[0].size = sizeof(inarg);
1584 args.in_args[0].value = &inarg;
1585 args.out_numargs = 1;
1586 args.out_args[0].size = sizeof(outarg);
1587 args.out_args[0].value = &outarg;
1588 err = fuse_simple_request(fm, &args);
1589 if (!err)
1590 err = finalize_attr(inode, &outarg, attr_version, stat);
1591 return err;
1592 }
1593
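/*
 * Return attributes from the cache if they are fresh enough for the
 * requested mask, otherwise fetch them from userspace via STATX or GETATTR.
 */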
1594 static int fuse_update_get_attr(struct mnt_idmap *idmap, struct inode *inode,
1595 struct file *file, const struct path *path, struct kstat *stat,
1596 u32 request_mask, unsigned int flags)
1597 {
1598 struct fuse_inode *fi = get_fuse_inode(inode);
1599 struct fuse_conn *fc = get_fuse_conn(inode);
1600 int err = 0;
1601 bool sync;
1602 u32 inval_mask = READ_ONCE(fi->inval_mask);
1603 u32 cache_mask = fuse_get_cache_mask(inode);
1604
1605 #ifdef CONFIG_FUSE_BPF
1606 struct fuse_err_ret fer;
1607
1608 fer = fuse_bpf_backing(inode, struct fuse_getattr_io,
1609 fuse_getattr_initialize, fuse_getattr_backing,
1610 fuse_getattr_finalize,
1611 path->dentry, stat, request_mask, flags);
1612 if (fer.ret)
1613 return PTR_ERR(fer.result);
1614 #endif
1615
1616 /* FUSE only supports basic stats and possibly btime */
1617 request_mask &= STATX_BASIC_STATS | STATX_BTIME;
1618 retry:
1619 if (fc->no_statx)
1620 request_mask &= STATX_BASIC_STATS;
1621
1622 if (!request_mask)
1623 sync = false;
1624 else if (flags & AT_STATX_FORCE_SYNC)
1625 sync = true;
1626 else if (flags & AT_STATX_DONT_SYNC)
1627 sync = false;
1628 else if (request_mask & inval_mask & ~cache_mask)
1629 sync = true;
1630 else
1631 sync = time_before64(fi->i_time, get_jiffies_64());
1632
1633 if (sync) {
1634 forget_all_cached_acls(inode);
1635 /* Try statx if BTIME is requested */
1636 if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) {
1637 err = fuse_do_statx(idmap, inode, file, stat);
1638 if (err == -ENOSYS) {
1639 fc->no_statx = 1;
1640 err = 0;
1641 goto retry;
1642 }
1643 } else {
1644 err = fuse_do_getattr(idmap, inode, stat, file);
1645 }
1646 } else if (stat) {
1647 generic_fillattr(idmap, request_mask, inode, stat);
1648 stat->mode = fi->orig_i_mode;
1649 stat->ino = fi->orig_ino;
1650 if (test_bit(FUSE_I_BTIME, &fi->state)) {
1651 stat->btime = fi->i_btime;
1652 stat->result_mask |= STATX_BTIME;
1653 }
1654 }
1655
1656 return err;
1657 }
1658
1659 int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask)
1660 {
1661 /* Do *not* need to get atime for internal purposes */
1662 return fuse_update_get_attr(&nop_mnt_idmap, inode, file, &file->f_path, NULL,
1663 mask & ~STATX_ATIME, 0);
1664 }
1665
1666 int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
1667 u64 child_nodeid, struct qstr *name, u32 flags)
1668 {
1669 int err = -ENOTDIR;
1670 struct inode *parent;
1671 struct dentry *dir;
1672 struct dentry *entry;
1673
1674 parent = fuse_ilookup(fc, parent_nodeid, NULL);
1675 if (!parent)
1676 return -ENOENT;
1677
1678 inode_lock_nested(parent, I_MUTEX_PARENT);
1679 if (!S_ISDIR(parent->i_mode))
1680 goto unlock;
1681
1682 err = -ENOENT;
1683 dir = d_find_alias(parent);
1684 if (!dir)
1685 goto unlock;
1686
1687 name->hash = full_name_hash(dir, name->name, name->len);
1688 entry = d_lookup(dir, name);
1689 dput(dir);
1690 if (!entry)
1691 goto unlock;
1692
1693 fuse_dir_changed(parent);
1694 if (!(flags & FUSE_EXPIRE_ONLY))
1695 d_invalidate(entry);
1696 fuse_invalidate_entry_cache(entry);
1697
1698 if (child_nodeid != 0 && d_really_is_positive(entry)) {
1699 inode_lock(d_inode(entry));
1700 if (get_node_id(d_inode(entry)) != child_nodeid) {
1701 err = -ENOENT;
1702 goto badentry;
1703 }
1704 if (d_mountpoint(entry)) {
1705 err = -EBUSY;
1706 goto badentry;
1707 }
1708 if (d_is_dir(entry)) {
1709 shrink_dcache_parent(entry);
1710 if (!simple_empty(entry)) {
1711 err = -ENOTEMPTY;
1712 goto badentry;
1713 }
1714 d_inode(entry)->i_flags |= S_DEAD;
1715 }
1716 dont_mount(entry);
1717 clear_nlink(d_inode(entry));
1718 err = 0;
1719 badentry:
1720 inode_unlock(d_inode(entry));
1721 if (!err)
1722 d_delete(entry);
1723 } else {
1724 err = 0;
1725 }
1726 dput(entry);
1727
1728 unlock:
1729 inode_unlock(parent);
1730 iput(parent);
1731 return err;
1732 }
1733
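/*
 * The caller may access the filesystem only if all of its real, effective
 * and saved ids match the mount owner.
 */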
1734 static inline bool fuse_permissible_uidgid(struct fuse_conn *fc)
1735 {
1736 const struct cred *cred = current_cred();
1737
1738 return (uid_eq(cred->euid, fc->user_id) &&
1739 uid_eq(cred->suid, fc->user_id) &&
1740 uid_eq(cred->uid, fc->user_id) &&
1741 gid_eq(cred->egid, fc->group_id) &&
1742 gid_eq(cred->sgid, fc->group_id) &&
1743 gid_eq(cred->gid, fc->group_id));
1744 }
1745
1746 /*
1747 * Calling into a user-controlled filesystem gives the filesystem
1748 * daemon ptrace-like capabilities over the current process. This
1749 * means, that the filesystem daemon is able to record the exact
1750 * filesystem operations performed, and can also control the behavior
1751 * of the requester process in otherwise impossible ways. For example
1752 * it can delay the operation for arbitrary length of time allowing
1753 * DoS against the requester.
1754 *
1755 * For this reason only those processes can call into the filesystem,
1756 * for which the owner of the mount has ptrace privilege. This
1757 * excludes processes started by other users, suid or sgid processes.
1758 */
1759 bool fuse_allow_current_process(struct fuse_conn *fc)
1760 {
1761 bool allow;
1762
1763 if (fc->allow_other)
1764 allow = current_in_userns(fc->user_ns);
1765 else
1766 allow = fuse_permissible_uidgid(fc);
1767
1768 if (!allow && allow_sys_admin_access && capable(CAP_SYS_ADMIN))
1769 allow = true;
1770
1771 return allow;
1772 }
1773
1774 static int fuse_access(struct inode *inode, int mask)
1775 {
1776 struct fuse_mount *fm = get_fuse_mount(inode);
1777 FUSE_ARGS(args);
1778 struct fuse_access_in inarg;
1779 int err;
1780
1781 #ifdef CONFIG_FUSE_BPF
1782 struct fuse_err_ret fer;
1783
1784 fer = fuse_bpf_backing(inode, struct fuse_access_in,
1785 fuse_access_initialize, fuse_access_backing,
1786 fuse_access_finalize, inode, mask);
1787 if (fer.ret)
1788 return PTR_ERR(fer.result);
1789 #endif
1790
1791 BUG_ON(mask & MAY_NOT_BLOCK);
1792
1793 /*
1794 * We should not send FUSE_ACCESS to the userspace
1795 * when idmapped mounts are enabled as for this case
1796 * we have fc->default_permissions = 1 and access
1797 * permission checks are done on the kernel side.
1798 */
1799 WARN_ON_ONCE(!(fm->sb->s_iflags & SB_I_NOIDMAP));
1800
1801 if (fm->fc->no_access)
1802 return 0;
1803
1804 memset(&inarg, 0, sizeof(inarg));
1805 inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
1806 args.opcode = FUSE_ACCESS;
1807 args.nodeid = get_node_id(inode);
1808 args.in_numargs = 1;
1809 args.in_args[0].size = sizeof(inarg);
1810 args.in_args[0].value = &inarg;
1811 err = fuse_simple_request(fm, &args);
1812 if (err == -ENOSYS) {
1813 fm->fc->no_access = 1;
1814 err = 0;
1815 }
1816 return err;
1817 }
1818
1819 static int fuse_perm_getattr(struct inode *inode, int mask)
1820 {
1821 if (mask & MAY_NOT_BLOCK)
1822 return -ECHILD;
1823
1824 forget_all_cached_acls(inode);
1825 return fuse_do_getattr(&nop_mnt_idmap, inode, NULL, NULL);
1826 }
1827
1828 /*
1829 * Check permission. The two basic access models of FUSE are:
1830 *
1831 * 1) Local access checking ('default_permissions' mount option) based
1832 * on file mode. This is the plain old disk filesystem permission
1833 * model.
1834 *
1835 * 2) "Remote" access checking, where server is responsible for
1836 * checking permission in each inode operation. An exception to this
1837 * is if ->permission() was invoked from sys_access() in which case an
1838 * access request is sent. Execute permission is still checked
1839 * locally based on file mode.
1840 */
1841 static int fuse_permission(struct mnt_idmap *idmap,
1842 struct inode *inode, int mask)
1843 {
1844 struct fuse_conn *fc = get_fuse_conn(inode);
1845 bool refreshed = false;
1846 int err = 0;
1847 struct fuse_inode *fi = get_fuse_inode(inode);
1848 #ifdef CONFIG_FUSE_BPF
1849 struct fuse_err_ret fer;
1850 #endif
1851
1852 if (fuse_is_bad(inode))
1853 return -EIO;
1854
1855 if (!fuse_allow_current_process(fc))
1856 return -EACCES;
1857
1858 #ifdef CONFIG_FUSE_BPF
1859 fer = fuse_bpf_backing(inode, struct fuse_access_in,
1860 fuse_access_initialize, fuse_access_backing,
1861 fuse_access_finalize, inode, mask);
1862 if (fer.ret)
1863 return PTR_ERR(fer.result);
1864 #endif
1865
1866 /*
1867 * If attributes are needed, refresh them before proceeding
1868 */
1869 if (fc->default_permissions ||
1870 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1871 u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;
1872
1873 if (perm_mask & READ_ONCE(fi->inval_mask) ||
1874 time_before64(fi->i_time, get_jiffies_64())) {
1875 refreshed = true;
1876
1877 err = fuse_perm_getattr(inode, mask);
1878 if (err)
1879 return err;
1880 }
1881 }
1882
1883 if (fc->default_permissions) {
1884 err = generic_permission(idmap, inode, mask);
1885
1886 /* If permission is denied, try to refresh file
1887 attributes. This is also needed because the root
1888 node will at first have no permissions */
1889 if (err == -EACCES && !refreshed) {
1890 err = fuse_perm_getattr(inode, mask);
1891 if (!err)
1892 err = generic_permission(idmap,
1893 inode, mask);
1894 }
1895
1896 /* Note: the opposite of the above test does not
1897 exist. So if permissions are revoked, this won't be
1898 noticed immediately, only after the attribute
1899 timeout has expired */
1900 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1901 err = fuse_access(inode, mask);
1902 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
1903 if (!(inode->i_mode & S_IXUGO)) {
1904 if (refreshed)
1905 return -EACCES;
1906
1907 err = fuse_perm_getattr(inode, mask);
1908 if (!err && !(inode->i_mode & S_IXUGO))
1909 return -EACCES;
1910 }
1911 }
1912 return err;
1913 }
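
/*
 * In the non-default_permissions model only MAY_ACCESS/MAY_CHDIR checks
 * (i.e. access(2)/faccessat(2) and chdir(2)) are forwarded to the daemon
 * as FUSE_ACCESS; everything else relies on the server checking each
 * individual operation.  A small userspace probe (path hypothetical)
 * that triggers FUSE_ACCESS when "default_permissions" is not set:
 *
 *        #include <unistd.h>
 *        #include <stdio.h>
 *
 *        int main(void)
 *        {
 *                if (access("/mnt/fuse/some-file", R_OK | W_OK) != 0)
 *                        perror("access");
 *                return 0;
 *        }
 */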
1914
1915 static int fuse_readlink_page(struct inode *inode, struct page *page)
1916 {
1917 struct fuse_mount *fm = get_fuse_mount(inode);
1918 struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 };
1919 struct fuse_args_pages ap = {
1920 .num_pages = 1,
1921 .pages = &page,
1922 .descs = &desc,
1923 };
1924 char *link;
1925 ssize_t res;
1926
1927 ap.args.opcode = FUSE_READLINK;
1928 ap.args.nodeid = get_node_id(inode);
1929 ap.args.out_pages = true;
1930 ap.args.out_argvar = true;
1931 ap.args.page_zeroing = true;
1932 ap.args.out_numargs = 1;
1933 ap.args.out_args[0].size = desc.length;
1934 res = fuse_simple_request(fm, &ap.args);
1935
1936 fuse_invalidate_atime(inode);
1937
1938 if (res < 0)
1939 return res;
1940
1941 if (WARN_ON(res >= PAGE_SIZE))
1942 return -EIO;
1943
1944 link = page_address(page);
1945 link[res] = '\0';
1946
1947 return 0;
1948 }
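
/*
 * The descriptor above caps the reply at PAGE_SIZE - 1 so that a
 * terminating NUL can be appended locally; the server sends the raw
 * target bytes without one.  On the daemon side this is, assuming the
 * libfuse low-level readlink operation and reply helper (not part of
 * this file), simply:
 *
 *        static void example_readlink(fuse_req_t req, fuse_ino_t ino)
 *        {
 *                fuse_reply_readlink(req, "target-of-the-link");
 *        }
 */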
1949
1950 static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
1951 struct delayed_call *callback)
1952 {
1953 struct fuse_conn *fc = get_fuse_conn(inode);
1954 struct page *page;
1955 int err;
1956
1957 err = -EIO;
1958 if (fuse_is_bad(inode))
1959 goto out_err;
1960
1961 #ifdef CONFIG_FUSE_BPF
1962 {
1963 struct fuse_err_ret fer;
1964 const char *out = NULL;
1965
1966 fer = fuse_bpf_backing(inode, struct fuse_dummy_io,
1967 fuse_get_link_initialize,
1968 fuse_get_link_backing,
1969 fuse_get_link_finalize,
1970 inode, dentry, callback, &out);
1971 if (fer.ret)
1972 return fer.result ?: out;
1973 }
1974 #endif
1975
1976 if (fc->cache_symlinks)
1977 return page_get_link_raw(dentry, inode, callback);
1978
1979 err = -ECHILD;
1980 if (!dentry)
1981 goto out_err;
1982
1983 page = alloc_page(GFP_KERNEL);
1984 err = -ENOMEM;
1985 if (!page)
1986 goto out_err;
1987
1988 err = fuse_readlink_page(inode, page);
1989 if (err) {
1990 __free_page(page);
1991 goto out_err;
1992 }
1993
1994 set_delayed_call(callback, page_put_link, page);
1995
1996 return page_address(page);
1997
1998 out_err:
1999 return ERR_PTR(err);
2000 }
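
/*
 * Note: when the connection negotiated FUSE_CACHE_SYMLINKS, the target
 * returned above comes from the page cache, which is filled by
 * fuse_symlink_read_folio() via fuse_symlink_aops at the bottom of this
 * file; otherwise a throw-away page is allocated and read on every
 * traversal.
 */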
2001
2002 static int fuse_dir_open(struct inode *inode, struct file *file)
2003 {
2004 struct fuse_mount *fm = get_fuse_mount(inode);
2005 int err;
2006
2007 if (fuse_is_bad(inode))
2008 return -EIO;
2009
2010 err = generic_file_open(inode, file);
2011 if (err)
2012 return err;
2013
2014 #ifdef CONFIG_FUSE_BPF
2015 {
2016 struct fuse_err_ret fer;
2017
2018 fer = fuse_bpf_backing(inode, struct fuse_open_io,
2019 fuse_open_initialize,
2020 fuse_open_backing,
2021 fuse_open_finalize,
2022 inode, file, true);
2023 if (fer.ret)
2024 return PTR_ERR(fer.result);
2025 }
2026 #endif
2027
2028 err = fuse_do_open(fm, get_node_id(inode), file, true);
2029 if (!err) {
2030 struct fuse_file *ff = file->private_data;
2031
2032 /*
2033 * Keep handling FOPEN_STREAM and FOPEN_NONSEEKABLE for
2034 * directories for backward compatibility, though it's unlikely
2035 * to be useful.
2036 */
2037 if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE))
2038 nonseekable_open(inode, file);
2039 if (!(ff->open_flags & FOPEN_KEEP_CACHE))
2040 invalidate_inode_pages2(inode->i_mapping);
2041 }
2042
2043 return err;
2044 }
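
/*
 * Whether cached readdir data from a previous open survives is
 * controlled by FOPEN_KEEP_CACHE above.  With libfuse this bit is
 * typically set from the daemon's opendir() handler through the
 * keep_cache field of struct fuse_file_info (an assumption about the
 * libfuse mapping, not something defined here):
 *
 *        static int example_opendir(const char *path,
 *                                   struct fuse_file_info *fi)
 *        {
 *                fi->keep_cache = 1;
 *                return 0;
 *        }
 */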
2045
2046 static int fuse_dir_release(struct inode *inode, struct file *file)
2047 {
2048 fuse_release_common(file, true);
2049 return 0;
2050 }
2051
2052 static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
2053 int datasync)
2054 {
2055 struct inode *inode = file->f_mapping->host;
2056 struct fuse_conn *fc = get_fuse_conn(inode);
2057 int err;
2058
2059 if (fuse_is_bad(inode))
2060 return -EIO;
2061
2062 #ifdef CONFIG_FUSE_BPF
2063 {
2064 struct fuse_err_ret fer;
2065
2066 fer = fuse_bpf_backing(inode, struct fuse_fsync_in,
2067 fuse_dir_fsync_initialize, fuse_fsync_backing,
2068 fuse_fsync_finalize,
2069 file, start, end, datasync);
2070 if (fer.ret)
2071 return PTR_ERR(fer.result);
2072 }
2073 #endif
2074
2075 if (fc->no_fsyncdir)
2076 return 0;
2077
2078 inode_lock(inode);
2079 err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
2080 if (err == -ENOSYS) {
2081 fc->no_fsyncdir = 1;
2082 err = 0;
2083 }
2084 inode_unlock(inode);
2085
2086 return err;
2087 }
2088
2089 static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
2090 unsigned long arg)
2091 {
2092 struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
2093
2094 /* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
2095 if (fc->minor < 18)
2096 return -ENOTTY;
2097
2098 return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
2099 }
2100
2101 static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
2102 unsigned long arg)
2103 {
2104 struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
2105
2106 if (fc->minor < 18)
2107 return -ENOTTY;
2108
2109 return fuse_ioctl_common(file, cmd, arg,
2110 FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
2111 }
2112
2113 /*
2114 * Prevent concurrent writepages on inode
2115 *
2116 * This is done by adding a negative bias to the inode write counter
2117 * and waiting for all pending writes to finish.
2118 */
2119 void fuse_set_nowrite(struct inode *inode)
2120 {
2121 struct fuse_inode *fi = get_fuse_inode(inode);
2122
2123 BUG_ON(!inode_is_locked(inode));
2124
2125 spin_lock(&fi->lock);
2126 BUG_ON(fi->writectr < 0);
2127 fi->writectr += FUSE_NOWRITE;
2128 spin_unlock(&fi->lock);
2129 wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
2130 }
2131
2132 /*
2133 * Allow writepages on inode
2134 *
2135 * Remove the bias from the write counter and send any queued
2136 * writepages.
2137 */
2138 static void __fuse_release_nowrite(struct inode *inode)
2139 {
2140 struct fuse_inode *fi = get_fuse_inode(inode);
2141
2142 BUG_ON(fi->writectr != FUSE_NOWRITE);
2143 fi->writectr = 0;
2144 fuse_flush_writepages(inode);
2145 }
2146
2147 void fuse_release_nowrite(struct inode *inode)
2148 {
2149 struct fuse_inode *fi = get_fuse_inode(inode);
2150
2151 spin_lock(&fi->lock);
2152 __fuse_release_nowrite(inode);
2153 spin_unlock(&fi->lock);
2154 }
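
/*
 * Usage pattern for the write-counter bias above; this is how
 * fuse_do_setattr() below brackets a truncate:
 *
 *        fuse_set_nowrite(inode);
 *        ... send FUSE_SETATTR with FATTR_SIZE, update i_size ...
 *        fuse_release_nowrite(inode);
 *
 * FUSE_NOWRITE is a large negative constant, so fi->writectr can only
 * equal FUSE_NOWRITE once every previously pending write has completed,
 * which is exactly what fuse_set_nowrite() waits for.
 */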
2155
2156 static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
2157 struct inode *inode,
2158 struct fuse_setattr_in *inarg_p,
2159 struct fuse_attr_out *outarg_p)
2160 {
2161 args->opcode = FUSE_SETATTR;
2162 args->nodeid = get_node_id(inode);
2163 args->in_numargs = 1;
2164 args->in_args[0].size = sizeof(*inarg_p);
2165 args->in_args[0].value = inarg_p;
2166 args->out_numargs = 1;
2167 args->out_args[0].size = sizeof(*outarg_p);
2168 args->out_args[0].value = outarg_p;
2169 }
2170
2171 /*
2172 * Flush inode->i_mtime (and i_ctime when supported) to the server
2173 */
2174 int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
2175 {
2176 struct fuse_mount *fm = get_fuse_mount(inode);
2177 FUSE_ARGS(args);
2178 struct fuse_setattr_in inarg;
2179 struct fuse_attr_out outarg;
2180
2181 memset(&inarg, 0, sizeof(inarg));
2182 memset(&outarg, 0, sizeof(outarg));
2183
2184 inarg.valid = FATTR_MTIME;
2185 inarg.mtime = inode_get_mtime_sec(inode);
2186 inarg.mtimensec = inode_get_mtime_nsec(inode);
2187 if (fm->fc->minor >= 23) {
2188 inarg.valid |= FATTR_CTIME;
2189 inarg.ctime = inode_get_ctime_sec(inode);
2190 inarg.ctimensec = inode_get_ctime_nsec(inode);
2191 }
2192 if (ff) {
2193 inarg.valid |= FATTR_FH;
2194 inarg.fh = ff->fh;
2195 }
2196 fuse_setattr_fill(fm->fc, &args, inode, &inarg, &outarg);
2197
2198 return fuse_simple_request(fm, &args);
2199 }
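
/*
 * On the wire the flush above is an ordinary FUSE_SETATTR whose FATTR_*
 * bits tell the server which fields to honour.  A hedged sketch of the
 * request payload (field names follow struct fuse_setattr_in as used
 * above; the timestamps and file handle are arbitrary examples):
 *
 *        struct fuse_setattr_in in = {
 *                .valid     = FATTR_MTIME | FATTR_CTIME | FATTR_FH,
 *                .mtime     = 1700000000,
 *                .mtimensec = 0,
 *                .ctime     = 1700000000,
 *                .ctimensec = 0,
 *                .fh        = example_file_handle,
 *        };
 */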
2200
2201 /*
2202 * Set attributes, and at the same time refresh them.
2203 *
2204 * Truncation is slightly complicated, because the 'truncate' request
2205 * may fail, in which case we don't want to touch the mapping.
2206 * vmtruncate() doesn't allow for this case, so do the rlimit checking
2207 * and the actual truncation by hand.
2208 */
2209 int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
2210 struct iattr *attr, struct file *file)
2211 {
2212 struct inode *inode = d_inode(dentry);
2213 struct fuse_mount *fm = get_fuse_mount(inode);
2214 struct fuse_conn *fc = fm->fc;
2215 struct fuse_inode *fi = get_fuse_inode(inode);
2216 struct address_space *mapping = inode->i_mapping;
2217 FUSE_ARGS(args);
2218 struct fuse_setattr_in inarg;
2219 struct fuse_attr_out outarg;
2220 bool is_truncate = false;
2221 bool is_wb = fc->writeback_cache && S_ISREG(inode->i_mode);
2222 loff_t oldsize;
2223 int err;
2224 bool trust_local_cmtime = is_wb;
2225 bool fault_blocked = false;
2226 u64 attr_version;
2227
2228 #ifdef CONFIG_FUSE_BPF
2229 struct fuse_err_ret fer;
2230
2231 fer = fuse_bpf_backing(inode, struct fuse_setattr_io,
2232 fuse_setattr_initialize, fuse_setattr_backing,
2233 fuse_setattr_finalize, dentry, attr, file);
2234 if (fer.ret)
2235 return PTR_ERR(fer.result);
2236 #endif
2237
2238 if (!fc->default_permissions)
2239 attr->ia_valid |= ATTR_FORCE;
2240
2241 err = setattr_prepare(idmap, dentry, attr);
2242 if (err)
2243 return err;
2244
2245 if (attr->ia_valid & ATTR_SIZE) {
2246 if (WARN_ON(!S_ISREG(inode->i_mode)))
2247 return -EIO;
2248 is_truncate = true;
2249 }
2250
2251 if (FUSE_IS_DAX(inode) && is_truncate) {
2252 filemap_invalidate_lock(mapping);
2253 fault_blocked = true;
2254 err = fuse_dax_break_layouts(inode, 0, -1);
2255 if (err) {
2256 filemap_invalidate_unlock(mapping);
2257 return err;
2258 }
2259 }
2260
2261 if (attr->ia_valid & ATTR_OPEN) {
2262 /* This is coming from open(..., ... | O_TRUNC); */
2263 WARN_ON(!(attr->ia_valid & ATTR_SIZE));
2264 WARN_ON(attr->ia_size != 0);
2265 if (fc->atomic_o_trunc) {
2266 /*
2267 * No need to send a request to userspace, since the actual
2268 * truncation has already been done by OPEN. But we still
2269 * need to truncate the page cache.
2270 */
2271 i_size_write(inode, 0);
2272 truncate_pagecache(inode, 0);
2273 goto out;
2274 }
2275 file = NULL;
2276 }
2277
2278 /* Flush dirty data/metadata before non-truncate SETATTR */
2279 if (is_wb &&
2280 attr->ia_valid &
2281 (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
2282 ATTR_TIMES_SET)) {
2283 err = write_inode_now(inode, true);
2284 if (err)
2285 return err;
2286
2287 fuse_set_nowrite(inode);
2288 fuse_release_nowrite(inode);
2289 }
2290
2291 if (is_truncate) {
2292 fuse_set_nowrite(inode);
2293 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2294 if (trust_local_cmtime && attr->ia_size != inode->i_size)
2295 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2296 }
2297
2298 memset(&inarg, 0, sizeof(inarg));
2299 memset(&outarg, 0, sizeof(outarg));
2300 iattr_to_fattr(idmap, fc, attr, &inarg, trust_local_cmtime);
2301 if (file) {
2302 struct fuse_file *ff = file->private_data;
2303 inarg.valid |= FATTR_FH;
2304 inarg.fh = ff->fh;
2305 }
2306
2307 /* Kill suid/sgid for non-directory chown unconditionally */
2308 if (fc->handle_killpriv_v2 && !S_ISDIR(inode->i_mode) &&
2309 attr->ia_valid & (ATTR_UID | ATTR_GID))
2310 inarg.valid |= FATTR_KILL_SUIDGID;
2311
2312 if (attr->ia_valid & ATTR_SIZE) {
2313 /* For mandatory locking in truncate */
2314 inarg.valid |= FATTR_LOCKOWNER;
2315 inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
2316
2317 /* Kill suid/sgid for truncate only if no CAP_FSETID */
2318 if (fc->handle_killpriv_v2 && !capable(CAP_FSETID))
2319 inarg.valid |= FATTR_KILL_SUIDGID;
2320 }
2321
2322 attr_version = fuse_get_attr_version(fm->fc);
2323 fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
2324 err = fuse_simple_request(fm, &args);
2325 if (err) {
2326 if (err == -EINTR)
2327 fuse_invalidate_attr(inode);
2328 goto error;
2329 }
2330
2331 if (fuse_invalid_attr(&outarg.attr) ||
2332 inode_wrong_type(inode, outarg.attr.mode)) {
2333 fuse_make_bad(inode);
2334 err = -EIO;
2335 goto error;
2336 }
2337
2338 spin_lock(&fi->lock);
2339 /* the kernel maintains i_mtime locally */
2340 if (trust_local_cmtime) {
2341 if (attr->ia_valid & ATTR_MTIME)
2342 inode_set_mtime_to_ts(inode, attr->ia_mtime);
2343 if (attr->ia_valid & ATTR_CTIME)
2344 inode_set_ctime_to_ts(inode, attr->ia_ctime);
2345 /* FIXME: clear I_DIRTY_SYNC? */
2346 }
2347
2348 if (fi->attr_version > attr_version) {
2349 /*
2350 * Apply attributes, for example for fsnotify_change(), but set
2351 * attribute timeout to zero.
2352 */
2353 outarg.attr_valid = outarg.attr_valid_nsec = 0;
2354 }
2355
2356 fuse_change_attributes_common(inode, &outarg.attr, NULL,
2357 ATTR_TIMEOUT(&outarg),
2358 fuse_get_cache_mask(inode));
2359 oldsize = inode->i_size;
2360 /* see the comment in fuse_change_attributes() */
2361 if (!is_wb || is_truncate)
2362 i_size_write(inode, outarg.attr.size);
2363
2364 if (is_truncate) {
2365 /* NOTE: this may release/reacquire fi->lock */
2366 __fuse_release_nowrite(inode);
2367 }
2368 spin_unlock(&fi->lock);
2369
2370 /*
2371 * Only call invalidate_inode_pages2() after removing
2372 * FUSE_NOWRITE, otherwise fuse_launder_folio() would deadlock.
2373 */
2374 if ((is_truncate || !is_wb) &&
2375 S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
2376 truncate_pagecache(inode, outarg.attr.size);
2377 invalidate_inode_pages2(mapping);
2378 }
2379
2380 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2381 out:
2382 if (fault_blocked)
2383 filemap_invalidate_unlock(mapping);
2384
2385 return 0;
2386
2387 error:
2388 if (is_truncate)
2389 fuse_release_nowrite(inode);
2390
2391 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2392
2393 if (fault_blocked)
2394 filemap_invalidate_unlock(mapping);
2395 return err;
2396 }
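
/*
 * The truncate handling above is what stands behind an ordinary
 * truncate(2)/ftruncate(2) on a FUSE file: writeback is frozen with
 * fuse_set_nowrite(), FUSE_SETATTR with FATTR_SIZE is sent, and only
 * then is the page cache shrunk.  A minimal caller (path hypothetical):
 *
 *        #include <unistd.h>
 *        #include <stdio.h>
 *
 *        int main(void)
 *        {
 *                if (truncate("/mnt/fuse/some-file", 4096) != 0)
 *                        perror("truncate");
 *                return 0;
 *        }
 */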
2397
2398 static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
2399 struct iattr *attr)
2400 {
2401 struct inode *inode = d_inode(entry);
2402 struct fuse_conn *fc = get_fuse_conn(inode);
2403 struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
2404 int ret;
2405
2406 if (fuse_is_bad(inode))
2407 return -EIO;
2408
2409 if (!fuse_allow_current_process(get_fuse_conn(inode)))
2410 return -EACCES;
2411
2412 if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
2413 attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
2414 ATTR_MODE);
2415
2416 /*
2417 * The only sane way to reliably kill suid/sgid is to do it in
2418 * the userspace filesystem.
2419 *
2420 * This should be done on write(), truncate() and chown().
2421 */
2422 if (!fc->handle_killpriv && !fc->handle_killpriv_v2) {
2423 #ifdef CONFIG_FUSE_BPF
2424 struct fuse_err_ret fer;
2425
2426 /*
2427 * ia_mode calculation may have used stale i_mode.
2428 * Refresh and recalculate.
2429 */
2430 fer = fuse_bpf_backing(inode, struct fuse_getattr_io,
2431 fuse_getattr_initialize, fuse_getattr_backing,
2432 fuse_getattr_finalize,
2433 entry, NULL, 0, 0);
2434 if (fer.ret)
2435 ret = PTR_ERR(fer.result);
2436 else
2437 #endif
2438 ret = fuse_do_getattr(idmap, inode, NULL, file);
2439 if (ret)
2440 return ret;
2441
2442 attr->ia_mode = inode->i_mode;
2443 if (inode->i_mode & S_ISUID) {
2444 attr->ia_valid |= ATTR_MODE;
2445 attr->ia_mode &= ~S_ISUID;
2446 }
2447 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
2448 attr->ia_valid |= ATTR_MODE;
2449 attr->ia_mode &= ~S_ISGID;
2450 }
2451 }
2452 }
2453 if (!attr->ia_valid)
2454 return 0;
2455
2456 ret = fuse_do_setattr(idmap, entry, attr, file);
2457 if (!ret) {
2458 /*
2459 * If the filesystem supports acls, it may have updated acl xattrs
2460 * in the filesystem, so forget cached acls for the inode.
2461 */
2462 if (fc->posix_acl)
2463 forget_all_cached_acls(inode);
2464
2465 /* Directory mode changed, may need to revalidate access */
2466 if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE))
2467 fuse_invalidate_entry_cache(entry);
2468 }
2469 return ret;
2470 }
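
/*
 * The ATTR_KILL_SUID/ATTR_KILL_SGID handling above mirrors what chown(2)
 * requires: if the daemon does not handle it (no handle_killpriv*), the
 * kernel refreshes i_mode and adds an ATTR_MODE change that drops the
 * setuid bit, and drops the setgid bit only when the file is also
 * group-executable.  The userspace trigger is simply (path and ids
 * hypothetical):
 *
 *        #include <unistd.h>
 *        #include <stdio.h>
 *
 *        int main(void)
 *        {
 *                if (chown("/mnt/fuse/suid-binary", 1000, 1000) != 0)
 *                        perror("chown");
 *                return 0;
 *        }
 */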
2471
2472 static int fuse_getattr(struct mnt_idmap *idmap,
2473 const struct path *path, struct kstat *stat,
2474 u32 request_mask, unsigned int flags)
2475 {
2476 struct inode *inode = d_inode(path->dentry);
2477 struct fuse_conn *fc = get_fuse_conn(inode);
2478
2479 if (fuse_is_bad(inode))
2480 return -EIO;
2481
2482 if (!fuse_allow_current_process(fc)) {
2483 if (!request_mask) {
2484 /*
2485 * If user explicitly requested *nothing* then don't
2486 * error out, but return st_dev only.
2487 */
2488 stat->result_mask = 0;
2489 stat->dev = inode->i_sb->s_dev;
2490 return 0;
2491 }
2492 return -EACCES;
2493 }
2494
2495 return fuse_update_get_attr(idmap, inode, NULL, path, stat, request_mask,
2496 flags);
2497 }
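
/*
 * The request_mask == 0 special case above means a caller that asks for
 * no attributes at all still learns the device number instead of getting
 * -EACCES.  With statx(2) that corresponds to passing an empty mask
 * (path hypothetical, assuming a libc with a statx() wrapper):
 *
 *        #include <fcntl.h>
 *        #include <sys/stat.h>
 *        #include <stdio.h>
 *
 *        int main(void)
 *        {
 *                struct statx stx;
 *
 *                if (statx(AT_FDCWD, "/mnt/fuse/some-file", 0, 0, &stx) == 0)
 *                        printf("dev %u:%u\n", stx.stx_dev_major,
 *                               stx.stx_dev_minor);
 *                return 0;
 *        }
 */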
2498
2499 static const struct inode_operations fuse_dir_inode_operations = {
2500 .lookup = fuse_lookup,
2501 .mkdir = fuse_mkdir,
2502 .symlink = fuse_symlink,
2503 .unlink = fuse_unlink,
2504 .rmdir = fuse_rmdir,
2505 .rename = fuse_rename2,
2506 .link = fuse_link,
2507 .setattr = fuse_setattr,
2508 .create = fuse_create,
2509 .atomic_open = fuse_atomic_open,
2510 .tmpfile = fuse_tmpfile,
2511 .mknod = fuse_mknod,
2512 .permission = fuse_permission,
2513 .getattr = fuse_getattr,
2514 .listxattr = fuse_listxattr,
2515 .get_inode_acl = fuse_get_inode_acl,
2516 .get_acl = fuse_get_acl,
2517 .set_acl = fuse_set_acl,
2518 .fileattr_get = fuse_fileattr_get,
2519 .fileattr_set = fuse_fileattr_set,
2520 };
2521
2522 static const struct file_operations fuse_dir_operations = {
2523 .llseek = generic_file_llseek,
2524 .read = generic_read_dir,
2525 .iterate_shared = fuse_readdir,
2526 .open = fuse_dir_open,
2527 .release = fuse_dir_release,
2528 .fsync = fuse_dir_fsync,
2529 .unlocked_ioctl = fuse_dir_ioctl,
2530 .compat_ioctl = fuse_dir_compat_ioctl,
2531 };
2532
2533 static const struct inode_operations fuse_common_inode_operations = {
2534 .setattr = fuse_setattr,
2535 .permission = fuse_permission,
2536 .getattr = fuse_getattr,
2537 .listxattr = fuse_listxattr,
2538 .get_inode_acl = fuse_get_inode_acl,
2539 .get_acl = fuse_get_acl,
2540 .set_acl = fuse_set_acl,
2541 .fileattr_get = fuse_fileattr_get,
2542 .fileattr_set = fuse_fileattr_set,
2543 };
2544
2545 static const struct inode_operations fuse_symlink_inode_operations = {
2546 .setattr = fuse_setattr,
2547 .get_link = fuse_get_link,
2548 .getattr = fuse_getattr,
2549 .listxattr = fuse_listxattr,
2550 };
2551
2552 void fuse_init_common(struct inode *inode)
2553 {
2554 inode->i_op = &fuse_common_inode_operations;
2555 }
2556
2557 void fuse_init_dir(struct inode *inode)
2558 {
2559 struct fuse_inode *fi = get_fuse_inode(inode);
2560
2561 inode->i_op = &fuse_dir_inode_operations;
2562 inode->i_fop = &fuse_dir_operations;
2563
2564 spin_lock_init(&fi->rdc.lock);
2565 fi->rdc.cached = false;
2566 fi->rdc.size = 0;
2567 fi->rdc.pos = 0;
2568 fi->rdc.version = 0;
2569 }
2570
2571 static int fuse_symlink_read_folio(struct file *null, struct folio *folio)
2572 {
2573 int err = fuse_readlink_page(folio->mapping->host, &folio->page);
2574
2575 if (!err)
2576 folio_mark_uptodate(folio);
2577
2578 folio_unlock(folio);
2579
2580 return err;
2581 }
2582
2583 static const struct address_space_operations fuse_symlink_aops = {
2584 .read_folio = fuse_symlink_read_folio,
2585 };
2586
2587 void fuse_init_symlink(struct inode *inode)
2588 {
2589 inode->i_op = &fuse_symlink_inode_operations;
2590 inode->i_data.a_ops = &fuse_symlink_aops;
2591 inode_nohighmem(inode);
2592 }
2593