// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
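
/*
 * Illustrative sketch (an addition for clarity, not upstream code): the
 * read side of mount_lock is a classic seqlock retry loop, as used by
 * lookup_mnt() below:
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqbegin(&mount_lock);
 *		... look the mount up in the hash ...
 *	} while (read_seqretry(&mount_lock, seq));
 *
 * Writers serialize through lock_mount_hash()/unlock_mount_hash(),
 * which take the write side of this seqlock.
 */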

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is r/w right *now*.  This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink().  We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success.  When the write operation is
 * finished, mnt_drop_write() must be called.  This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
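
/*
 * Usage sketch (illustration only, not part of the original file): a
 * typical caller brackets a modification with the want/drop pair:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	... modify the filesystem ...
 *	mnt_drop_write(path->mnt);
 *
 * The matching mnt_drop_write() releases both the per-mount write count
 * and the sb_start_write() freeze protection taken here.
 */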

/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(mnt_want_write_file, ANDROID_GKI_VFS_EXPORT_ONLY);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL_NS(mnt_drop_write_file, ANDROID_GKI_VFS_EXPORT_ONLY);

static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}
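
/*
 * Informal summary of the handshake between mnt_make_readonly() and
 * __mnt_want_write() (added for illustration):
 *
 *	writer (fast path)		remount-r/o (slow path)
 *	------------------		-----------------------
 *	mnt_inc_writers()		set MNT_WRITE_HOLD
 *	smp_mb()			smp_mb()
 *	spin while MNT_WRITE_HOLD	sum writers via mnt_get_writers()
 *	smp_rmb()			set MNT_READONLY if the sum is 0
 *	mnt_is_readonly() check		smp_wmb(); clear MNT_WRITE_HOLD
 *
 * Either the writer's increment is included in the sum (-EBUSY here), or
 * the writer observes MNT_READONLY and backs out with -EROFS.
 */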

static int __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
	return 0;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
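
/*
 * Usage sketch (illustration only): the returned vfsmount carries a
 * reference, so callers pair lookup_mnt() with mntput():
 *
 *	struct vfsmount *m = lookup_mnt(&path);
 *
 *	if (m) {
 *		... use m ...
 *		mntput(m);
 *	}
 */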

static inline void lock_ns_list(struct mnt_namespace *ns)
{
	spin_lock(&ns->ns_lock);
}

static inline void unlock_ns_list(struct mnt_namespace *ns)
{
	spin_unlock(&ns->ns_lock);
}

static inline bool mnt_is_cursor(struct mount *mnt)
{
	return mnt->mnt.mnt_flags & MNT_CURSOR;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		if (mnt_is_cursor(mnt))
			continue;
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	unlock_ns_list(ns);
	up_read(&namespace_sem);

	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}

/*
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	__attach_mnt(mnt, parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}
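
/*
 * next_mnt() yields a pre-order, depth-first walk of the tree rooted at
 * @root; the idiom used throughout this file (e.g. may_umount_tree()) is:
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 *		... visit p ...
 */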

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb		= fc->root->d_sb;
	mnt->mnt.mnt_root	= dget(fc->root);
	mnt->mnt_mountpoint	= mnt->mnt.mnt_root;
	mnt->mnt_parent		= mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
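
/*
 * Usage sketch (hypothetical example_fs_type, added for illustration):
 * an in-kernel user mounts a filesystem and later releases it with
 * mntput():
 *
 *	struct vfsmount *mnt;
 *
 *	mnt = vfs_kern_mount(&example_fs_type, 0, "example", NULL);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *	... use mnt ...
 *	mntput(mnt);
 */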

struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
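
/*
 * Sketch (illustration only): mntget()/mntput() form the refcount pair;
 * every successful mntget() must eventually be balanced by mntput():
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *
 *	... m is pinned here ...
 *	mntput(m);
 */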

/* path_is_mountpoint() - Check if path is a mount in the current
 *                          namespace.
 *
 *  d_mountpoint() can only be used reliably to establish if a dentry is
 *  not mounted in any namespace and that common case is handled inline.
 *  d_mountpoint() isn't aware of the possibility there may be multiple
 *  mounts using a given dentry in a different namespace. This function
 *  checks if the passed in path is a mountpoint rather than the dentry
 *  alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

#ifdef CONFIG_PROC_FS
static struct mount *mnt_list_next(struct mnt_namespace *ns,
				   struct list_head *p)
{
	struct mount *mnt, *ret = NULL;

	lock_ns_list(ns);
	list_for_each_continue(p, &ns->list) {
		mnt = list_entry(p, typeof(*mnt), mnt_list);
		if (!mnt_is_cursor(mnt)) {
			ret = mnt;
			break;
		}
	}
	unlock_ns_list(ns);

	return ret;
}

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct list_head *prev;

	down_read(&namespace_sem);
	if (!*pos) {
		prev = &p->ns->list;
	} else {
		prev = &p->cursor.mnt_list;

		/* Read after we'd reached the end? */
		if (list_empty(prev))
			return NULL;
	}

	return mnt_list_next(p->ns, prev);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	++*pos;
	return mnt_list_next(p->ns, &mnt->mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	lock_ns_list(p->ns);
	if (mnt)
		list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
	else
		list_del_init(&p->cursor.mnt_list);
	unlock_ns_list(p->ns);
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
{
	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_del(&cursor->mnt_list);
	unlock_ns_list(ns);
	up_read(&namespace_sem);
}
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);

	up_write(&namespace_sem);

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};
1446 
disconnect_mount(struct mount * mnt,enum umount_tree_flags how)1447 static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1448 {
1449 	/* Leaving mounts connected is only valid for lazy umounts */
1450 	if (how & UMOUNT_SYNC)
1451 		return true;
1452 
1453 	/* A mount without a parent has nothing to be connected to */
1454 	if (!mnt_has_parent(mnt))
1455 		return true;
1456 
1457 	/* Because the reference counting rules change when mounts are
1458 	 * unmounted and connected, umounted mounts may not be
1459 	 * connected to mounted mounts.
1460 	 */
1461 	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1462 		return true;
1463 
1464 	/* Has it been requested that the mount remain connected? */
1465 	if (how & UMOUNT_CONNECTED)
1466 		return false;
1467 
1468 	/* Is the mount locked such that it needs to remain connected? */
1469 	if (IS_MNT_LOCKED(mnt))
1470 		return false;
1471 
1472 	/* By default disconnect the mount */
1473 	return true;
1474 }
1475 
1476 /*
1477  * mount_lock must be held
1478  * namespace_sem must be held for write
1479  */
umount_tree(struct mount * mnt,enum umount_tree_flags how)1480 static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1481 {
1482 	LIST_HEAD(tmp_list);
1483 	struct mount *p;
1484 
1485 	if (how & UMOUNT_PROPAGATE)
1486 		propagate_mount_unlock(mnt);
1487 
1488 	/* Gather the mounts to umount */
1489 	for (p = mnt; p; p = next_mnt(p, mnt)) {
1490 		p->mnt.mnt_flags |= MNT_UMOUNT;
1491 		list_move(&p->mnt_list, &tmp_list);
1492 	}
1493 
1494 	/* Hide the mounts from mnt_mounts */
1495 	list_for_each_entry(p, &tmp_list, mnt_list) {
1496 		list_del_init(&p->mnt_child);
1497 	}
1498 
1499 	/* Add propogated mounts to the tmp_list */
1500 	if (how & UMOUNT_PROPAGATE)
1501 		propagate_umount(&tmp_list);
1502 
1503 	while (!list_empty(&tmp_list)) {
1504 		struct mnt_namespace *ns;
1505 		bool disconnect;
1506 		p = list_first_entry(&tmp_list, struct mount, mnt_list);
1507 		list_del_init(&p->mnt_expire);
1508 		list_del_init(&p->mnt_list);
1509 		ns = p->mnt_ns;
1510 		if (ns) {
1511 			ns->mounts--;
1512 			__touch_mnt_namespace(ns);
1513 		}
1514 		p->mnt_ns = NULL;
1515 		if (how & UMOUNT_SYNC)
1516 			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1517 
1518 		disconnect = disconnect_mount(p, how);
1519 		if (mnt_has_parent(p)) {
1520 			mnt_add_count(p->mnt_parent, -1);
1521 			if (!disconnect) {
1522 				/* Don't forget about p */
1523 				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
1524 			} else {
1525 				umount_mnt(p);
1526 			}
1527 		}
1528 		change_mnt_propagation(p, MS_PRIVATE);
1529 		if (disconnect)
1530 			hlist_add_head(&p->mnt_umount, &unmounted);
1531 	}
1532 }
1533 
1534 static void shrink_submounts(struct mount *mnt);
1535 
do_umount_root(struct super_block * sb)1536 static int do_umount_root(struct super_block *sb)
1537 {
1538 	int ret = 0;
1539 
1540 	down_write(&sb->s_umount);
1541 	if (!sb_rdonly(sb)) {
1542 		struct fs_context *fc;
1543 
1544 		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
1545 						SB_RDONLY);
1546 		if (IS_ERR(fc)) {
1547 			ret = PTR_ERR(fc);
1548 		} else {
1549 			ret = parse_monolithic_mount_data(fc, NULL);
1550 			if (!ret)
1551 				ret = reconfigure_super(fc);
1552 			put_fs_context(fc);
1553 		}
1554 	}
1555 	up_write(&sb->s_umount);
1556 	return ret;
1557 }
1558 
do_umount(struct mount * mnt,int flags)1559 static int do_umount(struct mount *mnt, int flags)
1560 {
1561 	struct super_block *sb = mnt->mnt.mnt_sb;
1562 	int retval;
1563 
1564 	retval = security_sb_umount(&mnt->mnt, flags);
1565 	if (retval)
1566 		return retval;
1567 
1568 	/*
1569 	 * Allow userspace to request a mountpoint be expired rather than
1570 	 * unmounting unconditionally. Unmount only happens if:
1571 	 *  (1) the mark is already set (the mark is cleared by mntput())
1572 	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
1573 	 */
1574 	if (flags & MNT_EXPIRE) {
1575 		if (&mnt->mnt == current->fs->root.mnt ||
1576 		    flags & (MNT_FORCE | MNT_DETACH))
1577 			return -EINVAL;
1578 
1579 		/*
1580 		 * probably don't strictly need the lock here if we examined
1581 		 * all race cases, but it's a slowpath.
1582 		 */
1583 		lock_mount_hash();
1584 		if (mnt_get_count(mnt) != 2) {
1585 			unlock_mount_hash();
1586 			return -EBUSY;
1587 		}
1588 		unlock_mount_hash();
1589 
1590 		if (!xchg(&mnt->mnt_expiry_mark, 1))
1591 			return -EAGAIN;
1592 	}
1593 
1594 	/*
1595 	 * If we may have to abort operations to get out of this
1596 	 * mount, and they will themselves hold resources we must
1597 	 * allow the fs to do things. In the Unix tradition of
1598 	 * 'Gee thats tricky lets do it in userspace' the umount_begin
1599 	 * might fail to complete on the first run through as other tasks
1600 	 * must return, and the like. Thats for the mount program to worry
1601 	 * about for the moment.
1602 	 */
1603 
1604 	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
1605 		sb->s_op->umount_begin(sb);
1606 	}
1607 
1608 	/*
1609 	 * No sense to grab the lock for this test, but test itself looks
1610 	 * somewhat bogus. Suggestions for better replacement?
1611 	 * Ho-hum... In principle, we might treat that as umount + switch
1612 	 * to rootfs. GC would eventually take care of the old vfsmount.
1613 	 * Actually it makes sense, especially if rootfs would contain a
1614 	 * /reboot - static binary that would close all descriptors and
1615 	 * call reboot(9). Then init(8) could umount root and exec /reboot.
1616 	 */
1617 	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1618 		/*
1619 		 * Special case for "unmounting" root ...
1620 		 * we just try to remount it readonly.
1621 		 */
1622 		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
1623 			return -EPERM;
1624 		return do_umount_root(sb);
1625 	}
1626 
1627 	namespace_lock();
1628 	lock_mount_hash();
1629 
1630 	/* Recheck MNT_LOCKED with the locks held */
1631 	retval = -EINVAL;
1632 	if (mnt->mnt.mnt_flags & MNT_LOCKED)
1633 		goto out;
1634 
1635 	event++;
1636 	if (flags & MNT_DETACH) {
1637 		if (!list_empty(&mnt->mnt_list))
1638 			umount_tree(mnt, UMOUNT_PROPAGATE);
1639 		retval = 0;
1640 	} else {
1641 		shrink_submounts(mnt);
1642 		retval = -EBUSY;
1643 		if (!propagate_mount_busy(mnt, 2)) {
1644 			if (!list_empty(&mnt->mnt_list))
1645 				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
1646 			retval = 0;
1647 		}
1648 	}
1649 out:
1650 	unlock_mount_hash();
1651 	namespace_unlock();
1652 	return retval;
1653 }
1654 
1655 /*
1656  * __detach_mounts - lazily unmount all mounts on the specified dentry
1657  *
1658  * During unlink, rmdir, and d_drop it is possible to loose the path
1659  * to an existing mountpoint, and wind up leaking the mount.
1660  * detach_mounts allows lazily unmounting those mounts instead of
1661  * leaking them.
1662  *
1663  * The caller may hold dentry->d_inode->i_mutex.
1664  */
__detach_mounts(struct dentry * dentry)1665 void __detach_mounts(struct dentry *dentry)
1666 {
1667 	struct mountpoint *mp;
1668 	struct mount *mnt;
1669 
1670 	namespace_lock();
1671 	lock_mount_hash();
1672 	mp = lookup_mountpoint(dentry);
1673 	if (!mp)
1674 		goto out_unlock;
1675 
1676 	event++;
1677 	while (!hlist_empty(&mp->m_list)) {
1678 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1679 		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1680 			umount_mnt(mnt);
1681 			hlist_add_head(&mnt->mnt_umount, &unmounted);
1682 		}
1683 		else umount_tree(mnt, UMOUNT_CONNECTED);
1684 	}
1685 	put_mountpoint(mp);
1686 out_unlock:
1687 	unlock_mount_hash();
1688 	namespace_unlock();
1689 }
1690 
1691 /*
1692  * Is the caller allowed to modify his namespace?
1693  */
may_mount(void)1694 static inline bool may_mount(void)
1695 {
1696 	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
1697 }
1698 
1699 #ifdef	CONFIG_MANDATORY_FILE_LOCKING
may_mandlock(void)1700 static bool may_mandlock(void)
1701 {
1702 	pr_warn_once("======================================================\n"
1703 		     "WARNING: the mand mount option is being deprecated and\n"
1704 		     "         will be removed in v5.15!\n"
1705 		     "======================================================\n");
1706 	return capable(CAP_SYS_ADMIN);
1707 }
1708 #else
may_mandlock(void)1709 static inline bool may_mandlock(void)
1710 {
1711 	pr_warn("VFS: \"mand\" mount option not supported");
1712 	return false;
1713 }
1714 #endif
1715 
can_umount(const struct path * path,int flags)1716 static int can_umount(const struct path *path, int flags)
1717 {
1718 	struct mount *mnt = real_mount(path->mnt);
1719 
1720 	if (!may_mount())
1721 		return -EPERM;
1722 	if (path->dentry != path->mnt->mnt_root)
1723 		return -EINVAL;
1724 	if (!check_mnt(mnt))
1725 		return -EINVAL;
1726 	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
1727 		return -EINVAL;
1728 	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
1729 		return -EPERM;
1730 	return 0;
1731 }
1732 
1733 // caller is responsible for flags being sane
path_umount(struct path * path,int flags)1734 int path_umount(struct path *path, int flags)
1735 {
1736 	struct mount *mnt = real_mount(path->mnt);
1737 	int ret;
1738 
1739 	ret = can_umount(path, flags);
1740 	if (!ret)
1741 		ret = do_umount(mnt, flags);
1742 
1743 	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
1744 	dput(path->dentry);
1745 	mntput_no_expire(mnt);
1746 	return ret;
1747 }
1748 
ksys_umount(char __user * name,int flags)1749 static int ksys_umount(char __user *name, int flags)
1750 {
1751 	int lookup_flags = LOOKUP_MOUNTPOINT;
1752 	struct path path;
1753 	int ret;
1754 
1755 	// basic validity checks done first
1756 	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
1757 		return -EINVAL;
1758 
1759 	if (!(flags & UMOUNT_NOFOLLOW))
1760 		lookup_flags |= LOOKUP_FOLLOW;
1761 	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
1762 	if (ret)
1763 		return ret;
1764 	return path_umount(&path, flags);
1765 }
1766 
SYSCALL_DEFINE2(umount,char __user *,name,int,flags)1767 SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1768 {
1769 	return ksys_umount(name, flags);
1770 }
1771 
1772 #ifdef __ARCH_WANT_SYS_OLDUMOUNT
1773 
1774 /*
1775  *	The 2.0 compatible umount. No flags.
1776  */
SYSCALL_DEFINE1(oldumount,char __user *,name)1777 SYSCALL_DEFINE1(oldumount, char __user *, name)
1778 {
1779 	return ksys_umount(name, 0);
1780 }
1781 
1782 #endif
1783 
is_mnt_ns_file(struct dentry * dentry)1784 static bool is_mnt_ns_file(struct dentry *dentry)
1785 {
1786 	/* Is this a proxy for a mount namespace? */
1787 	return dentry->d_op == &ns_dentry_operations &&
1788 	       dentry->d_fsdata == &mntns_operations;
1789 }
1790 
to_mnt_ns(struct ns_common * ns)1791 static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
1792 {
1793 	return container_of(ns, struct mnt_namespace, ns);
1794 }
1795 
from_mnt_ns(struct mnt_namespace * mnt)1796 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
1797 {
1798 	return &mnt->ns;
1799 }
1800 
mnt_ns_loop(struct dentry * dentry)1801 static bool mnt_ns_loop(struct dentry *dentry)
1802 {
1803 	/* Could bind mounting the mount namespace inode cause a
1804 	 * mount namespace loop?
1805 	 */
1806 	struct mnt_namespace *mnt_ns;
1807 	if (!is_mnt_ns_file(dentry))
1808 		return false;
1809 
1810 	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
1811 	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
1812 }
1813 
copy_tree(struct mount * mnt,struct dentry * dentry,int flag)1814 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1815 					int flag)
1816 {
1817 	struct mount *res, *p, *q, *r, *parent;
1818 
1819 	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
1820 		return ERR_PTR(-EINVAL);
1821 
1822 	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
1823 		return ERR_PTR(-EINVAL);
1824 
1825 	res = q = clone_mnt(mnt, dentry, flag);
1826 	if (IS_ERR(q))
1827 		return q;
1828 
1829 	q->mnt_mountpoint = mnt->mnt_mountpoint;
1830 
1831 	p = mnt;
1832 	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
1833 		struct mount *s;
1834 		if (!is_subdir(r->mnt_mountpoint, dentry))
1835 			continue;
1836 
1837 		for (s = r; s; s = next_mnt(s, r)) {
1838 			if (!(flag & CL_COPY_UNBINDABLE) &&
1839 			    IS_MNT_UNBINDABLE(s)) {
1840 				if (s->mnt.mnt_flags & MNT_LOCKED) {
1841 					/* Both unbindable and locked. */
1842 					q = ERR_PTR(-EPERM);
1843 					goto out;
1844 				} else {
1845 					s = skip_mnt_tree(s);
1846 					continue;
1847 				}
1848 			}
1849 			if (!(flag & CL_COPY_MNT_NS_FILE) &&
1850 			    is_mnt_ns_file(s->mnt.mnt_root)) {
1851 				s = skip_mnt_tree(s);
1852 				continue;
1853 			}
1854 			while (p != s->mnt_parent) {
1855 				p = p->mnt_parent;
1856 				q = q->mnt_parent;
1857 			}
1858 			p = s;
1859 			parent = q;
1860 			q = clone_mnt(p, p->mnt.mnt_root, flag);
1861 			if (IS_ERR(q))
1862 				goto out;
1863 			lock_mount_hash();
1864 			list_add_tail(&q->mnt_list, &res->mnt_list);
1865 			attach_mnt(q, parent, p->mnt_mp);
1866 			unlock_mount_hash();
1867 		}
1868 	}
1869 	return res;
1870 out:
1871 	if (res) {
1872 		lock_mount_hash();
1873 		umount_tree(res, UMOUNT_SYNC);
1874 		unlock_mount_hash();
1875 	}
1876 	return q;
1877 }
1878 
1879 /* Caller should check returned pointer for errors */
1880 
1881 struct vfsmount *collect_mounts(const struct path *path)
1882 {
1883 	struct mount *tree;
1884 	namespace_lock();
1885 	if (!check_mnt(real_mount(path->mnt)))
1886 		tree = ERR_PTR(-EINVAL);
1887 	else
1888 		tree = copy_tree(real_mount(path->mnt), path->dentry,
1889 				 CL_COPY_ALL | CL_PRIVATE);
1890 	namespace_unlock();
1891 	if (IS_ERR(tree))
1892 		return ERR_CAST(tree);
1893 	return &tree->mnt;
1894 }
1895 
1896 static void free_mnt_ns(struct mnt_namespace *);
1897 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);
1898 
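/*
 * Undo an anonymous mount tree on the final fput() of an O_PATH file
 * created by open_tree(OPEN_TREE_CLONE) or fsmount(): if the tree still
 * belongs to an anonymous namespace, unmount it and free the namespace.
 */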
1899 void dissolve_on_fput(struct vfsmount *mnt)
1900 {
1901 	struct mnt_namespace *ns;
1902 	namespace_lock();
1903 	lock_mount_hash();
1904 	ns = real_mount(mnt)->mnt_ns;
1905 	if (ns) {
1906 		if (is_anon_ns(ns))
1907 			umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
1908 		else
1909 			ns = NULL;
1910 	}
1911 	unlock_mount_hash();
1912 	namespace_unlock();
1913 	if (ns)
1914 		free_mnt_ns(ns);
1915 }
1916 
1917 void drop_collected_mounts(struct vfsmount *mnt)
1918 {
1919 	namespace_lock();
1920 	lock_mount_hash();
1921 	umount_tree(real_mount(mnt), 0);
1922 	unlock_mount_hash();
1923 	namespace_unlock();
1924 }
1925 
1926 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
1927 {
1928 	struct mount *child;
1929 
1930 	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
1931 		if (!is_subdir(child->mnt_mountpoint, dentry))
1932 			continue;
1933 
1934 		if (child->mnt.mnt_flags & MNT_LOCKED)
1935 			return true;
1936 	}
1937 	return false;
1938 }
1939 
1940 /**
1941  * clone_private_mount - create a private clone of a path
1942  *
1943  * This creates a new vfsmount, which will be a clone of @path.  The new
1944  * mount will not be attached anywhere in the namespace and will be private
1945  * (i.e. changes to the originating mount won't be propagated into it).
1946  *
1947  * Release with mntput().
1948  */
1949 struct vfsmount *clone_private_mount(const struct path *path)
1950 {
1951 	struct mount *old_mnt = real_mount(path->mnt);
1952 	struct mount *new_mnt;
1953 
1954 	down_read(&namespace_sem);
1955 	if (IS_MNT_UNBINDABLE(old_mnt))
1956 		goto invalid;
1957 
1958 	if (!check_mnt(old_mnt))
1959 		goto invalid;
1960 
1961 	if (has_locked_children(old_mnt, path->dentry))
1962 		goto invalid;
1963 
1964 	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
1965 	up_read(&namespace_sem);
1966 
1967 	if (IS_ERR(new_mnt))
1968 		return ERR_CAST(new_mnt);
1969 
1970 	/* Longterm mount to be removed by kern_unmount*() */
1971 	new_mnt->mnt_ns = MNT_NS_INTERNAL;
1972 
1973 	return &new_mnt->mnt;
1974 
1975 invalid:
1976 	up_read(&namespace_sem);
1977 	return ERR_PTR(-EINVAL);
1978 }
1979 EXPORT_SYMBOL_GPL(clone_private_mount);
1980 
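/*
 * Kernel-side usage sketch (illustrative only): a filesystem such as
 * overlayfs can take a private clone of one of its layers and later drop
 * it; the path variable below is hypothetical:
 *
 *	struct vfsmount *m = clone_private_mount(&layer_path);
 *
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...
 *	mntput(m);
 */
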
1981 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
1982 		   struct vfsmount *root)
1983 {
1984 	struct mount *mnt;
1985 	int res = f(root, arg);
1986 	if (res)
1987 		return res;
1988 	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
1989 		res = f(&mnt->mnt, arg);
1990 		if (res)
1991 			return res;
1992 	}
1993 	return 0;
1994 }
1995 
1996 static void lock_mnt_tree(struct mount *mnt)
1997 {
1998 	struct mount *p;
1999 
2000 	for (p = mnt; p; p = next_mnt(p, mnt)) {
2001 		int flags = p->mnt.mnt_flags;
2002 		/* Don't allow unprivileged users to change mount flags */
2003 		flags |= MNT_LOCK_ATIME;
2004 
2005 		if (flags & MNT_READONLY)
2006 			flags |= MNT_LOCK_READONLY;
2007 
2008 		if (flags & MNT_NODEV)
2009 			flags |= MNT_LOCK_NODEV;
2010 
2011 		if (flags & MNT_NOSUID)
2012 			flags |= MNT_LOCK_NOSUID;
2013 
2014 		if (flags & MNT_NOEXEC)
2015 			flags |= MNT_LOCK_NOEXEC;
2016 		/* Don't allow unprivileged users to reveal what is under a mount */
2017 		if (list_empty(&p->mnt_expire))
2018 			flags |= MNT_LOCKED;
2019 		p->mnt.mnt_flags = flags;
2020 	}
2021 }
2022 
2023 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
2024 {
2025 	struct mount *p;
2026 
2027 	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
2028 		if (p->mnt_group_id && !IS_MNT_SHARED(p))
2029 			mnt_release_group_id(p);
2030 	}
2031 }
2032 
2033 static int invent_group_ids(struct mount *mnt, bool recurse)
2034 {
2035 	struct mount *p;
2036 
2037 	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
2038 		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
2039 			int err = mnt_alloc_group_id(p);
2040 			if (err) {
2041 				cleanup_group_ids(mnt, p);
2042 				return err;
2043 			}
2044 		}
2045 	}
2046 
2047 	return 0;
2048 }
2049 
2050 int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
2051 {
2052 	unsigned int max = READ_ONCE(sysctl_mount_max);
2053 	unsigned int mounts = 0, old, pending, sum;
2054 	struct mount *p;
2055 
2056 	for (p = mnt; p; p = next_mnt(p, mnt))
2057 		mounts++;
2058 
2059 	old = ns->mounts;
2060 	pending = ns->pending_mounts;
2061 	sum = old + pending;
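	/* bail if the running totals wrapped or the new mounts would exceed the limit */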
2062 	if ((old > sum) ||
2063 	    (pending > sum) ||
2064 	    (max < sum) ||
2065 	    (mounts > (max - sum)))
2066 		return -ENOSPC;
2067 
2068 	ns->pending_mounts = pending + mounts;
2069 	return 0;
2070 }
2071 
2072 /*
2073  *  @source_mnt : mount tree to be attached
2074  *  @dest_mnt   : mount under which @source_mnt is attached
2075  *  @dest_mp    : mountpoint dentry on which @source_mnt is attached
2076  *  @moving     : true if @source_mnt is being moved from its current
2077  *  		   parent rather than newly attached
2078  *
2079  *  NOTE: the table below explains the semantics when a source mount
2080  *  of a given type is attached to a destination mount of a given type.
2081  * ---------------------------------------------------------------------------
2082  * |         BIND MOUNT OPERATION                                            |
2083  * |**************************************************************************
2084  * | source-->| shared        |       private  |       slave    | unbindable |
2085  * | dest     |               |                |                |            |
2086  * |   |      |               |                |                |            |
2087  * |   v      |               |                |                |            |
2088  * |**************************************************************************
2089  * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
2090  * |          |               |                |                |            |
2091  * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
2092  * ***************************************************************************
2093  * A bind operation clones the source mount and mounts the clone on the
2094  * destination mount.
2095  *
2096  * (++)  the cloned mount is propagated to all the mounts in the propagation
2097  * 	 tree of the destination mount and the cloned mount is added to
2098  * 	 the peer group of the source mount.
2099  * (+)   the cloned mount is created under the destination mount and is marked
2100  *       as shared. The cloned mount is added to the peer group of the source
2101  *       mount.
2102  * (+++) the mount is propagated to all the mounts in the propagation tree
2103  *       of the destination mount and the cloned mount is made slave
2104  *       of the same master as that of the source mount. The cloned mount
2105  *       is marked as 'shared and slave'.
2106  * (*)   the cloned mount is made a slave of the same master as that of the
2107  * 	 source mount.
2108  *
2109  * ---------------------------------------------------------------------------
2110  * |         		MOVE MOUNT OPERATION                                 |
2111  * |**************************************************************************
2112  * | source-->| shared        |       private  |       slave    | unbindable |
2113  * | dest     |               |                |                |            |
2114  * |   |      |               |                |                |            |
2115  * |   v      |               |                |                |            |
2116  * |**************************************************************************
2117  * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
2118  * |          |               |                |                |            |
2119  * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
2120  * ***************************************************************************
2121  *
2122  * (+)  the mount is moved to the destination. And is then propagated to
2123  * 	all the mounts in the propagation tree of the destination mount.
2124  * (+*)  the mount is moved to the destination.
2125  * (+++)  the mount is moved to the destination and is then propagated to
2126  * 	all the mounts belonging to the destination mount's propagation tree.
2127  * 	the mount is marked as 'shared and slave'.
2128  * (*)	the mount continues to be a slave at the new location.
2129  *
2130  * if the source mount is a tree, the operations explained above are
2131  * applied to each mount in the tree.
2132  * Must be called without spinlocks held, since this function can sleep
2133  * in allocations.
2134  */
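/*
 * A minimal userspace sketch of the bind rows above (illustrative only;
 * the paths are placeholders and /mnt is assumed to already be a mount
 * point):
 *
 *	#include <sys/mount.h>
 *
 *	// make the destination shared, then bind something into it; per
 *	// the table, the cloned mount is propagated to the destination's
 *	// peer group (the "shared" destination row)
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);
 *	mount("/src", "/mnt/a", NULL, MS_BIND, NULL);
 */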
2135 static int attach_recursive_mnt(struct mount *source_mnt,
2136 			struct mount *dest_mnt,
2137 			struct mountpoint *dest_mp,
2138 			bool moving)
2139 {
2140 	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2141 	HLIST_HEAD(tree_list);
2142 	struct mnt_namespace *ns = dest_mnt->mnt_ns;
2143 	struct mountpoint *smp;
2144 	struct mount *child, *p;
2145 	struct hlist_node *n;
2146 	int err;
2147 
2148 	/* Preallocate a mountpoint in case the new mounts need
2149 	 * to be tucked under other mounts.
2150 	 */
2151 	smp = get_mountpoint(source_mnt->mnt.mnt_root);
2152 	if (IS_ERR(smp))
2153 		return PTR_ERR(smp);
2154 
2155 	/* Is there space to add these mounts to the mount namespace? */
2156 	if (!moving) {
2157 		err = count_mounts(ns, source_mnt);
2158 		if (err)
2159 			goto out;
2160 	}
2161 
2162 	if (IS_MNT_SHARED(dest_mnt)) {
2163 		err = invent_group_ids(source_mnt, true);
2164 		if (err)
2165 			goto out;
2166 		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
2167 		lock_mount_hash();
2168 		if (err)
2169 			goto out_cleanup_ids;
2170 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
2171 			set_mnt_shared(p);
2172 	} else {
2173 		lock_mount_hash();
2174 	}
2175 	if (moving) {
2176 		unhash_mnt(source_mnt);
2177 		attach_mnt(source_mnt, dest_mnt, dest_mp);
2178 		touch_mnt_namespace(source_mnt->mnt_ns);
2179 	} else {
2180 		if (source_mnt->mnt_ns) {
2181 			/* move from anon - the caller will destroy */
2182 			list_del_init(&source_mnt->mnt_ns->list);
2183 		}
2184 		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
2185 		commit_tree(source_mnt);
2186 	}
2187 
2188 	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
2189 		struct mount *q;
2190 		hlist_del_init(&child->mnt_hash);
2191 		q = __lookup_mnt(&child->mnt_parent->mnt,
2192 				 child->mnt_mountpoint);
2193 		if (q)
2194 			mnt_change_mountpoint(child, smp, q);
2195 		/* Notice when we are propagating across user namespaces */
2196 		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
2197 			lock_mnt_tree(child);
2198 		child->mnt.mnt_flags &= ~MNT_LOCKED;
2199 		commit_tree(child);
2200 	}
2201 	put_mountpoint(smp);
2202 	unlock_mount_hash();
2203 
2204 	return 0;
2205 
2206  out_cleanup_ids:
2207 	while (!hlist_empty(&tree_list)) {
2208 		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
2209 		child->mnt_parent->mnt_ns->pending_mounts = 0;
2210 		umount_tree(child, UMOUNT_SYNC);
2211 	}
2212 	unlock_mount_hash();
2213 	cleanup_group_ids(source_mnt, NULL);
2214  out:
2215 	ns->pending_mounts = 0;
2216 
2217 	read_seqlock_excl(&mount_lock);
2218 	put_mountpoint(smp);
2219 	read_sequnlock_excl(&mount_lock);
2220 
2221 	return err;
2222 }
2223 
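/*
 * Pin down a prospective mountpoint: take the dentry's inode lock and
 * namespace_sem, then re-check whether something got mounted on top of
 * it in the meantime; if so, retry on the root of that overmount, so the
 * mountpoint returned is always the topmost one.
 */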
2224 static struct mountpoint *lock_mount(struct path *path)
2225 {
2226 	struct vfsmount *mnt;
2227 	struct dentry *dentry = path->dentry;
2228 retry:
2229 	inode_lock(dentry->d_inode);
2230 	if (unlikely(cant_mount(dentry))) {
2231 		inode_unlock(dentry->d_inode);
2232 		return ERR_PTR(-ENOENT);
2233 	}
2234 	namespace_lock();
2235 	mnt = lookup_mnt(path);
2236 	if (likely(!mnt)) {
2237 		struct mountpoint *mp = get_mountpoint(dentry);
2238 		if (IS_ERR(mp)) {
2239 			namespace_unlock();
2240 			inode_unlock(dentry->d_inode);
2241 			return mp;
2242 		}
2243 		return mp;
2244 	}
2245 	namespace_unlock();
2246 	inode_unlock(path->dentry->d_inode);
2247 	path_put(path);
2248 	path->mnt = mnt;
2249 	dentry = path->dentry = dget(mnt->mnt_root);
2250 	goto retry;
2251 }
2252 
2253 static void unlock_mount(struct mountpoint *where)
2254 {
2255 	struct dentry *dentry = where->m_dentry;
2256 
2257 	read_seqlock_excl(&mount_lock);
2258 	put_mountpoint(where);
2259 	read_sequnlock_excl(&mount_lock);
2260 
2261 	namespace_unlock();
2262 	inode_unlock(dentry->d_inode);
2263 }
2264 
2265 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
2266 {
2267 	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
2268 		return -EINVAL;
2269 
2270 	if (d_is_dir(mp->m_dentry) !=
2271 	      d_is_dir(mnt->mnt.mnt_root))
2272 		return -ENOTDIR;
2273 
2274 	return attach_recursive_mnt(mnt, p, mp, false);
2275 }
2276 
2277 /*
2278  * Sanity check the flags to change_mnt_propagation.
2279  */
2280 
2281 static int flags_to_propagation_type(int ms_flags)
2282 {
2283 	int type = ms_flags & ~(MS_REC | MS_SILENT);
2284 
2285 	/* Fail if any non-propagation flags are set */
2286 	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2287 		return 0;
2288 	/* Only one propagation flag should be set */
2289 	if (!is_power_of_2(type))
2290 		return 0;
2291 	return type;
2292 }
2293 
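/*
 * For example, MS_SHARED | MS_REC passes (MS_REC is masked off, leaving a
 * single propagation bit), while MS_SHARED | MS_SLAVE fails the
 * power-of-two test and do_change_type() below returns -EINVAL.
 */
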
2294 /*
2295  * recursively change the type of the mountpoint.
2296  */
2297 static int do_change_type(struct path *path, int ms_flags)
2298 {
2299 	struct mount *m;
2300 	struct mount *mnt = real_mount(path->mnt);
2301 	int recurse = ms_flags & MS_REC;
2302 	int type;
2303 	int err = 0;
2304 
2305 	if (path->dentry != path->mnt->mnt_root)
2306 		return -EINVAL;
2307 
2308 	type = flags_to_propagation_type(ms_flags);
2309 	if (!type)
2310 		return -EINVAL;
2311 
2312 	namespace_lock();
2313 	if (type == MS_SHARED) {
2314 		err = invent_group_ids(mnt, recurse);
2315 		if (err)
2316 			goto out_unlock;
2317 	}
2318 
2319 	lock_mount_hash();
2320 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2321 		change_mnt_propagation(m, type);
2322 	unlock_mount_hash();
2323 
2324  out_unlock:
2325 	namespace_unlock();
2326 	return err;
2327 }
2328 
2329 static struct mount *__do_loopback(struct path *old_path, int recurse)
2330 {
2331 	struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
2332 
2333 	if (IS_MNT_UNBINDABLE(old))
2334 		return mnt;
2335 
2336 	if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
2337 		return mnt;
2338 
2339 	if (!recurse && has_locked_children(old, old_path->dentry))
2340 		return mnt;
2341 
2342 	if (recurse)
2343 		mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
2344 	else
2345 		mnt = clone_mnt(old, old_path->dentry, 0);
2346 
2347 	if (!IS_ERR(mnt))
2348 		mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2349 
2350 	return mnt;
2351 }
2352 
2353 /*
2354  * do loopback mount.
2355  */
2356 static int do_loopback(struct path *path, const char *old_name,
2357 				int recurse)
2358 {
2359 	struct path old_path;
2360 	struct mount *mnt = NULL, *parent;
2361 	struct mountpoint *mp;
2362 	int err;
2363 	if (!old_name || !*old_name)
2364 		return -EINVAL;
2365 	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2366 	if (err)
2367 		return err;
2368 
2369 	err = -EINVAL;
2370 	if (mnt_ns_loop(old_path.dentry))
2371 		goto out;
2372 
2373 	mp = lock_mount(path);
2374 	if (IS_ERR(mp)) {
2375 		err = PTR_ERR(mp);
2376 		goto out;
2377 	}
2378 
2379 	parent = real_mount(path->mnt);
2380 	if (!check_mnt(parent))
2381 		goto out2;
2382 
2383 	mnt = __do_loopback(&old_path, recurse);
2384 	if (IS_ERR(mnt)) {
2385 		err = PTR_ERR(mnt);
2386 		goto out2;
2387 	}
2388 
2389 	err = graft_tree(mnt, parent, mp);
2390 	if (err) {
2391 		lock_mount_hash();
2392 		umount_tree(mnt, UMOUNT_SYNC);
2393 		unlock_mount_hash();
2394 	}
2395 out2:
2396 	unlock_mount(mp);
2397 out:
2398 	path_put(&old_path);
2399 	return err;
2400 }
2401 
2402 static struct file *open_detached_copy(struct path *path, bool recursive)
2403 {
2404 	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2405 	struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
2406 	struct mount *mnt, *p;
2407 	struct file *file;
2408 
2409 	if (IS_ERR(ns))
2410 		return ERR_CAST(ns);
2411 
2412 	namespace_lock();
2413 	mnt = __do_loopback(path, recursive);
2414 	if (IS_ERR(mnt)) {
2415 		namespace_unlock();
2416 		free_mnt_ns(ns);
2417 		return ERR_CAST(mnt);
2418 	}
2419 
2420 	lock_mount_hash();
2421 	for (p = mnt; p; p = next_mnt(p, mnt)) {
2422 		p->mnt_ns = ns;
2423 		ns->mounts++;
2424 	}
2425 	ns->root = mnt;
2426 	list_add_tail(&ns->list, &mnt->mnt_list);
2427 	mntget(&mnt->mnt);
2428 	unlock_mount_hash();
2429 	namespace_unlock();
2430 
2431 	mntput(path->mnt);
2432 	path->mnt = &mnt->mnt;
2433 	file = dentry_open(path, O_PATH, current_cred());
2434 	if (IS_ERR(file))
2435 		dissolve_on_fput(path->mnt);
2436 	else
2437 		file->f_mode |= FMODE_NEED_UNMOUNT;
2438 	return file;
2439 }
2440 
2441 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
2442 {
2443 	struct file *file;
2444 	struct path path;
2445 	int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
2446 	bool detached = flags & OPEN_TREE_CLONE;
2447 	int error;
2448 	int fd;
2449 
2450 	BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
2451 
2452 	if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
2453 		      AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
2454 		      OPEN_TREE_CLOEXEC))
2455 		return -EINVAL;
2456 
2457 	if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
2458 		return -EINVAL;
2459 
2460 	if (flags & AT_NO_AUTOMOUNT)
2461 		lookup_flags &= ~LOOKUP_AUTOMOUNT;
2462 	if (flags & AT_SYMLINK_NOFOLLOW)
2463 		lookup_flags &= ~LOOKUP_FOLLOW;
2464 	if (flags & AT_EMPTY_PATH)
2465 		lookup_flags |= LOOKUP_EMPTY;
2466 
2467 	if (detached && !may_mount())
2468 		return -EPERM;
2469 
2470 	fd = get_unused_fd_flags(flags & O_CLOEXEC);
2471 	if (fd < 0)
2472 		return fd;
2473 
2474 	error = user_path_at(dfd, filename, lookup_flags, &path);
2475 	if (unlikely(error)) {
2476 		file = ERR_PTR(error);
2477 	} else {
2478 		if (detached)
2479 			file = open_detached_copy(&path, flags & AT_RECURSIVE);
2480 		else
2481 			file = dentry_open(&path, O_PATH, current_cred());
2482 		path_put(&path);
2483 	}
2484 	if (IS_ERR(file)) {
2485 		put_unused_fd(fd);
2486 		return PTR_ERR(file);
2487 	}
2488 	fd_install(fd, file);
2489 	return fd;
2490 }
2491 
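/*
 * Illustrative userspace sketch (the path is a placeholder; glibc has no
 * open_tree() wrapper at the time of writing, hence raw syscall(); the
 * AT_RECURSIVE definition may require <linux/fcntl.h> depending on the
 * libc; error handling elided):
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	// detached, recursive private copy of the tree at /mnt
 *	int fd = syscall(SYS_open_tree, AT_FDCWD, "/mnt",
 *			 OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
 */
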
2492 /*
2493  * Don't allow locked mount flags to be cleared.
2494  *
2495  * No locks need to be held here while testing the various MNT_LOCK
2496  * flags because those flags can never be cleared once they are set.
2497  */
2498 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
2499 {
2500 	unsigned int fl = mnt->mnt.mnt_flags;
2501 
2502 	if ((fl & MNT_LOCK_READONLY) &&
2503 	    !(mnt_flags & MNT_READONLY))
2504 		return false;
2505 
2506 	if ((fl & MNT_LOCK_NODEV) &&
2507 	    !(mnt_flags & MNT_NODEV))
2508 		return false;
2509 
2510 	if ((fl & MNT_LOCK_NOSUID) &&
2511 	    !(mnt_flags & MNT_NOSUID))
2512 		return false;
2513 
2514 	if ((fl & MNT_LOCK_NOEXEC) &&
2515 	    !(mnt_flags & MNT_NOEXEC))
2516 		return false;
2517 
2518 	if ((fl & MNT_LOCK_ATIME) &&
2519 	    ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
2520 		return false;
2521 
2522 	return true;
2523 }
2524 
2525 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
2526 {
2527 	bool readonly_request = (mnt_flags & MNT_READONLY);
2528 
2529 	if (readonly_request == __mnt_is_readonly(&mnt->mnt))
2530 		return 0;
2531 
2532 	if (readonly_request)
2533 		return mnt_make_readonly(mnt);
2534 
2535 	return __mnt_unmake_readonly(mnt);
2536 }
2537 
2538 /*
2539  * Update the user-settable attributes on a mount.  The caller must hold
2540  * sb->s_umount for writing.
2541  */
2542 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
2543 {
2544 	lock_mount_hash();
2545 	mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2546 	mnt->mnt.mnt_flags = mnt_flags;
2547 	touch_mnt_namespace(mnt->mnt_ns);
2548 	unlock_mount_hash();
2549 }
2550 
2551 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
2552 {
2553 	struct super_block *sb = mnt->mnt_sb;
2554 
2555 	if (!__mnt_is_readonly(mnt) &&
2556 	   (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
2557 		char *buf = (char *)__get_free_page(GFP_KERNEL);
2558 		char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
2559 		struct tm tm;
2560 
2561 		time64_to_tm(sb->s_time_max, 0, &tm);
2562 
2563 		pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
2564 			sb->s_type->name,
2565 			is_mounted(mnt) ? "remounted" : "mounted",
2566 			mntpath,
2567 			tm.tm_year+1900, (unsigned long long)sb->s_time_max);
2568 
2569 		free_page((unsigned long)buf);
2570 	}
2571 }
2572 
2573 /*
2574  * Handle reconfiguration of the mountpoint only without alteration of the
2575  * superblock it refers to.  This is triggered by specifying MS_REMOUNT|MS_BIND
2576  * to mount(2).
2577  */
2578 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
2579 {
2580 	struct super_block *sb = path->mnt->mnt_sb;
2581 	struct mount *mnt = real_mount(path->mnt);
2582 	int ret;
2583 
2584 	if (!check_mnt(mnt))
2585 		return -EINVAL;
2586 
2587 	if (path->dentry != mnt->mnt.mnt_root)
2588 		return -EINVAL;
2589 
2590 	if (!can_change_locked_flags(mnt, mnt_flags))
2591 		return -EPERM;
2592 
2593 	down_write(&sb->s_umount);
2594 	ret = change_mount_ro_state(mnt, mnt_flags);
2595 	if (ret == 0)
2596 		set_mount_attributes(mnt, mnt_flags);
2597 	up_write(&sb->s_umount);
2598 
2599 	mnt_warn_timestamp_expiry(path, &mnt->mnt);
2600 
2601 	return ret;
2602 }
2603 
2604 /*
2605  * change filesystem flags. dir should be a physical root of filesystem.
2606  * If you've mounted a non-root directory somewhere and want to do remount
2607  * on it - tough luck.
2608  */
2609 static int do_remount(struct path *path, int ms_flags, int sb_flags,
2610 		      int mnt_flags, void *data)
2611 {
2612 	int err;
2613 	struct super_block *sb = path->mnt->mnt_sb;
2614 	struct mount *mnt = real_mount(path->mnt);
2615 	struct fs_context *fc;
2616 
2617 	if (!check_mnt(mnt))
2618 		return -EINVAL;
2619 
2620 	if (path->dentry != path->mnt->mnt_root)
2621 		return -EINVAL;
2622 
2623 	if (!can_change_locked_flags(mnt, mnt_flags))
2624 		return -EPERM;
2625 
2626 	fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
2627 	if (IS_ERR(fc))
2628 		return PTR_ERR(fc);
2629 
2630 	/*
2631 	 * Indicate to the filesystem that the remount request is coming
2632 	 * from the legacy mount system call.
2633 	 */
2634 	fc->oldapi = true;
2635 
2636 	err = parse_monolithic_mount_data(fc, data);
2637 	if (!err) {
2638 		down_write(&sb->s_umount);
2639 		err = -EPERM;
2640 		if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
2641 			err = reconfigure_super(fc);
2642 			if (!err)
2643 				set_mount_attributes(mnt, mnt_flags);
2644 		}
2645 		up_write(&sb->s_umount);
2646 	}
2647 
2648 	mnt_warn_timestamp_expiry(path, &mnt->mnt);
2649 
2650 	put_fs_context(fc);
2651 	return err;
2652 }
2653 
2654 static inline int tree_contains_unbindable(struct mount *mnt)
2655 {
2656 	struct mount *p;
2657 	for (p = mnt; p; p = next_mnt(p, mnt)) {
2658 		if (IS_MNT_UNBINDABLE(p))
2659 			return 1;
2660 	}
2661 	return 0;
2662 }
2663 
2664 /*
2665  * Check that there aren't references to earlier/same mount namespaces in the
2666  * specified subtree.  Such references can act as pins for mount namespaces
2667  * that aren't checked by the mount-cycle checking code, thereby allowing
2668  * cycles to be made.
2669  */
2670 static bool check_for_nsfs_mounts(struct mount *subtree)
2671 {
2672 	struct mount *p;
2673 	bool ret = false;
2674 
2675 	lock_mount_hash();
2676 	for (p = subtree; p; p = next_mnt(p, subtree))
2677 		if (mnt_ns_loop(p->mnt.mnt_root))
2678 			goto out;
2679 
2680 	ret = true;
2681 out:
2682 	unlock_mount_hash();
2683 	return ret;
2684 }
2685 
2686 static int do_move_mount(struct path *old_path, struct path *new_path)
2687 {
2688 	struct mnt_namespace *ns;
2689 	struct mount *p;
2690 	struct mount *old;
2691 	struct mount *parent;
2692 	struct mountpoint *mp, *old_mp;
2693 	int err;
2694 	bool attached;
2695 
2696 	mp = lock_mount(new_path);
2697 	if (IS_ERR(mp))
2698 		return PTR_ERR(mp);
2699 
2700 	old = real_mount(old_path->mnt);
2701 	p = real_mount(new_path->mnt);
2702 	parent = old->mnt_parent;
2703 	attached = mnt_has_parent(old);
2704 	old_mp = old->mnt_mp;
2705 	ns = old->mnt_ns;
2706 
2707 	err = -EINVAL;
2708 	/* The mountpoint must be in our namespace. */
2709 	if (!check_mnt(p))
2710 		goto out;
2711 
2712 	/* The thing moved must be mounted... */
2713 	if (!is_mounted(&old->mnt))
2714 		goto out;
2715 
2716 	/* ... and either ours or the root of anon namespace */
2717 	if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
2718 		goto out;
2719 
2720 	if (old->mnt.mnt_flags & MNT_LOCKED)
2721 		goto out;
2722 
2723 	if (old_path->dentry != old_path->mnt->mnt_root)
2724 		goto out;
2725 
2726 	if (d_is_dir(new_path->dentry) !=
2727 	    d_is_dir(old_path->dentry))
2728 		goto out;
2729 	/*
2730 	 * Don't move a mount residing in a shared parent.
2731 	 */
2732 	if (attached && IS_MNT_SHARED(parent))
2733 		goto out;
2734 	/*
2735 	 * Don't move a mount tree containing unbindable mounts to a destination
2736 	 * mount which is shared.
2737 	 */
2738 	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
2739 		goto out;
2740 	err = -ELOOP;
2741 	if (!check_for_nsfs_mounts(old))
2742 		goto out;
2743 	for (; mnt_has_parent(p); p = p->mnt_parent)
2744 		if (p == old)
2745 			goto out;
2746 
2747 	err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
2748 				   attached);
2749 	if (err)
2750 		goto out;
2751 
2752 	/* if the mount is moved, it should no longer be expired
2753 	 * automatically */
2754 	list_del_init(&old->mnt_expire);
2755 	if (attached)
2756 		put_mountpoint(old_mp);
2757 out:
2758 	unlock_mount(mp);
2759 	if (!err) {
2760 		if (attached)
2761 			mntput_no_expire(parent);
2762 		else
2763 			free_mnt_ns(ns);
2764 	}
2765 	return err;
2766 }
2767 
2768 static int do_move_mount_old(struct path *path, const char *old_name)
2769 {
2770 	struct path old_path;
2771 	int err;
2772 
2773 	if (!old_name || !*old_name)
2774 		return -EINVAL;
2775 
2776 	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
2777 	if (err)
2778 		return err;
2779 
2780 	err = do_move_mount(&old_path, path);
2781 	path_put(&old_path);
2782 	return err;
2783 }
2784 
2785 /*
2786  * add a mount into a namespace's mount tree
2787  */
2788 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
2789 			struct path *path, int mnt_flags)
2790 {
2791 	struct mount *parent = real_mount(path->mnt);
2792 
2793 	mnt_flags &= ~MNT_INTERNAL_FLAGS;
2794 
2795 	if (unlikely(!check_mnt(parent))) {
2796 		/* that's acceptable only for automounts done in private ns */
2797 		if (!(mnt_flags & MNT_SHRINKABLE))
2798 			return -EINVAL;
2799 		/* ... and for those we'd better have mountpoint still alive */
2800 		if (!parent->mnt_ns)
2801 			return -EINVAL;
2802 	}
2803 
2804 	/* Refuse the same filesystem on the same mount point */
2805 	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
2806 	    path->mnt->mnt_root == path->dentry)
2807 		return -EBUSY;
2808 
2809 	if (d_is_symlink(newmnt->mnt.mnt_root))
2810 		return -EINVAL;
2811 
2812 	newmnt->mnt.mnt_flags = mnt_flags;
2813 	return graft_tree(newmnt, parent, mp);
2814 }
2815 
2816 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
2817 
2818 /*
2819  * Create a new mount using a superblock configuration and request it
2820  * be added to the namespace tree.
2821  */
2822 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
2823 			   unsigned int mnt_flags)
2824 {
2825 	struct vfsmount *mnt;
2826 	struct mountpoint *mp;
2827 	struct super_block *sb = fc->root->d_sb;
2828 	int error;
2829 
2830 	error = security_sb_kern_mount(sb);
2831 	if (!error && mount_too_revealing(sb, &mnt_flags))
2832 		error = -EPERM;
2833 
2834 	if (unlikely(error)) {
2835 		fc_drop_locked(fc);
2836 		return error;
2837 	}
2838 
2839 	up_write(&sb->s_umount);
2840 
2841 	mnt = vfs_create_mount(fc);
2842 	if (IS_ERR(mnt))
2843 		return PTR_ERR(mnt);
2844 
2845 	mnt_warn_timestamp_expiry(mountpoint, mnt);
2846 
2847 	mp = lock_mount(mountpoint);
2848 	if (IS_ERR(mp)) {
2849 		mntput(mnt);
2850 		return PTR_ERR(mp);
2851 	}
2852 	error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
2853 	unlock_mount(mp);
2854 	if (error < 0)
2855 		mntput(mnt);
2856 	return error;
2857 }
2858 
2859 /*
2860  * create a new mount for userspace and request it to be added into the
2861  * namespace's tree
2862  */
2863 static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
2864 			int mnt_flags, const char *name, void *data)
2865 {
2866 	struct file_system_type *type;
2867 	struct fs_context *fc;
2868 	const char *subtype = NULL;
2869 	int err = 0;
2870 
2871 	if (!fstype)
2872 		return -EINVAL;
2873 
2874 	type = get_fs_type(fstype);
2875 	if (!type)
2876 		return -ENODEV;
2877 
2878 	if (type->fs_flags & FS_HAS_SUBTYPE) {
2879 		subtype = strchr(fstype, '.');
2880 		if (subtype) {
2881 			subtype++;
2882 			if (!*subtype) {
2883 				put_filesystem(type);
2884 				return -EINVAL;
2885 			}
2886 		}
2887 	}
2888 
2889 	fc = fs_context_for_mount(type, sb_flags);
2890 	put_filesystem(type);
2891 	if (IS_ERR(fc))
2892 		return PTR_ERR(fc);
2893 
2894 	/*
2895 	 * Indicate to the filesystem that the mount request is coming
2896 	 * from the legacy mount system call.
2897 	 */
2898 	fc->oldapi = true;
2899 
2900 	if (subtype)
2901 		err = vfs_parse_fs_string(fc, "subtype",
2902 					  subtype, strlen(subtype));
2903 	if (!err && name)
2904 		err = vfs_parse_fs_string(fc, "source", name, strlen(name));
2905 	if (!err)
2906 		err = parse_monolithic_mount_data(fc, data);
2907 	if (!err && !mount_capable(fc))
2908 		err = -EPERM;
2909 	if (!err)
2910 		err = vfs_get_tree(fc);
2911 	if (!err)
2912 		err = do_new_mount_fc(fc, path, mnt_flags);
2913 
2914 	put_fs_context(fc);
2915 	return err;
2916 }
2917 
2918 int finish_automount(struct vfsmount *m, struct path *path)
2919 {
2920 	struct dentry *dentry = path->dentry;
2921 	struct mountpoint *mp;
2922 	struct mount *mnt;
2923 	int err;
2924 
2925 	if (!m)
2926 		return 0;
2927 	if (IS_ERR(m))
2928 		return PTR_ERR(m);
2929 
2930 	mnt = real_mount(m);
2931 	/* The new mount record should have at least 2 refs to prevent it from
2932 	 * being expired before we get a chance to add it
2933 	 */
2934 	BUG_ON(mnt_get_count(mnt) < 2);
2935 
2936 	if (m->mnt_sb == path->mnt->mnt_sb &&
2937 	    m->mnt_root == dentry) {
2938 		err = -ELOOP;
2939 		goto discard;
2940 	}
2941 
2942 	/*
2943 	 * we don't want to use lock_mount() - in this case finding something
2944 	 * that overmounts our mountpoint means "quietly drop what we've
2945 	 * got", not "try to mount it on top".
2946 	 */
2947 	inode_lock(dentry->d_inode);
2948 	namespace_lock();
2949 	if (unlikely(cant_mount(dentry))) {
2950 		err = -ENOENT;
2951 		goto discard_locked;
2952 	}
2953 	rcu_read_lock();
2954 	if (unlikely(__lookup_mnt(path->mnt, dentry))) {
2955 		rcu_read_unlock();
2956 		err = 0;
2957 		goto discard_locked;
2958 	}
2959 	rcu_read_unlock();
2960 	mp = get_mountpoint(dentry);
2961 	if (IS_ERR(mp)) {
2962 		err = PTR_ERR(mp);
2963 		goto discard_locked;
2964 	}
2965 
2966 	err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
2967 	unlock_mount(mp);
2968 	if (unlikely(err))
2969 		goto discard;
2970 	mntput(m);
2971 	return 0;
2972 
2973 discard_locked:
2974 	namespace_unlock();
2975 	inode_unlock(dentry->d_inode);
2976 discard:
2977 	/* remove m from any expiration list it may be on */
2978 	if (!list_empty(&mnt->mnt_expire)) {
2979 		namespace_lock();
2980 		list_del_init(&mnt->mnt_expire);
2981 		namespace_unlock();
2982 	}
2983 	mntput(m);
2984 	mntput(m);
2985 	return err;
2986 }
2987 
2988 /**
2989  * mnt_set_expiry - Put a mount on an expiration list
2990  * @mnt: The mount to list.
2991  * @expiry_list: The list to add the mount to.
2992  */
2993 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
2994 {
2995 	namespace_lock();
2996 
2997 	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
2998 
2999 	namespace_unlock();
3000 }
3001 EXPORT_SYMBOL(mnt_set_expiry);
3002 
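/*
 * Typical usage sketch: an automounting filesystem keeps a private list,
 * queues every automounted submount on it, and reaps unused ones from a
 * periodic worker.  The names below are hypothetical:
 *
 *	static LIST_HEAD(example_automount_list);
 *
 *	// in the ->d_automount() path:
 *	mnt_set_expiry(newmnt, &example_automount_list);
 *
 *	// from a timer or delayed work, every few minutes:
 *	mark_mounts_for_expiry(&example_automount_list);
 */
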
3003 /*
3004  * process a list of expirable mountpoints with the intent of discarding any
3005  * mountpoints that aren't in use and haven't been touched since last we came
3006  * here
3007  */
3008 void mark_mounts_for_expiry(struct list_head *mounts)
3009 {
3010 	struct mount *mnt, *next;
3011 	LIST_HEAD(graveyard);
3012 
3013 	if (list_empty(mounts))
3014 		return;
3015 
3016 	namespace_lock();
3017 	lock_mount_hash();
3018 
3019 	/* extract from the expiration list every vfsmount that matches the
3020 	 * following criteria:
3021 	 * - only referenced by its parent vfsmount
3022 	 * - still marked for expiry (marked on the last call here; marks are
3023 	 *   cleared by mntput())
3024 	 */
3025 	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3026 		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3027 			propagate_mount_busy(mnt, 1))
3028 			continue;
3029 		list_move(&mnt->mnt_expire, &graveyard);
3030 	}
3031 	while (!list_empty(&graveyard)) {
3032 		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3033 		touch_mnt_namespace(mnt->mnt_ns);
3034 		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3035 	}
3036 	unlock_mount_hash();
3037 	namespace_unlock();
3038 }
3039 
3040 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
3041 
3042 /*
3043  * Ripoff of 'select_parent()'
3044  *
3045  * search the list of submounts for a given mountpoint, and move any
3046  * shrinkable submounts to the 'graveyard' list.
3047  */
3048 static int select_submounts(struct mount *parent, struct list_head *graveyard)
3049 {
3050 	struct mount *this_parent = parent;
3051 	struct list_head *next;
3052 	int found = 0;
3053 
3054 repeat:
3055 	next = this_parent->mnt_mounts.next;
3056 resume:
3057 	while (next != &this_parent->mnt_mounts) {
3058 		struct list_head *tmp = next;
3059 		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3060 
3061 		next = tmp->next;
3062 		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3063 			continue;
3064 		/*
3065 		 * Descend a level if the mnt_mounts list is non-empty.
3066 		 */
3067 		if (!list_empty(&mnt->mnt_mounts)) {
3068 			this_parent = mnt;
3069 			goto repeat;
3070 		}
3071 
3072 		if (!propagate_mount_busy(mnt, 1)) {
3073 			list_move_tail(&mnt->mnt_expire, graveyard);
3074 			found++;
3075 		}
3076 	}
3077 	/*
3078 	 * All done at this level ... ascend and resume the search
3079 	 */
3080 	if (this_parent != parent) {
3081 		next = this_parent->mnt_child.next;
3082 		this_parent = this_parent->mnt_parent;
3083 		goto resume;
3084 	}
3085 	return found;
3086 }
3087 
3088 /*
3089  * process a list of expirable mountpoints with the intent of discarding any
3090  * submounts of a specific parent mountpoint
3091  *
3092  * mount_lock must be held for write
3093  */
3094 static void shrink_submounts(struct mount *mnt)
3095 {
3096 	LIST_HEAD(graveyard);
3097 	struct mount *m;
3098 
3099 	/* extract submounts of 'mountpoint' from the expiration list */
3100 	while (select_submounts(mnt, &graveyard)) {
3101 		while (!list_empty(&graveyard)) {
3102 			m = list_first_entry(&graveyard, struct mount,
3103 						mnt_expire);
3104 			touch_mnt_namespace(m->mnt_ns);
3105 			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3106 		}
3107 	}
3108 }
3109 
3110 static void *copy_mount_options(const void __user * data)
3111 {
3112 	char *copy;
3113 	unsigned left, offset;
3114 
3115 	if (!data)
3116 		return NULL;
3117 
3118 	copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
3119 	if (!copy)
3120 		return ERR_PTR(-ENOMEM);
3121 
3122 	left = copy_from_user(copy, data, PAGE_SIZE);
3123 
3124 	/*
3125 	 * Not all architectures have an exact copy_from_user(). Resort to
3126 	 * copying a byte at a time.
3127 	 */
3128 	offset = PAGE_SIZE - left;
3129 	while (left) {
3130 		char c;
3131 		if (get_user(c, (const char __user *)data + offset))
3132 			break;
3133 		copy[offset] = c;
3134 		left--;
3135 		offset++;
3136 	}
3137 
3138 	if (left == PAGE_SIZE) {
3139 		kfree(copy);
3140 		return ERR_PTR(-EFAULT);
3141 	}
3142 
3143 	return copy;
3144 }
3145 
3146 static char *copy_mount_string(const void __user *data)
3147 {
3148 	return data ? strndup_user(data, PATH_MAX) : NULL;
3149 }
3150 
3151 /*
3152  * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
3153  * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
3154  *
3155  * data is a (void *) that can point to any structure up to
3156  * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
3157  * information (or be NULL).
3158  *
3159  * Pre-0.97 versions of mount() didn't have a flags word.
3160  * When the flags word was introduced its top half was required
3161  * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
3162  * Therefore, if this magic number is present, it carries no information
3163  * and must be discarded.
3164  */
3165 int path_mount(const char *dev_name, struct path *path,
3166 		const char *type_page, unsigned long flags, void *data_page)
3167 {
3168 	unsigned int mnt_flags = 0, sb_flags;
3169 	int ret;
3170 
3171 	/* Discard magic */
3172 	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
3173 		flags &= ~MS_MGC_MSK;
3174 
3175 	/* Basic sanity checks */
3176 	if (data_page)
3177 		((char *)data_page)[PAGE_SIZE - 1] = 0;
3178 
3179 	if (flags & MS_NOUSER)
3180 		return -EINVAL;
3181 
3182 	ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
3183 	if (ret)
3184 		return ret;
3185 	if (!may_mount())
3186 		return -EPERM;
3187 	if ((flags & SB_MANDLOCK) && !may_mandlock())
3188 		return -EPERM;
3189 
3190 	/* Default to relatime unless overridden */
3191 	if (!(flags & MS_NOATIME))
3192 		mnt_flags |= MNT_RELATIME;
3193 
3194 	/* Separate the per-mountpoint flags */
3195 	if (flags & MS_NOSUID)
3196 		mnt_flags |= MNT_NOSUID;
3197 	if (flags & MS_NODEV)
3198 		mnt_flags |= MNT_NODEV;
3199 	if (flags & MS_NOEXEC)
3200 		mnt_flags |= MNT_NOEXEC;
3201 	if (flags & MS_NOATIME)
3202 		mnt_flags |= MNT_NOATIME;
3203 	if (flags & MS_NODIRATIME)
3204 		mnt_flags |= MNT_NODIRATIME;
3205 	if (flags & MS_STRICTATIME)
3206 		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
3207 	if (flags & MS_RDONLY)
3208 		mnt_flags |= MNT_READONLY;
3209 	if (flags & MS_NOSYMFOLLOW)
3210 		mnt_flags |= MNT_NOSYMFOLLOW;
3211 
3212 	/* The default atime for remount is preservation */
3213 	if ((flags & MS_REMOUNT) &&
3214 	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3215 		       MS_STRICTATIME)) == 0)) {
3216 		mnt_flags &= ~MNT_ATIME_MASK;
3217 		mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3218 	}
3219 
3220 	sb_flags = flags & (SB_RDONLY |
3221 			    SB_SYNCHRONOUS |
3222 			    SB_MANDLOCK |
3223 			    SB_DIRSYNC |
3224 			    SB_SILENT |
3225 			    SB_POSIXACL |
3226 			    SB_LAZYTIME |
3227 			    SB_I_VERSION);
3228 
3229 	if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
3230 		return do_reconfigure_mnt(path, mnt_flags);
3231 	if (flags & MS_REMOUNT)
3232 		return do_remount(path, flags, sb_flags, mnt_flags, data_page);
3233 	if (flags & MS_BIND)
3234 		return do_loopback(path, dev_name, flags & MS_REC);
3235 	if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
3236 		return do_change_type(path, flags);
3237 	if (flags & MS_MOVE)
3238 		return do_move_mount_old(path, dev_name);
3239 
3240 	return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
3241 			    data_page);
3242 }
3243 
3244 long do_mount(const char *dev_name, const char __user *dir_name,
3245 		const char *type_page, unsigned long flags, void *data_page)
3246 {
3247 	struct path path;
3248 	int ret;
3249 
3250 	ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
3251 	if (ret)
3252 		return ret;
3253 	ret = path_mount(dev_name, &path, type_page, flags, data_page);
3254 	path_put(&path);
3255 	return ret;
3256 }
3257 
3258 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
3259 {
3260 	return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3261 }
3262 
3263 static void dec_mnt_namespaces(struct ucounts *ucounts)
3264 {
3265 	dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3266 }
3267 
3268 static void free_mnt_ns(struct mnt_namespace *ns)
3269 {
3270 	if (!is_anon_ns(ns))
3271 		ns_free_inum(&ns->ns);
3272 	dec_mnt_namespaces(ns->ucounts);
3273 	put_user_ns(ns->user_ns);
3274 	kfree(ns);
3275 }
3276 
3277 /*
3278  * Assign a sequence number so we can detect when we attempt to bind
3279  * mount a reference to an older mount namespace into the current
3280  * mount namespace, preventing reference counting loops.  Even a 64bit
3281  * number incrementing at 10GHz would take about 58 years to wrap, and
3282  * namespaces are created far more slowly, so we can ignore the possibility.
3283  */
3284 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
3285 
3286 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
3287 {
3288 	struct mnt_namespace *new_ns;
3289 	struct ucounts *ucounts;
3290 	int ret;
3291 
3292 	ucounts = inc_mnt_namespaces(user_ns);
3293 	if (!ucounts)
3294 		return ERR_PTR(-ENOSPC);
3295 
3296 	new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
3297 	if (!new_ns) {
3298 		dec_mnt_namespaces(ucounts);
3299 		return ERR_PTR(-ENOMEM);
3300 	}
3301 	if (!anon) {
3302 		ret = ns_alloc_inum(&new_ns->ns);
3303 		if (ret) {
3304 			kfree(new_ns);
3305 			dec_mnt_namespaces(ucounts);
3306 			return ERR_PTR(ret);
3307 		}
3308 	}
3309 	new_ns->ns.ops = &mntns_operations;
3310 	if (!anon)
3311 		new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
3312 	atomic_set(&new_ns->count, 1);
3313 	INIT_LIST_HEAD(&new_ns->list);
3314 	init_waitqueue_head(&new_ns->poll);
3315 	spin_lock_init(&new_ns->ns_lock);
3316 	new_ns->user_ns = get_user_ns(user_ns);
3317 	new_ns->ucounts = ucounts;
3318 	return new_ns;
3319 }
3320 
3321 __latent_entropy
3322 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
3323 		struct user_namespace *user_ns, struct fs_struct *new_fs)
3324 {
3325 	struct mnt_namespace *new_ns;
3326 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
3327 	struct mount *p, *q;
3328 	struct mount *old;
3329 	struct mount *new;
3330 	int copy_flags;
3331 
3332 	BUG_ON(!ns);
3333 
3334 	if (likely(!(flags & CLONE_NEWNS))) {
3335 		get_mnt_ns(ns);
3336 		return ns;
3337 	}
3338 
3339 	old = ns->root;
3340 
3341 	new_ns = alloc_mnt_ns(user_ns, false);
3342 	if (IS_ERR(new_ns))
3343 		return new_ns;
3344 
3345 	namespace_lock();
3346 	/* First pass: copy the tree topology */
3347 	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
3348 	if (user_ns != ns->user_ns)
3349 		copy_flags |= CL_SHARED_TO_SLAVE;
3350 	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3351 	if (IS_ERR(new)) {
3352 		namespace_unlock();
3353 		free_mnt_ns(new_ns);
3354 		return ERR_CAST(new);
3355 	}
3356 	if (user_ns != ns->user_ns) {
3357 		lock_mount_hash();
3358 		lock_mnt_tree(new);
3359 		unlock_mount_hash();
3360 	}
3361 	new_ns->root = new;
3362 	list_add_tail(&new_ns->list, &new->mnt_list);
3363 
3364 	/*
3365 	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
3366 	 * as belonging to new namespace.  We have already acquired a private
3367 	 * fs_struct, so tsk->fs->lock is not needed.
3368 	 */
3369 	p = old;
3370 	q = new;
3371 	while (p) {
3372 		q->mnt_ns = new_ns;
3373 		new_ns->mounts++;
3374 		if (new_fs) {
3375 			if (&p->mnt == new_fs->root.mnt) {
3376 				new_fs->root.mnt = mntget(&q->mnt);
3377 				rootmnt = &p->mnt;
3378 			}
3379 			if (&p->mnt == new_fs->pwd.mnt) {
3380 				new_fs->pwd.mnt = mntget(&q->mnt);
3381 				pwdmnt = &p->mnt;
3382 			}
3383 		}
3384 		p = next_mnt(p, old);
3385 		q = next_mnt(q, new);
3386 		if (!q)
3387 			break;
3388 		while (p->mnt.mnt_root != q->mnt.mnt_root)
3389 			p = next_mnt(p, old);
3390 	}
3391 	namespace_unlock();
3392 
3393 	if (rootmnt)
3394 		mntput(rootmnt);
3395 	if (pwdmnt)
3396 		mntput(pwdmnt);
3397 
3398 	return new_ns;
3399 }
3400 
3401 struct dentry *mount_subtree(struct vfsmount *m, const char *name)
3402 {
3403 	struct mount *mnt = real_mount(m);
3404 	struct mnt_namespace *ns;
3405 	struct super_block *s;
3406 	struct path path;
3407 	int err;
3408 
3409 	ns = alloc_mnt_ns(&init_user_ns, true);
3410 	if (IS_ERR(ns)) {
3411 		mntput(m);
3412 		return ERR_CAST(ns);
3413 	}
3414 	mnt->mnt_ns = ns;
3415 	ns->root = mnt;
3416 	ns->mounts++;
3417 	list_add(&mnt->mnt_list, &ns->list);
3418 
3419 	err = vfs_path_lookup(m->mnt_root, m,
3420 			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
3421 
3422 	put_mnt_ns(ns);
3423 
3424 	if (err)
3425 		return ERR_PTR(err);
3426 
3427 	/* trade a vfsmount reference for active sb one */
3428 	s = path.mnt->mnt_sb;
3429 	atomic_inc(&s->s_active);
3430 	mntput(path.mnt);
3431 	/* lock the sucker */
3432 	down_write(&s->s_umount);
3433 	/* ... and return the root of (sub)tree on it */
3434 	return path.dentry;
3435 }
3436 EXPORT_SYMBOL(mount_subtree);
3437 
3438 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3439 		char __user *, type, unsigned long, flags, void __user *, data)
3440 {
3441 	int ret;
3442 	char *kernel_type;
3443 	char *kernel_dev;
3444 	void *options;
3445 
3446 	kernel_type = copy_mount_string(type);
3447 	ret = PTR_ERR(kernel_type);
3448 	if (IS_ERR(kernel_type))
3449 		goto out_type;
3450 
3451 	kernel_dev = copy_mount_string(dev_name);
3452 	ret = PTR_ERR(kernel_dev);
3453 	if (IS_ERR(kernel_dev))
3454 		goto out_dev;
3455 
3456 	options = copy_mount_options(data);
3457 	ret = PTR_ERR(options);
3458 	if (IS_ERR(options))
3459 		goto out_data;
3460 
3461 	ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
3462 
3463 	kfree(options);
3464 out_data:
3465 	kfree(kernel_dev);
3466 out_dev:
3467 	kfree(kernel_type);
3468 out_type:
3469 	return ret;
3470 }
3471 
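/*
 * Illustrative legacy-API call (userspace; device, mountpoint, fstype and
 * option string are placeholders) exercising the copy-in helpers above:
 *
 *	#include <sys/mount.h>
 *
 *	mount("/dev/sda1", "/mnt", "ext4",
 *	      MS_NOSUID | MS_RELATIME, "errors=remount-ro");
 */
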
3472 /*
3473  * Create a kernel mount representation for a new, prepared superblock
3474  * (specified by fs_fd) and attach to an open_tree-like file descriptor.
3475  */
3476 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
3477 		unsigned int, attr_flags)
3478 {
3479 	struct mnt_namespace *ns;
3480 	struct fs_context *fc;
3481 	struct file *file;
3482 	struct path newmount;
3483 	struct mount *mnt;
3484 	struct fd f;
3485 	unsigned int mnt_flags = 0;
3486 	long ret;
3487 
3488 	if (!may_mount())
3489 		return -EPERM;
3490 
3491 	if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
3492 		return -EINVAL;
3493 
3494 	if (attr_flags & ~(MOUNT_ATTR_RDONLY |
3495 			   MOUNT_ATTR_NOSUID |
3496 			   MOUNT_ATTR_NODEV |
3497 			   MOUNT_ATTR_NOEXEC |
3498 			   MOUNT_ATTR__ATIME |
3499 			   MOUNT_ATTR_NODIRATIME))
3500 		return -EINVAL;
3501 
3502 	if (attr_flags & MOUNT_ATTR_RDONLY)
3503 		mnt_flags |= MNT_READONLY;
3504 	if (attr_flags & MOUNT_ATTR_NOSUID)
3505 		mnt_flags |= MNT_NOSUID;
3506 	if (attr_flags & MOUNT_ATTR_NODEV)
3507 		mnt_flags |= MNT_NODEV;
3508 	if (attr_flags & MOUNT_ATTR_NOEXEC)
3509 		mnt_flags |= MNT_NOEXEC;
3510 	if (attr_flags & MOUNT_ATTR_NODIRATIME)
3511 		mnt_flags |= MNT_NODIRATIME;
3512 
3513 	switch (attr_flags & MOUNT_ATTR__ATIME) {
3514 	case MOUNT_ATTR_STRICTATIME:
3515 		break;
3516 	case MOUNT_ATTR_NOATIME:
3517 		mnt_flags |= MNT_NOATIME;
3518 		break;
3519 	case MOUNT_ATTR_RELATIME:
3520 		mnt_flags |= MNT_RELATIME;
3521 		break;
3522 	default:
3523 		return -EINVAL;
3524 	}
3525 
3526 	f = fdget(fs_fd);
3527 	if (!f.file)
3528 		return -EBADF;
3529 
3530 	ret = -EINVAL;
3531 	if (f.file->f_op != &fscontext_fops)
3532 		goto err_fsfd;
3533 
3534 	fc = f.file->private_data;
3535 
3536 	ret = mutex_lock_interruptible(&fc->uapi_mutex);
3537 	if (ret < 0)
3538 		goto err_fsfd;
3539 
3540 	/* There must be a valid superblock or we can't mount it */
3541 	ret = -EINVAL;
3542 	if (!fc->root)
3543 		goto err_unlock;
3544 
3545 	ret = -EPERM;
3546 	if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
3547 		pr_warn("VFS: Mount too revealing\n");
3548 		goto err_unlock;
3549 	}
3550 
3551 	ret = -EBUSY;
3552 	if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
3553 		goto err_unlock;
3554 
3555 	ret = -EPERM;
3556 	if ((fc->sb_flags & SB_MANDLOCK) && !may_mandlock())
3557 		goto err_unlock;
3558 
3559 	newmount.mnt = vfs_create_mount(fc);
3560 	if (IS_ERR(newmount.mnt)) {
3561 		ret = PTR_ERR(newmount.mnt);
3562 		goto err_unlock;
3563 	}
3564 	newmount.dentry = dget(fc->root);
3565 	newmount.mnt->mnt_flags = mnt_flags;
3566 
3567 	/* We've done the mount bit - now move the file context into more or
3568 	 * less the same state as if we'd done an fspick().  We don't want to
3569 	 * do any memory allocation or anything like that at this point as we
3570 	 * don't want to have to handle any errors incurred.
3571 	 */
3572 	vfs_clean_context(fc);
3573 
3574 	ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
3575 	if (IS_ERR(ns)) {
3576 		ret = PTR_ERR(ns);
3577 		goto err_path;
3578 	}
3579 	mnt = real_mount(newmount.mnt);
3580 	mnt->mnt_ns = ns;
3581 	ns->root = mnt;
3582 	ns->mounts = 1;
3583 	list_add(&mnt->mnt_list, &ns->list);
3584 	mntget(newmount.mnt);
3585 
3586 	/* Attach to an apparent O_PATH fd with a note that we need to unmount
3587 	 * it, not simply put it.
3588 	 */
3589 	file = dentry_open(&newmount, O_PATH, fc->cred);
3590 	if (IS_ERR(file)) {
3591 		dissolve_on_fput(newmount.mnt);
3592 		ret = PTR_ERR(file);
3593 		goto err_path;
3594 	}
3595 	file->f_mode |= FMODE_NEED_UNMOUNT;
3596 
3597 	ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
3598 	if (ret >= 0)
3599 		fd_install(ret, file);
3600 	else
3601 		fput(file);
3602 
3603 err_path:
3604 	path_put(&newmount);
3605 err_unlock:
3606 	mutex_unlock(&fc->uapi_mutex);
3607 err_fsfd:
3608 	fdput(f);
3609 	return ret;
3610 }
3611 
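/*
 * Sketch of the new-API sequence whose final step is implemented above
 * (userspace; raw syscalls, as libc wrappers may be absent; the fs type,
 * source and flags are placeholders; error handling elided):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	int fsfd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
 *		"/dev/sda1", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
 */
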
3612 /*
3613  * Move a mount from one place to another.  In combination with
3614  * fsopen()/fsmount() this is used to install a new mount and in combination
3615  * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
3616  * a mount subtree.
3617  *
3618  * Note the flags value is a combination of MOVE_MOUNT_* flags.
3619  */
3620 SYSCALL_DEFINE5(move_mount,
3621 		int, from_dfd, const char __user *, from_pathname,
3622 		int, to_dfd, const char __user *, to_pathname,
3623 		unsigned int, flags)
3624 {
3625 	struct path from_path, to_path;
3626 	unsigned int lflags;
3627 	int ret = 0;
3628 
3629 	if (!may_mount())
3630 		return -EPERM;
3631 
3632 	if (flags & ~MOVE_MOUNT__MASK)
3633 		return -EINVAL;
3634 
3635 	/* If someone gives a pathname, they aren't permitted to move
3636 	 * from an fd that requires unmount as we can't get at the flag
3637 	 * to clear it afterwards.
3638 	 */
3639 	lflags = 0;
3640 	if (flags & MOVE_MOUNT_F_SYMLINKS)	lflags |= LOOKUP_FOLLOW;
3641 	if (flags & MOVE_MOUNT_F_AUTOMOUNTS)	lflags |= LOOKUP_AUTOMOUNT;
3642 	if (flags & MOVE_MOUNT_F_EMPTY_PATH)	lflags |= LOOKUP_EMPTY;
3643 
3644 	ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
3645 	if (ret < 0)
3646 		return ret;
3647 
3648 	lflags = 0;
3649 	if (flags & MOVE_MOUNT_T_SYMLINKS)	lflags |= LOOKUP_FOLLOW;
3650 	if (flags & MOVE_MOUNT_T_AUTOMOUNTS)	lflags |= LOOKUP_AUTOMOUNT;
3651 	if (flags & MOVE_MOUNT_T_EMPTY_PATH)	lflags |= LOOKUP_EMPTY;
3652 
3653 	ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
3654 	if (ret < 0)
3655 		goto out_from;
3656 
3657 	ret = security_move_mount(&from_path, &to_path);
3658 	if (ret < 0)
3659 		goto out_to;
3660 
3661 	ret = do_move_mount(&from_path, &to_path);
3662 
3663 out_to:
3664 	path_put(&to_path);
3665 out_from:
3666 	path_put(&from_path);
3667 	return ret;
3668 }
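
/*
 * Illustrative userspace use of the second combination noted above,
 * copying a mount subtree (again a sketch with placeholder paths,
 * using raw syscall(2) wrappers):
 *
 *	int tfd = syscall(SYS_open_tree, AT_FDCWD, "/src",
 *			  OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC |
 *			  AT_RECURSIVE);
 *	syscall(SYS_move_mount, tfd, "", AT_FDCWD, "/dst",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 */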

/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

bool path_is_under(const struct path *path1, const struct path *path2)
{
	bool res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
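
/*
 * For example, with one filesystem mounted on /mnt and another on
 * /mnt/sub, path_is_under() of "/mnt/sub/x" against "/mnt" is true:
 * the loop in is_path_reachable() climbs from the /mnt/sub mount to
 * its mountpoint dentry on the /mnt mount, after which is_subdir()
 * confirms containment.
 */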

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, root;
	struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_at(AT_FDCWD, new_root,
			     LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
	if (error)
		goto out0;

	error = user_path_at(AT_FDCWD, put_old,
			     LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	ex_parent = new_mnt->mnt_parent;
	root_parent = root_mnt->mnt_parent;
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(ex_parent) ||
		IS_MNT_SHARED(root_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	/* make certain new is below the root */
	if (!is_path_reachable(new_mnt, new.dentry, &root))
		goto out4;
	lock_mount_hash();
	umount_mnt(new_mnt);
	root_mp = unhash_mnt(root_mnt);  /* we'll need its mountpoint */
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, root_parent, root_mp);
	mnt_add_count(root_parent, -1);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	/* A moved mount should not expire automatically */
	list_del_init(&new_mnt->mnt_expire);
	put_mountpoint(root_mp);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error)
		mntput_no_expire(ex_parent);
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
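
/*
 * Illustrative userspace sequence (a sketch; paths are placeholders):
 * the classic switch-root dance, assuming /new_root is already a
 * mount point satisfying the restrictions above:
 *
 *	mkdir("/new_root/old", 0700);
 *	syscall(SYS_pivot_root, "/new_root", "/new_root/old");
 *	chdir("/");
 *	umount2("/old", MNT_DETACH);
 *	rmdir("/old");
 */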

static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mount *m;
	struct mnt_namespace *ns;
	struct path root;

	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = alloc_mnt_ns(&init_user_ns, false);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");
	m = real_mount(mnt);
	m->mnt_ns = ns;
	ns->root = m;
	ns->mounts = 1;
	list_add(&m->mnt_list, &ns->list);
	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	shmem_init();
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}

struct vfsmount *kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a long-term mount; don't release mnt until
		 * we unmount it, before the filesystem is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount);
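
/*
 * Typical in-kernel pairing (a sketch; "example_fs_type" is a
 * placeholder file_system_type registered elsewhere):
 *
 *	static struct vfsmount *example_mnt;
 *
 *	example_mnt = kern_mount(&example_fs_type);
 *	if (IS_ERR(example_mnt))
 *		return PTR_ERR(example_mnt);
 *
 * and on teardown, kern_unmount(example_mnt) must run before
 * unregister_filesystem(&example_fs_type).
 */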

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);

void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (mnt[i])
			real_mount(mnt[i])->mnt_ns = NULL;
	synchronize_rcu_expedited();
	for (i = 0; i < num; i++)
		mntput(mnt[i]);
}
EXPORT_SYMBOL(kern_unmount_array);

bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root? */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}

static bool mnt_already_visible(struct mnt_namespace *ns,
				const struct super_block *sb,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt;
	bool visible = false;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		int mnt_flags;

		if (mnt_is_cursor(mnt))
			continue;

		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY |
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	unlock_ns_list(ns);
	up_read(&namespace_sem);
	return visible;
}

static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}
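
/*
 * Informal example of the net effect: in a user namespace whose only
 * proc instance is mounted read-only (and thus carries
 * MNT_LOCK_READONLY), a new "mount -t proc" is refused unless it is
 * itself read-only, in which case the locked flags are folded into
 * *new_mnt_flags so the restriction is preserved on the new mount.
 */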

bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid.  This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}

static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}

static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}

static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct fs_struct *fs = nsset->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct user_namespace *user_ns = nsset->cred->user_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
	    !ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};

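/*
 * These hooks back setns(2) for mount namespaces.  Illustrative
 * userspace usage (a sketch; the pid is a placeholder, and the call
 * fails with EINVAL if the caller's fs_struct is shared, e.g. with
 * another thread):
 *
 *	int fd = open("/proc/1234/ns/mnt", O_RDONLY);
 *	if (fd >= 0 && setns(fd, CLONE_NEWNS) == -1)
 *		perror("setns");
 *	close(fd);
 */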