Lines Matching refs:mnt
84 static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry) in m_hash() argument
86 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); in m_hash()
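The m_hash() references above hash a (vfsmount, dentry) pair down to a bucket in the mount hash table. A simplified userspace sketch of that pointer-combining pattern, with the cache-line size, shift and mask chosen purely for illustration:

#include <stdio.h>

#define L1_CACHE_BYTES 64                    /* assumed cache-line size */
#define HASH_SHIFT     8                     /* assumed 256-bucket table */
#define HASH_MASK      ((1UL << HASH_SHIFT) - 1)

/* Combine two object addresses into a bucket index, in the spirit of
 * m_hash(): scale each pointer by the cache-line size, add, fold the
 * high bits back in, and mask to the table size. */
static unsigned long bucket_index(const void *mnt, const void *dentry)
{
	unsigned long tmp = (unsigned long)mnt / L1_CACHE_BYTES;

	tmp += (unsigned long)dentry / L1_CACHE_BYTES;
	tmp += tmp >> HASH_SHIFT;
	return tmp & HASH_MASK;
}

int main(void)
{
	int a, b;                            /* stand-ins for a mount and a dentry */

	printf("bucket %lu\n", bucket_index(&a, &b));
	return 0;
}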
103 static int mnt_alloc_id(struct mount *mnt) in mnt_alloc_id() argument
110 res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); in mnt_alloc_id()
112 mnt_id_start = mnt->mnt_id + 1; in mnt_alloc_id()
120 static void mnt_free_id(struct mount *mnt) in mnt_free_id() argument
122 int id = mnt->mnt_id; in mnt_free_id()
135 static int mnt_alloc_group_id(struct mount *mnt) in mnt_alloc_group_id() argument
144 &mnt->mnt_group_id); in mnt_alloc_group_id()
146 mnt_group_start = mnt->mnt_group_id + 1; in mnt_alloc_group_id()
154 void mnt_release_group_id(struct mount *mnt) in mnt_release_group_id() argument
156 int id = mnt->mnt_group_id; in mnt_release_group_id()
160 mnt->mnt_group_id = 0; in mnt_release_group_id()
166 static inline void mnt_add_count(struct mount *mnt, int n) in mnt_add_count() argument
169 this_cpu_add(mnt->mnt_pcp->mnt_count, n); in mnt_add_count()
172 mnt->mnt_count += n; in mnt_add_count()
180 unsigned int mnt_get_count(struct mount *mnt) in mnt_get_count() argument
187 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; in mnt_get_count()
192 return mnt->mnt_count; in mnt_get_count()
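The mnt_add_count()/mnt_get_count() references above show the split between a cheap per-CPU increment and an expensive summation over all CPUs. A minimal userspace sketch of that counting pattern, with NR_CPUS and the structure invented for the example:

#include <stdio.h>

#define NR_CPUS 4                            /* assumed CPU count */

struct pcp_count {
	long count[NR_CPUS];                 /* one slot per CPU */
};

/* Fast path: touch only the caller's slot. */
static void pcp_add(struct pcp_count *c, int cpu, int n)
{
	c->count[cpu] += n;
}

/* Slow path: the true total is only known after summing every slot. */
static long pcp_sum(const struct pcp_count *c)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += c->count[cpu];
	return sum;
}

int main(void)
{
	struct pcp_count c = { { 0 } };

	pcp_add(&c, 0, 1);
	pcp_add(&c, 2, 1);
	pcp_add(&c, 2, -1);
	printf("total: %ld\n", pcp_sum(&c));
	return 0;
}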
201 mntput(&m->mnt); in drop_mountpoint()
206 struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); in alloc_vfsmnt() local
207 if (mnt) { in alloc_vfsmnt()
210 err = mnt_alloc_id(mnt); in alloc_vfsmnt()
215 mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL); in alloc_vfsmnt()
216 if (!mnt->mnt_devname) in alloc_vfsmnt()
221 mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); in alloc_vfsmnt()
222 if (!mnt->mnt_pcp) in alloc_vfsmnt()
225 this_cpu_add(mnt->mnt_pcp->mnt_count, 1); in alloc_vfsmnt()
227 mnt->mnt_count = 1; in alloc_vfsmnt()
228 mnt->mnt_writers = 0; in alloc_vfsmnt()
230 mnt->mnt.data = NULL; in alloc_vfsmnt()
232 INIT_HLIST_NODE(&mnt->mnt_hash); in alloc_vfsmnt()
233 INIT_LIST_HEAD(&mnt->mnt_child); in alloc_vfsmnt()
234 INIT_LIST_HEAD(&mnt->mnt_mounts); in alloc_vfsmnt()
235 INIT_LIST_HEAD(&mnt->mnt_list); in alloc_vfsmnt()
236 INIT_LIST_HEAD(&mnt->mnt_expire); in alloc_vfsmnt()
237 INIT_LIST_HEAD(&mnt->mnt_share); in alloc_vfsmnt()
238 INIT_LIST_HEAD(&mnt->mnt_slave_list); in alloc_vfsmnt()
239 INIT_LIST_HEAD(&mnt->mnt_slave); in alloc_vfsmnt()
240 INIT_HLIST_NODE(&mnt->mnt_mp_list); in alloc_vfsmnt()
241 INIT_LIST_HEAD(&mnt->mnt_umounting); in alloc_vfsmnt()
243 INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); in alloc_vfsmnt()
245 init_fs_pin(&mnt->mnt_umount, drop_mountpoint); in alloc_vfsmnt()
247 return mnt; in alloc_vfsmnt()
251 kfree_const(mnt->mnt_devname); in alloc_vfsmnt()
254 mnt_free_id(mnt); in alloc_vfsmnt()
256 kmem_cache_free(mnt_cache, mnt); in alloc_vfsmnt()
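The alloc_vfsmnt() references above follow the usual staged-allocation pattern: each step that can fail unwinds only what was already set up before returning NULL. A small illustrative sketch of that goto-based unwind, with the structure and field names invented for the example:

#include <stdlib.h>
#include <string.h>

struct fake_mount {
	char *devname;
	long *pcp;
};

static struct fake_mount *fake_alloc(const char *name)
{
	struct fake_mount *m = calloc(1, sizeof(*m));

	if (!m)
		goto out;
	m->devname = strdup(name);           /* step 1: copy the device name */
	if (!m->devname)
		goto out_free_m;
	m->pcp = calloc(4, sizeof(*m->pcp)); /* step 2: per-CPU-style storage */
	if (!m->pcp)
		goto out_free_devname;
	return m;

out_free_devname:
	free(m->devname);
out_free_m:
	free(m);
out:
	return NULL;
}

int main(void)
{
	struct fake_mount *m = fake_alloc("rootfs");

	if (m) {
		free(m->pcp);
		free(m->devname);
		free(m);
	}
	return m ? 0 : 1;
}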
279 int __mnt_is_readonly(struct vfsmount *mnt) in __mnt_is_readonly() argument
281 if (mnt->mnt_flags & MNT_READONLY) in __mnt_is_readonly()
283 if (mnt->mnt_sb->s_flags & MS_RDONLY) in __mnt_is_readonly()
289 static inline void mnt_inc_writers(struct mount *mnt) in mnt_inc_writers() argument
292 this_cpu_inc(mnt->mnt_pcp->mnt_writers); in mnt_inc_writers()
294 mnt->mnt_writers++; in mnt_inc_writers()
298 static inline void mnt_dec_writers(struct mount *mnt) in mnt_dec_writers() argument
301 this_cpu_dec(mnt->mnt_pcp->mnt_writers); in mnt_dec_writers()
303 mnt->mnt_writers--; in mnt_dec_writers()
307 static unsigned int mnt_get_writers(struct mount *mnt) in mnt_get_writers() argument
314 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; in mnt_get_writers()
319 return mnt->mnt_writers; in mnt_get_writers()
323 static int mnt_is_readonly(struct vfsmount *mnt) in mnt_is_readonly() argument
325 if (mnt->mnt_sb->s_readonly_remount) in mnt_is_readonly()
329 return __mnt_is_readonly(mnt); in mnt_is_readonly()
350 struct mount *mnt = real_mount(m); in __mnt_want_write() local
354 mnt_inc_writers(mnt); in __mnt_want_write()
361 while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) in __mnt_want_write()
370 mnt_dec_writers(mnt); in __mnt_want_write()
411 int mnt_clone_write(struct vfsmount *mnt) in mnt_clone_write() argument
414 if (__mnt_is_readonly(mnt)) in mnt_clone_write()
417 mnt_inc_writers(real_mount(mnt)); in mnt_clone_write()
433 return __mnt_want_write(file->f_path.mnt); in __mnt_want_write_file()
435 return mnt_clone_write(file->f_path.mnt); in __mnt_want_write_file()
449 sb_start_write(file->f_path.mnt->mnt_sb); in mnt_want_write_file()
452 sb_end_write(file->f_path.mnt->mnt_sb); in mnt_want_write_file()
465 void __mnt_drop_write(struct vfsmount *mnt) in __mnt_drop_write() argument
468 mnt_dec_writers(real_mount(mnt)); in __mnt_drop_write()
480 void mnt_drop_write(struct vfsmount *mnt) in mnt_drop_write() argument
482 __mnt_drop_write(mnt); in mnt_drop_write()
483 sb_end_write(mnt->mnt_sb); in mnt_drop_write()
489 __mnt_drop_write(file->f_path.mnt); in __mnt_drop_write_file()
494 mnt_drop_write(file->f_path.mnt); in mnt_drop_write_file()
498 static int mnt_make_readonly(struct mount *mnt) in mnt_make_readonly() argument
503 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; in mnt_make_readonly()
526 if (mnt_get_writers(mnt) > 0) in mnt_make_readonly()
529 mnt->mnt.mnt_flags |= MNT_READONLY; in mnt_make_readonly()
535 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; in mnt_make_readonly()
540 static void __mnt_unmake_readonly(struct mount *mnt) in __mnt_unmake_readonly() argument
543 mnt->mnt.mnt_flags &= ~MNT_READONLY; in __mnt_unmake_readonly()
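Taken together, the __mnt_want_write() and mnt_make_readonly() references above describe a two-sided handshake: writers bump a counter and back off while a write-hold flag is raised, while the remount side raises the flag, checks that the writer count has drained, and only then flips the read-only bit. A minimal C11 sketch of that protocol; the flag values and helper names are assumptions, and the memory barriers and locking the kernel relies on are not reproduced here:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define F_READONLY   0x1
#define F_WRITE_HOLD 0x2

static atomic_int flags;
static atomic_int writers;

static bool want_write(void)
{
	atomic_fetch_add(&writers, 1);
	while (atomic_load(&flags) & F_WRITE_HOLD)
		;                            /* spin until the hold is lifted */
	if (atomic_load(&flags) & F_READONLY) {
		atomic_fetch_sub(&writers, 1);   /* lost the race: back out */
		return false;
	}
	return true;
}

static void drop_write(void)
{
	atomic_fetch_sub(&writers, 1);
}

static bool make_readonly(void)
{
	atomic_fetch_or(&flags, F_WRITE_HOLD);
	if (atomic_load(&writers) > 0) {     /* active writers: give up */
		atomic_fetch_and(&flags, ~F_WRITE_HOLD);
		return false;
	}
	atomic_fetch_or(&flags, F_READONLY);
	atomic_fetch_and(&flags, ~F_WRITE_HOLD);
	return true;
}

int main(void)
{
	printf("want_write: %d\n", want_write());
	drop_write();
	printf("make_readonly: %d\n", make_readonly());
	printf("want_write after remount: %d\n", want_write());
	return 0;
}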
549 struct mount *mnt; in sb_prepare_remount_readonly() local
557 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { in sb_prepare_remount_readonly()
558 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { in sb_prepare_remount_readonly()
559 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; in sb_prepare_remount_readonly()
561 if (mnt_get_writers(mnt) > 0) { in sb_prepare_remount_readonly()
574 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { in sb_prepare_remount_readonly()
575 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) in sb_prepare_remount_readonly()
576 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; in sb_prepare_remount_readonly()
583 static void free_vfsmnt(struct mount *mnt) in free_vfsmnt() argument
585 kfree(mnt->mnt.data); in free_vfsmnt()
586 kfree_const(mnt->mnt_devname); in free_vfsmnt()
588 free_percpu(mnt->mnt_pcp); in free_vfsmnt()
590 kmem_cache_free(mnt_cache, mnt); in free_vfsmnt()
601 struct mount *mnt; in __legitimize_mnt() local
606 mnt = real_mount(bastard); in __legitimize_mnt()
607 mnt_add_count(mnt, 1); in __legitimize_mnt()
612 mnt_add_count(mnt, -1); in __legitimize_mnt()
617 mnt_add_count(mnt, -1); in __legitimize_mnt()
644 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) in __lookup_mnt() argument
646 struct hlist_head *head = m_hash(mnt, dentry); in __lookup_mnt()
650 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) in __lookup_mnt()
680 child_mnt = __lookup_mnt(path->mnt, path->dentry); in lookup_mnt()
681 m = child_mnt ? &child_mnt->mnt : NULL; in lookup_mnt()
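The __lookup_mnt() references above find a child mount by walking the hash bucket picked for (parent mount, mountpoint dentry) and matching on both fields. A simplified sketch of that lookup, with the chain and field names invented for the example:

#include <stddef.h>
#include <stdio.h>

struct child {
	struct child *next;                  /* hash-chain link */
	const void *parent;                  /* mount this child is attached to */
	const void *mountpoint;              /* dentry it is attached at */
};

static struct child *lookup(struct child *bucket,
			    const void *parent, const void *mountpoint)
{
	for (struct child *c = bucket; c; c = c->next)
		if (c->parent == parent && c->mountpoint == mountpoint)
			return c;
	return NULL;
}

int main(void)
{
	int p, d;
	struct child a = { NULL, &p, &d };

	printf("%s\n", lookup(&a, &p, &d) ? "found" : "missing");
	return 0;
}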
705 struct mount *mnt; in __is_local_mountpoint() local
712 list_for_each_entry(mnt, &ns->list, mnt_list) { in __is_local_mountpoint()
713 is_covered = (mnt->mnt_mountpoint == dentry); in __is_local_mountpoint()
799 static inline int check_mnt(struct mount *mnt) in check_mnt() argument
801 return mnt->mnt_ns == current->nsproxy->mnt_ns; in check_mnt()
829 static void unhash_mnt(struct mount *mnt) in unhash_mnt() argument
831 mnt->mnt_parent = mnt; in unhash_mnt()
832 mnt->mnt_mountpoint = mnt->mnt.mnt_root; in unhash_mnt()
833 list_del_init(&mnt->mnt_child); in unhash_mnt()
834 hlist_del_init_rcu(&mnt->mnt_hash); in unhash_mnt()
835 hlist_del_init(&mnt->mnt_mp_list); in unhash_mnt()
836 put_mountpoint(mnt->mnt_mp); in unhash_mnt()
837 mnt->mnt_mp = NULL; in unhash_mnt()
843 static void detach_mnt(struct mount *mnt, struct path *old_path) in detach_mnt() argument
845 old_path->dentry = mnt->mnt_mountpoint; in detach_mnt()
846 old_path->mnt = &mnt->mnt_parent->mnt; in detach_mnt()
847 unhash_mnt(mnt); in detach_mnt()
853 static void umount_mnt(struct mount *mnt) in umount_mnt() argument
856 mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint; in umount_mnt()
857 unhash_mnt(mnt); in umount_mnt()
863 void mnt_set_mountpoint(struct mount *mnt, in mnt_set_mountpoint() argument
868 mnt_add_count(mnt, 1); /* essentially, that's mntget */ in mnt_set_mountpoint()
870 child_mnt->mnt_parent = mnt; in mnt_set_mountpoint()
875 static void __attach_mnt(struct mount *mnt, struct mount *parent) in __attach_mnt() argument
877 hlist_add_head_rcu(&mnt->mnt_hash, in __attach_mnt()
878 m_hash(&parent->mnt, mnt->mnt_mountpoint)); in __attach_mnt()
879 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); in __attach_mnt()
885 static void attach_mnt(struct mount *mnt, in attach_mnt() argument
889 mnt_set_mountpoint(parent, mp, mnt); in attach_mnt()
890 __attach_mnt(mnt, parent); in attach_mnt()
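The mnt_set_mountpoint()/__attach_mnt()/attach_mnt() references above amount to recording the parent and mountpoint in the child and then linking the child into the parent's bookkeeping. A stand-in sketch of that attach step, with far simpler structures than the kernel's:

#include <stdio.h>

struct m {
	struct m *parent;
	const char *mountpoint;
	struct m *children;                  /* head of this mount's child list */
	struct m *sibling;                   /* next child of the same parent */
};

static void attach(struct m *child, struct m *parent, const char *where)
{
	child->parent = parent;              /* essentially, take a reference */
	child->mountpoint = where;
	child->sibling = parent->children;   /* push onto the parent's list */
	parent->children = child;
}

int main(void)
{
	struct m root = { &root, "/", NULL, NULL };
	struct m proc = { NULL, NULL, NULL, NULL };

	attach(&proc, &root, "/proc");
	printf("proc mounted on %s\n", proc.mountpoint);
	return 0;
}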
893 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt) in mnt_change_mountpoint() argument
895 struct mountpoint *old_mp = mnt->mnt_mp; in mnt_change_mountpoint()
896 struct dentry *old_mountpoint = mnt->mnt_mountpoint; in mnt_change_mountpoint()
897 struct mount *old_parent = mnt->mnt_parent; in mnt_change_mountpoint()
899 list_del_init(&mnt->mnt_child); in mnt_change_mountpoint()
900 hlist_del_init(&mnt->mnt_mp_list); in mnt_change_mountpoint()
901 hlist_del_init_rcu(&mnt->mnt_hash); in mnt_change_mountpoint()
903 attach_mnt(mnt, parent, mp); in mnt_change_mountpoint()
928 static void commit_tree(struct mount *mnt) in commit_tree() argument
930 struct mount *parent = mnt->mnt_parent; in commit_tree()
935 BUG_ON(parent == mnt); in commit_tree()
937 list_add_tail(&head, &mnt->mnt_list); in commit_tree()
946 __attach_mnt(mnt, parent); in commit_tree()
979 struct mount *mnt; in vfs_kern_mount() local
985 mnt = alloc_vfsmnt(name); in vfs_kern_mount()
986 if (!mnt) in vfs_kern_mount()
990 mnt->mnt.data = type->alloc_mnt_data(); in vfs_kern_mount()
991 if (!mnt->mnt.data) { in vfs_kern_mount()
992 mnt_free_id(mnt); in vfs_kern_mount()
993 free_vfsmnt(mnt); in vfs_kern_mount()
998 mnt->mnt.mnt_flags = MNT_INTERNAL; in vfs_kern_mount()
1000 root = mount_fs(type, flags, name, &mnt->mnt, data); in vfs_kern_mount()
1002 mnt_free_id(mnt); in vfs_kern_mount()
1003 free_vfsmnt(mnt); in vfs_kern_mount()
1007 mnt->mnt.mnt_root = root; in vfs_kern_mount()
1008 mnt->mnt.mnt_sb = root->d_sb; in vfs_kern_mount()
1009 mnt->mnt_mountpoint = mnt->mnt.mnt_root; in vfs_kern_mount()
1010 mnt->mnt_parent = mnt; in vfs_kern_mount()
1012 list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts); in vfs_kern_mount()
1014 return &mnt->mnt; in vfs_kern_mount()
1021 struct super_block *sb = old->mnt.mnt_sb; in clone_mnt()
1022 struct mount *mnt; in clone_mnt() local
1025 mnt = alloc_vfsmnt(old->mnt_devname); in clone_mnt()
1026 if (!mnt) in clone_mnt()
1030 mnt->mnt.data = sb->s_op->clone_mnt_data(old->mnt.data); in clone_mnt()
1031 if (!mnt->mnt.data) { in clone_mnt()
1038 mnt->mnt_group_id = 0; /* not a peer of original */ in clone_mnt()
1040 mnt->mnt_group_id = old->mnt_group_id; in clone_mnt()
1042 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { in clone_mnt()
1043 err = mnt_alloc_group_id(mnt); in clone_mnt()
1048 mnt->mnt.mnt_flags = old->mnt.mnt_flags; in clone_mnt()
1049 mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); in clone_mnt()
1052 mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; in clone_mnt()
1054 if (mnt->mnt.mnt_flags & MNT_READONLY) in clone_mnt()
1055 mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; in clone_mnt()
1057 if (mnt->mnt.mnt_flags & MNT_NODEV) in clone_mnt()
1058 mnt->mnt.mnt_flags |= MNT_LOCK_NODEV; in clone_mnt()
1060 if (mnt->mnt.mnt_flags & MNT_NOSUID) in clone_mnt()
1061 mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID; in clone_mnt()
1063 if (mnt->mnt.mnt_flags & MNT_NOEXEC) in clone_mnt()
1064 mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC; in clone_mnt()
1070 mnt->mnt.mnt_flags |= MNT_LOCKED; in clone_mnt()
1073 mnt->mnt.mnt_sb = sb; in clone_mnt()
1074 mnt->mnt.mnt_root = dget(root); in clone_mnt()
1075 mnt->mnt_mountpoint = mnt->mnt.mnt_root; in clone_mnt()
1076 mnt->mnt_parent = mnt; in clone_mnt()
1078 list_add_tail(&mnt->mnt_instance, &sb->s_mounts); in clone_mnt()
1083 list_add(&mnt->mnt_slave, &old->mnt_slave_list); in clone_mnt()
1084 mnt->mnt_master = old; in clone_mnt()
1085 CLEAR_MNT_SHARED(mnt); in clone_mnt()
1088 list_add(&mnt->mnt_share, &old->mnt_share); in clone_mnt()
1090 list_add(&mnt->mnt_slave, &old->mnt_slave); in clone_mnt()
1091 mnt->mnt_master = old->mnt_master; in clone_mnt()
1094 set_mnt_shared(mnt); in clone_mnt()
1100 list_add(&mnt->mnt_expire, &old->mnt_expire); in clone_mnt()
1103 return mnt; in clone_mnt()
1106 mnt_free_id(mnt); in clone_mnt()
1107 free_vfsmnt(mnt); in clone_mnt()
1111 static void cleanup_mnt(struct mount *mnt) in cleanup_mnt() argument
1123 WARN_ON(mnt_get_writers(mnt)); in cleanup_mnt()
1124 if (unlikely(mnt->mnt_pins.first)) in cleanup_mnt()
1125 mnt_pin_kill(mnt); in cleanup_mnt()
1126 fsnotify_vfsmount_delete(&mnt->mnt); in cleanup_mnt()
1127 dput(mnt->mnt.mnt_root); in cleanup_mnt()
1128 deactivate_super(mnt->mnt.mnt_sb); in cleanup_mnt()
1129 mnt_free_id(mnt); in cleanup_mnt()
1130 call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt); in cleanup_mnt()
1151 static void mntput_no_expire(struct mount *mnt) in mntput_no_expire() argument
1154 if (likely(READ_ONCE(mnt->mnt_ns))) { in mntput_no_expire()
1164 mnt_add_count(mnt, -1); in mntput_no_expire()
1174 mnt_add_count(mnt, -1); in mntput_no_expire()
1175 if (mnt_get_count(mnt)) { in mntput_no_expire()
1180 if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) { in mntput_no_expire()
1185 mnt->mnt.mnt_flags |= MNT_DOOMED; in mntput_no_expire()
1188 list_del(&mnt->mnt_instance); in mntput_no_expire()
1190 if (unlikely(!list_empty(&mnt->mnt_mounts))) { in mntput_no_expire()
1192 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { in mntput_no_expire()
1198 if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) { in mntput_no_expire()
1201 init_task_work(&mnt->mnt_rcu, __cleanup_mnt); in mntput_no_expire()
1202 if (!task_work_add(task, &mnt->mnt_rcu, true)) in mntput_no_expire()
1205 if (llist_add(&mnt->mnt_llist, &delayed_mntput_list)) in mntput_no_expire()
1209 cleanup_mnt(mnt); in mntput_no_expire()
1212 void mntput(struct vfsmount *mnt) in mntput() argument
1214 if (mnt) { in mntput()
1215 struct mount *m = real_mount(mnt); in mntput()
1224 struct vfsmount *mntget(struct vfsmount *mnt) in mntget() argument
1226 if (mnt) in mntget()
1227 mnt_add_count(real_mount(mnt), 1); in mntget()
1228 return mnt; in mntget()
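The mntget()/mntput()/mntput_no_expire() references above follow the usual get/put discipline: get bumps the count, put drops it, and the final put triggers teardown. A simplified userspace sketch with a single plain counter (the kernel splits the count per CPU and defers the real cleanup):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int count;
	const char *name;
};

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->count++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (!o)
		return;
	if (--o->count == 0) {               /* last reference: tear down */
		printf("freeing %s\n", o->name);
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->count = 1;
	o->name = "example";
	obj_get(o);
	obj_put(o);
	obj_put(o);                          /* final put frees the object */
	return 0;
}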
1235 p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE); in mnt_clone_internal()
1238 p->mnt.mnt_flags |= MNT_INTERNAL; in mnt_clone_internal()
1239 return &p->mnt; in mnt_clone_internal()
1342 return p->show(m, &r->mnt); in m_show()
1363 struct mount *mnt = real_mount(m); in may_umount_tree() local
1371 for (p = mnt; p; p = next_mnt(p, mnt)) { in may_umount_tree()
1398 int may_umount(struct vfsmount *mnt) in may_umount() argument
1403 if (propagate_mount_busy(real_mount(mnt), 2)) in may_umount()
1441 static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) in disconnect_mount() argument
1448 if (!mnt_has_parent(mnt)) in disconnect_mount()
1455 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) in disconnect_mount()
1463 if (IS_MNT_LOCKED(mnt)) in disconnect_mount()
1474 static void umount_tree(struct mount *mnt, enum umount_tree_flags how) in umount_tree() argument
1480 propagate_mount_unlock(mnt); in umount_tree()
1483 for (p = mnt; p; p = next_mnt(p, mnt)) { in umount_tree()
1484 p->mnt.mnt_flags |= MNT_UMOUNT; in umount_tree()
1510 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; in umount_tree()
1514 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, in umount_tree()
1529 static void shrink_submounts(struct mount *mnt);
1531 static int do_umount(struct mount *mnt, int flags) in do_umount() argument
1533 struct super_block *sb = mnt->mnt.mnt_sb; in do_umount()
1536 retval = security_sb_umount(&mnt->mnt, flags); in do_umount()
1547 if (&mnt->mnt == current->fs->root.mnt || in do_umount()
1556 if (mnt_get_count(mnt) != 2) { in do_umount()
1562 if (!xchg(&mnt->mnt_expiry_mark, 1)) in do_umount()
1589 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { in do_umount()
1608 if (mnt->mnt.mnt_flags & MNT_LOCKED) in do_umount()
1613 if (!list_empty(&mnt->mnt_list)) in do_umount()
1614 umount_tree(mnt, UMOUNT_PROPAGATE); in do_umount()
1617 shrink_submounts(mnt); in do_umount()
1619 if (!propagate_mount_busy(mnt, 2)) { in do_umount()
1620 if (!list_empty(&mnt->mnt_list)) in do_umount()
1621 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); in do_umount()
1644 struct mount *mnt; in __detach_mounts() local
1654 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); in __detach_mounts()
1655 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { in __detach_mounts()
1656 hlist_add_head(&mnt->mnt_umount.s_list, &unmounted); in __detach_mounts()
1657 umount_mnt(mnt); in __detach_mounts()
1659 else umount_tree(mnt, UMOUNT_CONNECTED); in __detach_mounts()
1686 struct mount *mnt; in SYSCALL_DEFINE2() local
1702 mnt = real_mount(path.mnt); in SYSCALL_DEFINE2()
1704 if (path.dentry != path.mnt->mnt_root) in SYSCALL_DEFINE2()
1706 if (!check_mnt(mnt)) in SYSCALL_DEFINE2()
1708 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ in SYSCALL_DEFINE2()
1714 retval = do_umount(mnt, flags); in SYSCALL_DEFINE2()
1718 mntput_no_expire(mnt); in SYSCALL_DEFINE2()
1760 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, in copy_tree() argument
1765 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt)) in copy_tree()
1771 res = q = clone_mnt(mnt, dentry, flag); in copy_tree()
1775 q->mnt_mountpoint = mnt->mnt_mountpoint; in copy_tree()
1777 p = mnt; in copy_tree()
1778 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { in copy_tree()
1786 if (s->mnt.mnt_flags & MNT_LOCKED) { in copy_tree()
1796 is_mnt_ns_file(s->mnt.mnt_root)) { in copy_tree()
1806 q = clone_mnt(p, p->mnt.mnt_root, flag); in copy_tree()
1831 if (!check_mnt(real_mount(path->mnt))) in collect_mounts()
1834 tree = copy_tree(real_mount(path->mnt), path->dentry, in collect_mounts()
1839 return &tree->mnt; in collect_mounts()
1842 void drop_collected_mounts(struct vfsmount *mnt) in drop_collected_mounts() argument
1846 umount_tree(real_mount(mnt), 0); in drop_collected_mounts()
1851 static bool has_locked_children(struct mount *mnt, struct dentry *dentry) in has_locked_children() argument
1855 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { in has_locked_children()
1859 if (child->mnt.mnt_flags & MNT_LOCKED) in has_locked_children()
1876 struct mount *old_mnt = real_mount(path->mnt); in clone_private_mount()
1895 return &new_mnt->mnt; in clone_private_mount()
1906 struct mount *mnt; in iterate_mounts() local
1910 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { in iterate_mounts()
1911 res = f(&mnt->mnt, arg); in iterate_mounts()
1918 static void cleanup_group_ids(struct mount *mnt, struct mount *end) in cleanup_group_ids() argument
1922 for (p = mnt; p != end; p = next_mnt(p, mnt)) { in cleanup_group_ids()
1928 static int invent_group_ids(struct mount *mnt, bool recurse) in invent_group_ids() argument
1932 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) { in invent_group_ids()
1936 cleanup_group_ids(mnt, p); in invent_group_ids()
1945 int count_mounts(struct mnt_namespace *ns, struct mount *mnt) in count_mounts() argument
1951 for (p = mnt; p; p = next_mnt(p, mnt)) in count_mounts()
2045 smp = get_mountpoint(source_mnt->mnt.mnt_root); in attach_recursive_mnt()
2081 q = __lookup_mnt(&child->mnt_parent->mnt, in attach_recursive_mnt()
2112 struct vfsmount *mnt; in lock_mount() local
2121 mnt = lookup_mnt(path); in lock_mount()
2122 if (likely(!mnt)) { in lock_mount()
2134 path->mnt = mnt; in lock_mount()
2135 dentry = path->dentry = dget(mnt->mnt_root); in lock_mount()
2151 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) in graft_tree() argument
2153 if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER) in graft_tree()
2157 d_is_dir(mnt->mnt.mnt_root)) in graft_tree()
2160 return attach_recursive_mnt(mnt, p, mp, NULL); in graft_tree()
2186 struct mount *mnt = real_mount(path->mnt); in do_change_type() local
2191 if (path->dentry != path->mnt->mnt_root) in do_change_type()
2200 err = invent_group_ids(mnt, recurse); in do_change_type()
2206 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) in do_change_type()
2222 struct mount *mnt = NULL, *old, *parent; in do_loopback() local
2240 old = real_mount(old_path.mnt); in do_loopback()
2241 parent = real_mount(path->mnt); in do_loopback()
2257 mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE); in do_loopback()
2259 mnt = clone_mnt(old, old_path.dentry, 0); in do_loopback()
2261 if (IS_ERR(mnt)) { in do_loopback()
2262 err = PTR_ERR(mnt); in do_loopback()
2266 mnt->mnt.mnt_flags &= ~MNT_LOCKED; in do_loopback()
2268 err = graft_tree(mnt, parent, mp); in do_loopback()
2271 umount_tree(mnt, UMOUNT_SYNC); in do_loopback()
2281 static int change_mount_flags(struct vfsmount *mnt, int ms_flags) in change_mount_flags() argument
2288 if (readonly_request == __mnt_is_readonly(mnt)) in change_mount_flags()
2292 error = mnt_make_readonly(real_mount(mnt)); in change_mount_flags()
2294 __mnt_unmake_readonly(real_mount(mnt)); in change_mount_flags()
2307 struct super_block *sb = path->mnt->mnt_sb; in do_remount()
2308 struct mount *mnt = real_mount(path->mnt); in do_remount() local
2310 if (!check_mnt(mnt)) in do_remount()
2313 if (path->dentry != path->mnt->mnt_root) in do_remount()
2322 if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) && in do_remount()
2326 if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) && in do_remount()
2329 if ((mnt->mnt_ns->user_ns != &init_user_ns) && in do_remount()
2336 if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) && in do_remount()
2340 if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) && in do_remount()
2344 if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) && in do_remount()
2345 ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) { in do_remount()
2355 err = change_mount_flags(path->mnt, flags); in do_remount()
2359 err = do_remount_sb2(path->mnt, sb, flags, data, 0); in do_remount()
2362 propagate_remount(mnt); in do_remount()
2368 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; in do_remount()
2369 mnt->mnt.mnt_flags = mnt_flags; in do_remount()
2370 touch_mnt_namespace(mnt->mnt_ns); in do_remount()
2377 static inline int tree_contains_unbindable(struct mount *mnt) in tree_contains_unbindable() argument
2380 for (p = mnt; p; p = next_mnt(p, mnt)) { in tree_contains_unbindable()
2405 old = real_mount(old_path.mnt); in do_move_mount()
2406 p = real_mount(path->mnt); in do_move_mount()
2412 if (old->mnt.mnt_flags & MNT_LOCKED) in do_move_mount()
2416 if (old_path.dentry != old_path.mnt->mnt_root) in do_move_mount()
2441 err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path); in do_move_mount()
2457 static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype) in fs_set_subtype() argument
2469 mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL); in fs_set_subtype()
2471 if (!mnt->mnt_sb->s_subtype) in fs_set_subtype()
2473 return mnt; in fs_set_subtype()
2476 mntput(mnt); in fs_set_subtype()
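The fs_set_subtype() references above handle filesystem types of the form "type.subtype" (for example "fuse.sshfs"), where everything after the first dot is recorded as the superblock's subtype. A small illustrative sketch of that split; the helper name is invented:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static char *subtype_of(const char *fstype)
{
	const char *dot = strchr(fstype, '.');

	if (!dot || !dot[1])
		return NULL;                 /* no subtype given */
	return strdup(dot + 1);
}

int main(void)
{
	char *sub = subtype_of("fuse.sshfs");

	printf("subtype: %s\n", sub ? sub : "(none)");
	free(sub);
	return 0;
}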
2495 parent = real_mount(path->mnt); in do_add_mount()
2508 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && in do_add_mount()
2509 path->mnt->mnt_root == path->dentry) in do_add_mount()
2513 if (d_is_symlink(newmnt->mnt.mnt_root)) in do_add_mount()
2516 newmnt->mnt.mnt_flags = mnt_flags; in do_add_mount()
2535 struct vfsmount *mnt; in do_new_mount() local
2565 mnt = vfs_kern_mount(type, flags, name, data); in do_new_mount()
2566 if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) && in do_new_mount()
2567 !mnt->mnt_sb->s_subtype) in do_new_mount()
2568 mnt = fs_set_subtype(mnt, fstype); in do_new_mount()
2571 if (IS_ERR(mnt)) in do_new_mount()
2572 return PTR_ERR(mnt); in do_new_mount()
2574 err = do_add_mount(real_mount(mnt), path, mnt_flags); in do_new_mount()
2576 mntput(mnt); in do_new_mount()
2582 struct mount *mnt = real_mount(m); in finish_automount() local
2587 BUG_ON(mnt_get_count(mnt) < 2); in finish_automount()
2589 if (m->mnt_sb == path->mnt->mnt_sb && in finish_automount()
2595 err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); in finish_automount()
2600 if (!list_empty(&mnt->mnt_expire)) { in finish_automount()
2602 list_del_init(&mnt->mnt_expire); in finish_automount()
2615 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) in mnt_set_expiry() argument
2619 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); in mnt_set_expiry()
2632 struct mount *mnt, *next; in mark_mounts_for_expiry() local
2647 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { in mark_mounts_for_expiry()
2648 if (!xchg(&mnt->mnt_expiry_mark, 1) || in mark_mounts_for_expiry()
2649 propagate_mount_busy(mnt, 1)) in mark_mounts_for_expiry()
2651 list_move(&mnt->mnt_expire, &graveyard); in mark_mounts_for_expiry()
2654 mnt = list_first_entry(&graveyard, struct mount, mnt_expire); in mark_mounts_for_expiry()
2655 touch_mnt_namespace(mnt->mnt_ns); in mark_mounts_for_expiry()
2656 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); in mark_mounts_for_expiry()
2681 struct mount *mnt = list_entry(tmp, struct mount, mnt_child); in select_submounts() local
2684 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) in select_submounts()
2689 if (!list_empty(&mnt->mnt_mounts)) { in select_submounts()
2690 this_parent = mnt; in select_submounts()
2694 if (!propagate_mount_busy(mnt, 1)) { in select_submounts()
2695 list_move_tail(&mnt->mnt_expire, graveyard); in select_submounts()
2716 static void shrink_submounts(struct mount *mnt) in shrink_submounts() argument
2722 while (select_submounts(mnt, &graveyard)) { in shrink_submounts()
2864 mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK; in do_mount()
2958 new = copy_tree(old, old->mnt.mnt_root, copy_flags); in copy_mnt_ns()
2978 if (&p->mnt == new_fs->root.mnt) { in copy_mnt_ns()
2979 new_fs->root.mnt = mntget(&q->mnt); in copy_mnt_ns()
2980 rootmnt = &p->mnt; in copy_mnt_ns()
2982 if (&p->mnt == new_fs->pwd.mnt) { in copy_mnt_ns()
2983 new_fs->pwd.mnt = mntget(&q->mnt); in copy_mnt_ns()
2984 pwdmnt = &p->mnt; in copy_mnt_ns()
2991 while (p->mnt.mnt_root != q->mnt.mnt_root) in copy_mnt_ns()
3012 struct mount *mnt = real_mount(m); in create_mnt_ns() local
3013 mnt->mnt_ns = new_ns; in create_mnt_ns()
3014 new_ns->root = mnt; in create_mnt_ns()
3016 list_add(&mnt->mnt_list, &new_ns->list); in create_mnt_ns()
3023 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) in mount_subtree() argument
3030 ns = create_mnt_ns(mnt); in mount_subtree()
3034 err = vfs_path_lookup(mnt->mnt_root, mnt, in mount_subtree()
3043 s = path.mnt->mnt_sb; in mount_subtree()
3045 mntput(path.mnt); in mount_subtree()
3092 bool is_path_reachable(struct mount *mnt, struct dentry *dentry, in is_path_reachable() argument
3095 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { in is_path_reachable()
3096 dentry = mnt->mnt_mountpoint; in is_path_reachable()
3097 mnt = mnt->mnt_parent; in is_path_reachable()
3099 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); in is_path_reachable()
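The is_path_reachable() references above climb from a mount toward the given root, replacing the dentry with the mountpoint at each step, until either the root is reached or the parent chain runs out. A minimal sketch of that upward walk over stand-in structures:

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *parent;                 /* parent mount, or itself at the root */
	const char *mountpoint;              /* where this mount sits in its parent */
};

static bool reachable(const struct node *n, const struct node *root)
{
	while (n != root && n->parent != n)
		n = n->parent;               /* climb toward the namespace root */
	return n == root;
}

int main(void)
{
	struct node root = { &root, "/" };
	struct node child = { &root, "/mnt" };

	printf("reachable: %d\n", reachable(&child, &root));
	return 0;
}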
3106 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); in path_is_under()
3167 new_mnt = real_mount(new.mnt); in SYSCALL_DEFINE2()
3168 root_mnt = real_mount(root.mnt); in SYSCALL_DEFINE2()
3169 old_mnt = real_mount(old.mnt); in SYSCALL_DEFINE2()
3176 if (new_mnt->mnt.mnt_flags & MNT_LOCKED) in SYSCALL_DEFINE2()
3185 if (root.mnt->mnt_root != root.dentry) in SYSCALL_DEFINE2()
3190 if (new.mnt->mnt_root != new.dentry) in SYSCALL_DEFINE2()
3204 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { in SYSCALL_DEFINE2()
3205 new_mnt->mnt.mnt_flags |= MNT_LOCKED; in SYSCALL_DEFINE2()
3206 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; in SYSCALL_DEFINE2()
3211 attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp); in SYSCALL_DEFINE2()
3237 struct vfsmount *mnt; in init_mount_tree() local
3245 mnt = vfs_kern_mount(type, 0, "rootfs", NULL); in init_mount_tree()
3247 if (IS_ERR(mnt)) in init_mount_tree()
3250 ns = create_mnt_ns(mnt); in init_mount_tree()
3257 root.mnt = mnt; in init_mount_tree()
3258 root.dentry = mnt->mnt_root; in init_mount_tree()
3259 mnt->mnt_flags |= MNT_LOCKED; in init_mount_tree()
3309 drop_collected_mounts(&ns->root->mnt); in put_mnt_ns()
3315 struct vfsmount *mnt; in kern_mount_data() local
3316 mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data); in kern_mount_data()
3317 if (!IS_ERR(mnt)) { in kern_mount_data()
3322 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; in kern_mount_data()
3324 return mnt; in kern_mount_data()
3328 void kern_unmount(struct vfsmount *mnt) in kern_unmount() argument
3331 if (!IS_ERR_OR_NULL(mnt)) { in kern_unmount()
3332 real_mount(mnt)->mnt_ns = NULL; in kern_unmount()
3334 mntput(mnt); in kern_unmount()
3339 bool our_mnt(struct vfsmount *mnt) in our_mnt() argument
3341 return check_mnt(real_mount(mnt)); in our_mnt()
3352 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt; in current_chrooted()
3353 ns_root.dentry = ns_root.mnt->mnt_root; in current_chrooted()
3372 struct mount *mnt; in fs_fully_visible() local
3379 list_for_each_entry(mnt, &ns->list, mnt_list) { in fs_fully_visible()
3383 if (mnt->mnt.mnt_sb->s_type != type) in fs_fully_visible()
3389 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) in fs_fully_visible()
3395 mnt_flags = mnt->mnt.mnt_flags; in fs_fully_visible()
3396 if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC) in fs_fully_visible()
3400 if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY) in fs_fully_visible()
3426 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { in fs_fully_visible()
3429 if (!(child->mnt.mnt_flags & MNT_LOCKED)) in fs_fully_visible()
3490 root.mnt = &mnt_ns->root->mnt; in mntns_install()
3491 root.dentry = mnt_ns->root->mnt.mnt_root; in mntns_install()