// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/srcu.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

/*
 * Clear all of the marks on an inode when it is being evicted from core
 */
void __fsnotify_inode_delete(struct inode *inode)
{
	fsnotify_clear_marks_by_inode(inode);
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);

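/*
 * Clear all of the marks on a mount when it is being destroyed.
 */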
void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
	fsnotify_clear_marks_by_mount(mnt);
}

/**
 * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @sb: superblock being unmounted.
 *
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers. We temporarily drop sb->s_inode_list_lock and CAN block.
 */
static void fsnotify_unmount_inodes(struct super_block *sb)
{
	struct inode *inode, *iput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We cannot __iget() an inode in state I_FREEING,
		 * I_WILL_FREE, or I_NEW which is fine because by that point
		 * the inode cannot have any associated watches.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with SB_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 * However, we should have been called /after/ evict_inodes
		 * removed all zero refcount inodes, in any case.  Test to
		 * be sure.
		 */
		if (!atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

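		/*
		 * Drop the reference taken on the previous inode only now,
		 * after all spinlocks have been released; iput() may block
		 * and may even trigger eviction of that inode.
		 */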
		if (iput_inode)
			iput(iput_inode);

		/* for each watch, send FS_UNMOUNT and then remove it */
		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

		fsnotify_inode_delete(inode);

		iput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);

	if (iput_inode)
		iput(iput_inode);
	/* Wait for outstanding inode references from connectors */
	wait_var_event(&sb->s_fsnotify_inode_refs,
		       !atomic_long_read(&sb->s_fsnotify_inode_refs));
}

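/*
 * Called on superblock shutdown: deliver FS_UNMOUNT to all watched inodes,
 * then tear down any remaining inode and sb marks.
 */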
void fsnotify_sb_delete(struct super_block *sb)
{
	fsnotify_unmount_inodes(sb);
	fsnotify_clear_marks_by_sb(sb);
}

/*
 * Given an inode, first check if we care what happens to our children.  Inotify
 * and dnotify both tell their parents about events.  If we care about any event
 * on a child we run all of our children and set a dentry flag saying that the
 * parent cares.  Thus when an event happens on a child it can quickly tell
 * if there is a need to find a parent and send the event to the parent.
 */
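/*
 * Illustrative sketch: thanks to the flag cached in d_flags, the hot path
 * for an event on an unwatched child can bail out without taking any locks
 * or even looking up the parent, as __fsnotify_parent() below does:
 *
 *	if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
 *		return 0;
 */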
void __fsnotify_update_child_dentry_flags(struct inode *inode)
{
	struct dentry *alias;
	int watched;

	if (!S_ISDIR(inode->i_mode))
		return;

	/* determine if the children should tell inode about their events */
	watched = fsnotify_inode_watches_children(inode);

	spin_lock(&inode->i_lock);
	/* run all of the dentries associated with this inode.  Since this is a
	 * directory, there damn well better only be one item on this list */
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		struct dentry *child;

		/* run all of the children of the original inode and fix their
		 * d_flags to indicate parental interest (their parent is the
		 * original inode) */
		spin_lock(&alias->d_lock);
		list_for_each_entry(child, &alias->d_subdirs, d_child) {
			if (!child->d_inode)
				continue;

			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
			if (watched)
				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
			else
				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
		spin_unlock(&alias->d_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Notify this dentry's parent about a child's events. */
int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
{
	struct dentry *parent;
	struct inode *p_inode;
	int ret = 0;

	if (!dentry)
		dentry = path->dentry;

	if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
		return 0;

	parent = dget_parent(dentry);
	p_inode = parent->d_inode;

	if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
		__fsnotify_update_child_dentry_flags(p_inode);
	} else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) {
		struct name_snapshot name;

		/* we are notifying a parent so come up with the new mask which
		 * specifies these are events which came from a child. */
		mask |= FS_EVENT_ON_CHILD;

		take_dentry_name_snapshot(&name, dentry);
		if (path)
			ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
				       &name.name, 0);
		else
			ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
				       &name.name, 0);
		release_dentry_name_snapshot(&name);
	}

	dput(parent);

	return ret;
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);

static int send_to_group(struct inode *to_tell,
			 __u32 mask, const void *data,
			 int data_is, u32 cookie,
			 const struct qstr *file_name,
			 struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_group *group = NULL;
	__u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);
	__u32 marks_mask = 0;
	__u32 marks_ignored_mask = 0;
	struct fsnotify_mark *mark;
	int type;

	if (WARN_ON(!iter_info->report_mask))
		return 0;

	/* clear ignored on inode modification */
	if (mask & FS_MODIFY) {
		fsnotify_foreach_obj_type(type) {
			if (!fsnotify_iter_should_report_type(iter_info, type))
				continue;
			mark = iter_info->marks[type];
			if (mark &&
			    !(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
				mark->ignored_mask = 0;
		}
	}

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/* does the object mark tell us to do something? */
		if (mark) {
			group = mark->group;
			marks_mask |= mark->mask;
			marks_ignored_mask |= mark->ignored_mask;
		}
	}

	pr_debug("%s: group=%p to_tell=%p mask=%x marks_mask=%x marks_ignored_mask=%x"
		 " data=%p data_is=%d cookie=%d\n",
		 __func__, group, to_tell, mask, marks_mask, marks_ignored_mask,
		 data, data_is, cookie);

	if (!(test_mask & marks_mask & ~marks_ignored_mask))
		return 0;

	return group->ops->handle_event(group, to_tell, mask, data, data_is,
					file_name, cookie, iter_info);
}

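/*
 * Fetch the first mark on an object's mark list. Must be called under the
 * fsnotify_mark_srcu read lock, which keeps the returned mark alive.
 */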
static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp)
{
	struct fsnotify_mark_connector *conn;
	struct hlist_node *node = NULL;

	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
	if (conn)
		node = srcu_dereference(conn->list.first, &fsnotify_mark_srcu);

	return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
}

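/*
 * Fetch the mark following @mark on its object's mark list, again under
 * fsnotify_mark_srcu protection; returns NULL at the end of the list.
 */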
static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
{
	struct hlist_node *node = NULL;

	if (mark)
		node = srcu_dereference(mark->obj_list.next,
					&fsnotify_mark_srcu);

	return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
}

/*
 * iter_info is a multi head priority queue of marks.
 * Pick a subset of marks from queue heads, all with the
 * same group, and set the report_mask for the selected subset.
 * Returns the report_mask of the selected subset.
 */
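/*
 * For example: if the inode list head is a mark of a fanotify
 * FAN_CLASS_CONTENT group (FS_PRIO_1) and the vfsmount list head is an
 * inotify mark (FS_PRIO_0), only the fanotify mark is selected for this
 * step; the inotify mark gets its turn in a later iteration.
 */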
static unsigned int fsnotify_iter_select_report_types(
		struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_group *max_prio_group = NULL;
	struct fsnotify_mark *mark;
	int type;

	/* Choose max prio group among groups of all queue heads */
	fsnotify_foreach_obj_type(type) {
		mark = iter_info->marks[type];
		if (mark &&
		    fsnotify_compare_groups(max_prio_group, mark->group) > 0)
			max_prio_group = mark->group;
	}

	if (!max_prio_group)
		return 0;

	/* Set the report mask for marks from same group as max prio group */
	iter_info->report_mask = 0;
	fsnotify_foreach_obj_type(type) {
		mark = iter_info->marks[type];
		if (mark &&
		    fsnotify_compare_groups(max_prio_group, mark->group) == 0)
			fsnotify_iter_set_report_type(iter_info, type);
	}

	return iter_info->report_mask;
}

/*
 * Pop from the iter_info multi head queue the marks that were iterated in
 * the current iteration step.
 */
static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info)
{
	int type;

	fsnotify_foreach_obj_type(type) {
		if (fsnotify_iter_should_report_type(iter_info, type))
			iter_info->marks[type] =
				fsnotify_next_mark(iter_info->marks[type]);
	}
}

/*
 * This is the main call to fsnotify.  The VFS calls into hook specific
 * functions in linux/fsnotify.h.  Those functions then in turn call here,
 * and here we call out to all of the registered fsnotify_groups.  Those
 * groups can then use the notification event in whatever means they feel
 * necessary.
 */
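/*
 * Illustrative call path (a sketch; details vary per hook and kernel
 * version): a write through the VFS triggers the fsnotify_modify() hook in
 * linux/fsnotify.h, which ends up calling roughly
 *
 *	fsnotify(inode, FS_MODIFY, &file->f_path, FSNOTIFY_EVENT_PATH,
 *		 NULL, 0);
 *
 * and each selected group's ->handle_event() then queues or reports the
 * event in its own format (inotify, fanotify, dnotify, audit, ...).
 */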
int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
	     const struct qstr *file_name, u32 cookie)
{
	struct fsnotify_iter_info iter_info = {};
	struct super_block *sb = to_tell->i_sb;
	struct mount *mnt = NULL;
	__u32 mnt_or_sb_mask = sb->s_fsnotify_mask;
	int ret = 0;
	__u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);

	if (data_is == FSNOTIFY_EVENT_PATH) {
		mnt = real_mount(((const struct path *)data)->mnt);
		mnt_or_sb_mask |= mnt->mnt_fsnotify_mask;
	}
	/* An event "on child" is not intended for a mount/sb mark */
	if (mask & FS_EVENT_ON_CHILD)
		mnt_or_sb_mask = 0;

	/*
	 * Optimization: srcu_read_lock() has a memory barrier which can
	 * be expensive.  It protects walking the *_fsnotify_marks lists.
	 * However, if we do not walk the lists, we do not have to do
	 * SRCU because we have no references to any objects and do not
	 * need SRCU to keep them "alive".
	 */
	if (!to_tell->i_fsnotify_marks && !sb->s_fsnotify_marks &&
	    (!mnt || !mnt->mnt_fsnotify_marks))
		return 0;
	/*
	 * If this is a modify event we may need to clear the ignored masks.
	 * Otherwise, return if neither the inode nor the vfsmount/sb care
	 * about this type of event.
	 */
	if (!(mask & FS_MODIFY) &&
	    !(test_mask & (to_tell->i_fsnotify_mask | mnt_or_sb_mask)))
		return 0;

	iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);

	iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
		fsnotify_first_mark(&to_tell->i_fsnotify_marks);
	iter_info.marks[FSNOTIFY_OBJ_TYPE_SB] =
		fsnotify_first_mark(&sb->s_fsnotify_marks);
	if (mnt) {
		iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
			fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
	}

	/*
	 * We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark
	 * ignore masks are properly reflected for mount/sb mark notifications.
	 * That's why this traversal is so complicated...
	 */
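	/*
	 * Example: a group may have a mount mark requesting FS_MODIFY and an
	 * inode mark whose ignored_mask contains FS_MODIFY; both marks must
	 * be seen in the same send_to_group() call for the event to be
	 * correctly suppressed for that group.
	 */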
	while (fsnotify_iter_select_report_types(&iter_info)) {
		ret = send_to_group(to_tell, mask, data, data_is, cookie,
				    file_name, &iter_info);

		if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
			goto out;

		fsnotify_iter_next(&iter_info);
	}
	ret = 0;
out:
	srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx);

	return ret;
}
EXPORT_SYMBOL_GPL(fsnotify);

extern struct kmem_cache *fsnotify_mark_connector_cachep;

static __init int fsnotify_init(void)
{
	int ret;

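	/*
	 * Compile-time sanity check: this count must be updated whenever
	 * event bits are added to or removed from ALL_FSNOTIFY_BITS.
	 */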
	BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 25);

	ret = init_srcu_struct(&fsnotify_mark_srcu);
	if (ret)
		panic("initializing fsnotify_mark_srcu");

	fsnotify_mark_connector_cachep = KMEM_CACHE(fsnotify_mark_connector,
						    SLAB_PANIC);

	return 0;
}
core_initcall(fsnotify_init);