// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
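
/*
 * These limits show up under /proc/sys/fs/inotify/ when sysctl support is
 * built in.  As an illustrative sketch (shell commands, standard procfs
 * paths assumed), they can be inspected and raised at runtime with:
 *
 *	cat /proc/sys/fs/inotify/max_user_watches
 *	sysctl -w fs.inotify.max_user_watches=65536
 */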

static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg)
{
	__u32 mask;

	/*
	 * Every watch should accept its own IN_IGNORED and should receive
	 * events when the inode is unmounted.  All directories care about
	 * events on their children.
	 */
	mask = (FS_IN_IGNORED | FS_UNMOUNT);
	if (S_ISDIR(inode->i_mode))
		mask |= FS_EVENT_ON_CHILD;

	/* keep only the event and watch bits userspace is allowed to set */
	mask |= (arg & INOTIFY_USER_MASK);

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
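
/*
 * For illustration: the uapi struct inotify_event is 16 bytes (wd, mask,
 * cookie, len), so a 5-byte name above rounds up to roundup(5 + 1, 16) == 16
 * bytes of name plus NUL padding in the record handed back to userspace.
 */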

/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
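
/*
 * Each record copy_event_to_user() produces is laid out as a struct
 * inotify_event { wd, mask, cookie, len } followed by "len" bytes of
 * '\0'-padded name (or nothing when len == 0).  A minimal sketch of how
 * a userspace reader walks such a buffer (field names per <sys/inotify.h>):
 *
 *	for (char *p = buf; p < buf + n; ) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		handle(ev->wd, ev->mask, ev->len ? ev->name : NULL);
 *		p += sizeof(*ev) + ev->len;
 *	}
 */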

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
			      unsigned int flags, __u64 mask)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error) {
		path_put(path);
		return error;
	}
	error = security_path_notify(path, mask,
				     FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		path_put(path);

	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
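
/*
 * Note on the allocation pattern above: idr_preload(GFP_KERNEL) fills the
 * per-cpu preload cache while sleeping is still allowed, so the
 * idr_alloc_cyclic() call under the spinlock can use GFP_NOWAIT.  Cyclic
 * allocation also keeps watch descriptors from being reused immediately
 * after a watch is removed.
 */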

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  Something went
	 * seriously wrong somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref cnting.. */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;

	/* Queue ignore event for the watch */
	inotify_handle_inode_event(fsn_mark, FS_IN_IGNORED, NULL, NULL, NULL,
				   0);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(inode, arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);

	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
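
/*
 * Return convention of the function above: -ENOENT means no mark exists yet
 * for this group/inode pair (the caller then creates one), -EEXIST is the
 * IN_MASK_CREATE case where userspace asked for a new watch but one already
 * exists, and any non-negative value is the existing watch descriptor.
 */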

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(inode, arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, 0);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct path alteredpath;
	struct path *canonical_path = &path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags,
				 (mask & IN_ALL_EVENTS));
	if (ret)
		goto fput_and_out;

	/* support stacked filesystems */
	if (path.dentry && path.dentry->d_op) {
		if (path.dentry->d_op->d_canonical_path) {
			path.dentry->d_op->d_canonical_path(&path,
							    &alteredpath);
			canonical_path = &alteredpath;
			path_put(&path);
		}
	}

	/* inode held in place by reference to path; group by fget on fd */
	inode = canonical_path->dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(canonical_path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
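
/*
 * For reference, a minimal userspace sketch of the syscalls implemented
 * above (glibc wrappers from <sys/inotify.h>; the "/tmp" path and buffer
 * size are arbitrary illustration choices):
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf)); // stream of inotify_event records
 *	inotify_rm_watch(fd, wd);               // queues IN_IGNORED for wd
 *	close(fd);
 */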

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);