#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS     16384
#define FANOTIFY_DEFAULT_MAX_MARKS      8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS  128

/*
 * All of the flags that may be specified in the event_f_flags parameter of
 * fanotify_init(2).
 *
 * Internal and external open flags are stored together in the f_flags field
 * of struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC and FMODE_NOCMTIME shall be
 * excluded.
 */
#define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \
        O_ACCMODE | O_APPEND | O_NONBLOCK | \
        __O_SYNC | O_DSYNC | O_CLOEXEC | \
        O_LARGEFILE | O_NOATIME )

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
                                            size_t count)
{
        BUG_ON(!mutex_is_locked(&group->notification_mutex));

        pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

        if (fsnotify_notify_queue_is_empty(group))
                return NULL;

        if (FAN_EVENT_METADATA_LEN > count)
                return ERR_PTR(-EINVAL);

        /*
         * We hold the notification_mutex the whole time, so the event at
         * the head of the queue cannot change under us.
         */
        return fsnotify_remove_first_event(group);
}

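/*
 * The fd handed to userspace below is backed by a second open of the
 * watched object. FMODE_NONOTIFY marks that struct file so accesses
 * through it do not generate further fanotify events, which keeps a
 * listener from notifying itself in a loop.
 */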
static int create_fd(struct fsnotify_group *group,
                     struct fanotify_event_info *event,
                     struct file **file)
{
        int client_fd;
        struct file *new_file;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
        if (client_fd < 0)
                return client_fd;

        /*
         * We need a new file handle for the userspace program so it can read
         * even if the file was originally opened O_WRONLY.
         */
        /*
         * This may be an overflow event; in that case dentry and mnt are
         * NULL, which is fine - just don't call dentry_open().
         */
        if (event->path.dentry && event->path.mnt)
                new_file = dentry_open(&event->path,
                                       group->fanotify_data.f_flags | FMODE_NONOTIFY,
                                       current_cred());
        else
                new_file = ERR_PTR(-EOVERFLOW);
        if (IS_ERR(new_file)) {
                /*
                 * We still send an event even if we can't open the file.
                 * This can happen, say, when a task is gone and we try to
                 * open its /proc files, or when we try to open a WRONLY
                 * file like in sysfs. We just send the errno to userspace
                 * since there isn't much else we can do.
                 */
                put_unused_fd(client_fd);
                client_fd = PTR_ERR(new_file);
        } else {
                *file = new_file;
        }

        return client_fd;
}

static int fill_event_metadata(struct fsnotify_group *group,
                               struct fanotify_event_metadata *metadata,
                               struct fsnotify_event *fsn_event,
                               struct file **file)
{
        int ret = 0;
        struct fanotify_event_info *event;

        pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
                 group, metadata, fsn_event);

        *file = NULL;
        event = container_of(fsn_event, struct fanotify_event_info, fse);
        metadata->event_len = FAN_EVENT_METADATA_LEN;
        metadata->metadata_len = FAN_EVENT_METADATA_LEN;
        metadata->vers = FANOTIFY_METADATA_VERSION;
        metadata->reserved = 0;
        metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
        metadata->pid = pid_vnr(event->tgid);
        if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
                metadata->fd = FAN_NOFD;
        else {
                metadata->fd = create_fd(group, event, file);
                if (metadata->fd < 0)
                        ret = metadata->fd;
        }

        return ret;
}

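/*
 * With CONFIG_FANOTIFY_ACCESS_PERMISSIONS, permission events that have been
 * copied to userspace stay queued on fanotify_data.access_list until the
 * listener writes back a struct fanotify_response for the matching fd, or
 * the group is released.
 */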
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
                                struct fsnotify_group *group, int fd)
{
        struct fanotify_perm_event_info *event, *return_e = NULL;

        spin_lock(&group->fanotify_data.access_lock);
        list_for_each_entry(event, &group->fanotify_data.access_list,
                            fae.fse.list) {
                if (event->fd != fd)
                        continue;

                list_del_init(&event->fae.fse.list);
                return_e = event;
                break;
        }
        spin_unlock(&group->fanotify_data.access_lock);

        pr_debug("%s: found return_e=%p\n", __func__, return_e);

        return return_e;
}

static int process_access_response(struct fsnotify_group *group,
                                   struct fanotify_response *response_struct)
{
        struct fanotify_perm_event_info *event;
        int fd = response_struct->fd;
        int response = response_struct->response;

        pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
                 fd, response);
        /*
         * Make sure the response is valid. If it is invalid we do nothing:
         * userspace can still send a valid response later, and any leftover
         * event is cleaned up when the fanotify fd is closed.
         */
        switch (response) {
        case FAN_ALLOW:
        case FAN_DENY:
                break;
        default:
                return -EINVAL;
        }

        if (fd < 0)
                return -EINVAL;

        event = dequeue_event(group, fd);
        if (!event)
                return -ENOENT;

        event->response = response;
        wake_up(&group->fanotify_data.access_waitq);

        return 0;
}
#endif

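/*
 * copy_event_to_user() deliberately copies the metadata to the user buffer
 * before fd_install()ing the new file descriptor: if the copy faults, the
 * descriptor was never visible to the task and can simply be discarded with
 * put_unused_fd()/fput().
 */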
static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  struct fsnotify_event *event,
                                  char __user *buf)
{
        struct fanotify_event_metadata fanotify_event_metadata;
        struct file *f;
        int fd, ret;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
        if (ret < 0)
                return ret;

        fd = fanotify_event_metadata.fd;
        ret = -EFAULT;
        if (copy_to_user(buf, &fanotify_event_metadata,
                         fanotify_event_metadata.event_len))
                goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (event->mask & FAN_ALL_PERM_EVENTS)
                FANOTIFY_PE(event)->fd = fd;
#endif

        if (fd != FAN_NOFD)
                fd_install(fd, f);
        return fanotify_event_metadata.event_len;

out_close_fd:
        if (fd != FAN_NOFD) {
                put_unused_fd(fd);
                fput(f);
        }
        return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
        struct fsnotify_group *group = file->private_data;
        int ret = 0;

        poll_wait(file, &group->notification_waitq, wait);
        mutex_lock(&group->notification_mutex);
        if (!fsnotify_notify_queue_is_empty(group))
                ret = POLLIN | POLLRDNORM;
        mutex_unlock(&group->notification_mutex);

        return ret;
}

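/*
 * read() returns as many complete events as fit in the caller's buffer, or
 * blocks (unless O_NONBLOCK) until at least one is available. A listener
 * would typically read into a large buffer and walk the result with the
 * FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers from <linux/fanotify.h>.
 */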
static ssize_t fanotify_read(struct file *file, char __user *buf,
                             size_t count, loff_t *pos)
{
        struct fsnotify_group *group;
        struct fsnotify_event *kevent;
        char __user *start;
        int ret;
        DEFINE_WAIT(wait);

        start = buf;
        group = file->private_data;

        pr_debug("%s: group=%p\n", __func__, group);

        while (1) {
                prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

                mutex_lock(&group->notification_mutex);
                kevent = get_one_event(group, count);
                mutex_unlock(&group->notification_mutex);

                if (IS_ERR(kevent)) {
                        ret = PTR_ERR(kevent);
                        break;
                }

                if (!kevent) {
                        ret = -EAGAIN;
                        if (file->f_flags & O_NONBLOCK)
                                break;

                        ret = -ERESTARTSYS;
                        if (signal_pending(current))
                                break;

                        if (start != buf)
                                break;
                        schedule();
                        continue;
                }

                ret = copy_event_to_user(group, kevent, buf);
                /*
                 * Permission events get queued to wait for response. Other
                 * events can be destroyed now.
                 */
                if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
                        fsnotify_destroy_event(group, kevent);
                        if (ret < 0)
                                break;
                } else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
                        if (ret < 0) {
                                FANOTIFY_PE(kevent)->response = FAN_DENY;
                                wake_up(&group->fanotify_data.access_waitq);
                                break;
                        }
                        spin_lock(&group->fanotify_data.access_lock);
                        list_add_tail(&kevent->list,
                                      &group->fanotify_data.access_list);
                        spin_unlock(&group->fanotify_data.access_lock);
#endif
                }
                buf += ret;
                count -= ret;
        }

        finish_wait(&group->notification_waitq, &wait);
        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
}

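/*
 * Illustrative userspace side (not part of this file): after handling a
 * permission event, a listener answers by writing a struct fanotify_response
 * back to the same fanotify fd, e.g.
 *
 *	struct fanotify_response resp;
 *
 *	resp.fd = metadata->fd;
 *	resp.response = FAN_ALLOW;	(or FAN_DENY)
 *	write(fanotify_fd, &resp, sizeof(resp));
 *
 * fanotify_write() below feeds such a write into process_access_response().
 */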
static ssize_t fanotify_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        struct fanotify_response response = { .fd = -1, .response = -1 };
        struct fsnotify_group *group;
        int ret;

        group = file->private_data;

        if (count > sizeof(response))
                count = sizeof(response);

        pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

        if (copy_from_user(&response, buf, count))
                return -EFAULT;

        ret = process_access_response(group, &response);
        if (ret < 0)
                count = ret;

        return count;
#else
        return -EINVAL;
#endif
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
        struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        struct fanotify_perm_event_info *event, *next;
        struct fsnotify_event *fsn_event;

        /*
         * Stop new events from arriving in the notification queue. Since
         * userspace cannot use the fanotify fd anymore, no event can enter
         * or leave access_list by now either.
         */
        fsnotify_group_stop_queueing(group);

        /*
         * Process all permission events on access_list and notification queue
         * and simulate reply from userspace.
         */
        spin_lock(&group->fanotify_data.access_lock);
        list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
                                 fae.fse.list) {
                pr_debug("%s: found group=%p event=%p\n", __func__, group,
                         event);

                list_del_init(&event->fae.fse.list);
                event->response = FAN_ALLOW;
        }
        spin_unlock(&group->fanotify_data.access_lock);

        /*
         * Destroy all non-permission events. For permission events just
         * dequeue them and set the response. They will be freed once the
         * response is consumed and fanotify_get_response() returns.
         */
        mutex_lock(&group->notification_mutex);
        while (!fsnotify_notify_queue_is_empty(group)) {
                fsn_event = fsnotify_remove_first_event(group);
                if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
                        fsnotify_destroy_event(group, fsn_event);
                else
                        FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
        }
        mutex_unlock(&group->notification_mutex);

        /* Responses for all permission events are now set; wake up waiters */
        wake_up(&group->fanotify_data.access_waitq);
#endif

        /* matches the fanotify_init->fsnotify_alloc_group */
        fsnotify_destroy_group(group);

        return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct fsnotify_group *group;
        struct fsnotify_event *fsn_event;
        void __user *p;
        int ret = -ENOTTY;
        size_t send_len = 0;

        group = file->private_data;

        p = (void __user *) arg;

        switch (cmd) {
        case FIONREAD:
                mutex_lock(&group->notification_mutex);
                list_for_each_entry(fsn_event, &group->notification_list, list)
                        send_len += FAN_EVENT_METADATA_LEN;
                mutex_unlock(&group->notification_mutex);
                ret = put_user(send_len, (int __user *) p);
                break;
        }

        return ret;
}

static const struct file_operations fanotify_fops = {
        .show_fdinfo    = fanotify_show_fdinfo,
        .poll           = fanotify_poll,
        .read           = fanotify_read,
        .write          = fanotify_write,
        .fasync         = NULL,
        .release        = fanotify_release,
        .unlocked_ioctl = fanotify_ioctl,
        .compat_ioctl   = fanotify_ioctl,
        .llseek         = noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
        kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

static int fanotify_find_path(int dfd, const char __user *filename,
                              struct path *path, unsigned int flags)
{
        int ret;

        pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
                 dfd, filename, flags);

        if (filename == NULL) {
                struct fd f = fdget(dfd);

                ret = -EBADF;
                if (!f.file)
                        goto out;

                ret = -ENOTDIR;
                if ((flags & FAN_MARK_ONLYDIR) &&
                    !(S_ISDIR(file_inode(f.file)->i_mode))) {
                        fdput(f);
                        goto out;
                }

                *path = f.file->f_path;
                path_get(path);
                fdput(f);
        } else {
                unsigned int lookup_flags = 0;

                if (!(flags & FAN_MARK_DONT_FOLLOW))
                        lookup_flags |= LOOKUP_FOLLOW;
                if (flags & FAN_MARK_ONLYDIR)
                        lookup_flags |= LOOKUP_DIRECTORY;

                ret = user_path_at(dfd, filename, lookup_flags, path);
                if (ret)
                        goto out;
        }

        /* you can only watch an inode if you have read permissions on it */
        ret = inode_permission2(path->mnt, path->dentry->d_inode, MAY_READ);
        if (ret)
                path_put(path);
out:
        return ret;
}

static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
                                            __u32 mask,
                                            unsigned int flags,
                                            int *destroy)
{
        __u32 oldmask;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                oldmask = fsn_mark->mask;
                fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
        } else {
                oldmask = fsn_mark->ignored_mask;
                fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
        }
        spin_unlock(&fsn_mark->lock);

        *destroy = !(oldmask & ~mask);

        return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
                                         struct vfsmount *mnt, __u32 mask,
                                         unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;
        int destroy_mark;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
        if (!fsn_mark) {
                mutex_unlock(&group->mark_mutex);
                return -ENOENT;
        }

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
                                                 &destroy_mark);
        if (destroy_mark)
                fsnotify_destroy_mark_locked(fsn_mark, group);
        mutex_unlock(&group->mark_mutex);

        fsnotify_put_mark(fsn_mark);
        if (removed & real_mount(mnt)->mnt_fsnotify_mask)
                fsnotify_recalc_vfsmount_mask(mnt);

        return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
                                      struct inode *inode, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;
        int destroy_mark;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_inode_mark(group, inode);
        if (!fsn_mark) {
                mutex_unlock(&group->mark_mutex);
                return -ENOENT;
        }

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
                                                 &destroy_mark);
        if (destroy_mark)
                fsnotify_destroy_mark_locked(fsn_mark, group);
        mutex_unlock(&group->mark_mutex);

        /* matches the fsnotify_find_inode_mark() */
        fsnotify_put_mark(fsn_mark);
        if (removed & inode->i_fsnotify_mask)
                fsnotify_recalc_inode_mask(inode);

        return 0;
}

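/*
 * fanotify_mark_add_to_mask() returns the bits that were newly added to the
 * mark, so callers only recalculate the object-wide event mask when the mark
 * actually gained something.
 */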
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
                                       __u32 mask,
                                       unsigned int flags)
{
        __u32 oldmask = -1;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                oldmask = fsn_mark->mask;
                fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
        } else {
                __u32 tmask = fsn_mark->ignored_mask | mask;
                fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
                if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
                        fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
        }

        if (!(flags & FAN_MARK_ONDIR)) {
                __u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
                fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
        }

        spin_unlock(&fsn_mark->lock);

        return mask & ~oldmask;
}

static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
                                                   struct inode *inode,
                                                   struct vfsmount *mnt)
{
        struct fsnotify_mark *mark;
        int ret;

        if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
                return ERR_PTR(-ENOSPC);

        mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
        if (!mark)
                return ERR_PTR(-ENOMEM);

        fsnotify_init_mark(mark, fanotify_free_mark);
        ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
        if (ret) {
                fsnotify_put_mark(mark);
                return ERR_PTR(ret);
        }

        return mark;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
                                      struct vfsmount *mnt, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
        if (!fsn_mark) {
                fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
                if (IS_ERR(fsn_mark)) {
                        mutex_unlock(&group->mark_mutex);
                        return PTR_ERR(fsn_mark);
                }
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
        mutex_unlock(&group->mark_mutex);

        if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
                fsnotify_recalc_vfsmount_mask(mnt);

        fsnotify_put_mark(fsn_mark);
        return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
                                   struct inode *inode, __u32 mask,
                                   unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;

        pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

        /*
         * If some other task has this inode open for write we should not add
         * an ignored mark, unless that ignored mark is supposed to survive
         * modification anyway.
         */
        if ((flags & FAN_MARK_IGNORED_MASK) &&
            !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
            (atomic_read(&inode->i_writecount) > 0))
                return 0;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_inode_mark(group, inode);
        if (!fsn_mark) {
                fsn_mark = fanotify_add_new_mark(group, inode, NULL);
                if (IS_ERR(fsn_mark)) {
                        mutex_unlock(&group->mark_mutex);
                        return PTR_ERR(fsn_mark);
                }
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
        mutex_unlock(&group->mark_mutex);

        if (added & ~inode->i_fsnotify_mask)
                fsnotify_recalc_inode_mask(inode);

        fsnotify_put_mark(fsn_mark);
        return 0;
}

/* fanotify syscalls */
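/*
 * Illustrative userspace usage (not part of this file): a content scanner
 * might do something like
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *			       O_RDONLY | O_LARGEFILE);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN_PERM | FAN_CLOSE_WRITE, AT_FDCWD, "/");
 *
 * and then read events from fd, answering the FAN_OPEN_PERM ones as shown
 * above fanotify_write().
 */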
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
        struct fsnotify_group *group;
        int f_flags, fd;
        struct user_struct *user;
        struct fanotify_event_info *oevent;

        pr_debug("%s: flags=%d event_f_flags=%d\n",
                 __func__, flags, event_f_flags);

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (flags & ~FAN_ALL_INIT_FLAGS)
                return -EINVAL;

        if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
                return -EINVAL;

        switch (event_f_flags & O_ACCMODE) {
        case O_RDONLY:
        case O_RDWR:
        case O_WRONLY:
                break;
        default:
                return -EINVAL;
        }

        user = get_current_user();
        if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
                free_uid(user);
                return -EMFILE;
        }

        f_flags = O_RDWR | FMODE_NONOTIFY;
        if (flags & FAN_CLOEXEC)
                f_flags |= O_CLOEXEC;
        if (flags & FAN_NONBLOCK)
                f_flags |= O_NONBLOCK;

        /* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
        group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
        if (IS_ERR(group)) {
                free_uid(user);
                return PTR_ERR(group);
        }

        group->fanotify_data.user = user;
        atomic_inc(&user->fanotify_listeners);

        oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
        if (unlikely(!oevent)) {
                fd = -ENOMEM;
                goto out_destroy_group;
        }
        group->overflow_event = &oevent->fse;

        if (force_o_largefile())
                event_f_flags |= O_LARGEFILE;
        group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        spin_lock_init(&group->fanotify_data.access_lock);
        init_waitqueue_head(&group->fanotify_data.access_waitq);
        INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
        switch (flags & FAN_ALL_CLASS_BITS) {
        case FAN_CLASS_NOTIF:
                group->priority = FS_PRIO_0;
                break;
        case FAN_CLASS_CONTENT:
                group->priority = FS_PRIO_1;
                break;
        case FAN_CLASS_PRE_CONTENT:
                group->priority = FS_PRIO_2;
                break;
        default:
                fd = -EINVAL;
                goto out_destroy_group;
        }

        if (flags & FAN_UNLIMITED_QUEUE) {
                fd = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out_destroy_group;
                group->max_events = UINT_MAX;
        } else {
                group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
        }

        if (flags & FAN_UNLIMITED_MARKS) {
                fd = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out_destroy_group;
                group->fanotify_data.max_marks = UINT_MAX;
        } else {
                group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
        }

        fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
        if (fd < 0)
                goto out_destroy_group;

        return fd;

out_destroy_group:
        fsnotify_destroy_group(group);
        return fd;
}

SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
                __u64, mask, int, dfd,
                const char __user *, pathname)
{
        struct inode *inode = NULL;
        struct vfsmount *mnt = NULL;
        struct fsnotify_group *group;
        struct fd f;
        struct path path;
        int ret;

        pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
                 __func__, fanotify_fd, flags, dfd, pathname, mask);

        /* we only use the lower 32 bits of the mask as of right now */
        if (mask & ((__u64)0xffffffff << 32))
                return -EINVAL;

        if (flags & ~FAN_ALL_MARK_FLAGS)
                return -EINVAL;
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
        case FAN_MARK_ADD:              /* fallthrough */
        case FAN_MARK_REMOVE:
                if (!mask)
                        return -EINVAL;
                break;
        case FAN_MARK_FLUSH:
                if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (mask & FAN_ONDIR) {
                flags |= FAN_MARK_ONDIR;
                mask &= ~FAN_ONDIR;
        }

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
        if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
                return -EINVAL;

        f = fdget(fanotify_fd);
        if (unlikely(!f.file))
                return -EBADF;

        /* verify that this is indeed an fanotify instance */
        ret = -EINVAL;
        if (unlikely(f.file->f_op != &fanotify_fops))
                goto fput_and_out;
        group = f.file->private_data;

        /*
         * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
         * allowed to set permission events.
         */
        ret = -EINVAL;
        if (mask & FAN_ALL_PERM_EVENTS &&
            group->priority == FS_PRIO_0)
                goto fput_and_out;

        if (flags & FAN_MARK_FLUSH) {
                ret = 0;
                if (flags & FAN_MARK_MOUNT)
                        fsnotify_clear_vfsmount_marks_by_group(group);
                else
                        fsnotify_clear_inode_marks_by_group(group);
                goto fput_and_out;
        }

        ret = fanotify_find_path(dfd, pathname, &path, flags);
        if (ret)
                goto fput_and_out;

        /* inode held in place by reference to path; group by fget on fd */
        if (!(flags & FAN_MARK_MOUNT))
                inode = path.dentry->d_inode;
        else
                mnt = path.mnt;

        /* create/update an inode mark */
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
        case FAN_MARK_ADD:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_add_inode_mark(group, inode, mask, flags);
                break;
        case FAN_MARK_REMOVE:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_remove_inode_mark(group, inode, mask, flags);
                break;
        default:
                ret = -EINVAL;
        }

        path_put(&path);
fput_and_out:
        fdput(f);
        return ret;
}

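/*
 * The compat entry point receives the 64-bit event mask as two 32-bit
 * halves; which half carries the high bits depends on how the 64-bit
 * argument was split on the caller's architecture, hence the __BIG_ENDIAN
 * check below when stitching it back together.
 */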
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
                       int, fanotify_fd, unsigned int, flags,
                       __u32, mask0, __u32, mask1, int, dfd,
                       const char __user *, pathname)
{
        return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
                                 ((__u64)mask0 << 32) | mask1,
#else
                                 ((__u64)mask1 << 32) | mask0,
#endif
                                 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
        fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
                                                SLAB_PANIC);
#endif

        return 0;
}
device_initcall(fanotify_user_setup);