#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 * excluded.
 */
#define	FANOTIFY_INIT_ALL_EVENT_F_BITS				( \
		O_ACCMODE	| O_APPEND	| O_NONBLOCK	| \
		__O_SYNC	| O_DSYNC	| O_CLOEXEC     | \
		O_LARGEFILE	| O_NOATIME	)

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We have held the notification_lock the whole time, so this is
	 * the same event we checked for above.
	 */
	return fsnotify_remove_first_event(group);
}

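/*
 * Open a new file descriptor for userspace, referring to the object the
 * event was generated on.  The descriptor is opened with the f_flags the
 * listener requested at fanotify_init() time, plus FMODE_NONOTIFY so that
 * accesses through it do not generate further events.
 */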
static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can
	 * read even if the file was originally opened O_WRONLY.
	 */
	/*
	 * It's possible this event was an overflow event.  In that case
	 * dentry and mnt are NULL; that's fine, just don't call dentry_open.
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen when, say, tasks are gone and we try to
		 * open their /proc files, or we try to open a WRONLY file
		 * like in sysfs.  We just send the errno to userspace since
		 * there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

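/*
 * Fill in the fixed-size fanotify_event_metadata record for one queued
 * event.  Overflow events are reported with fd set to FAN_NOFD; for all
 * other events a new file descriptor is created via create_fd().
 */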
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	spin_lock(&group->notification_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	spin_unlock(&group->notification_lock);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);

	return return_e;
}

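/*
 * Apply a userspace reply (FAN_ALLOW or FAN_DENY) to the matching
 * permission event on access_list and wake up the task blocked in
 * fanotify_get_response().
 */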
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid.  If it is invalid we do nothing;
	 * either userspace can send a valid response or we will clean the
	 * event up after the timeout.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}
#endif

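/*
 * Copy one event to the userspace buffer.  On success the event's new
 * file descriptor is installed into the caller's fd table; on failure
 * the descriptor and file reference are released again.
 */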
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		return ret;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS)
		FANOTIFY_PE(event)->fd = fd;
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

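/*
 * Copy as many whole events as fit into the userspace buffer.  Blocks
 * (unless O_NONBLOCK is set) while the queue is empty and nothing has
 * been copied yet.  Permission events are moved to access_list to wait
 * for a userspace response instead of being destroyed.
 */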
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
			break;
		}

		if (!kevent) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;

			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = copy_event_to_user(group, kevent, buf);
		if (unlikely(ret == -EOPENSTALE)) {
			/*
			 * We cannot report events with stale fd so drop it.
			 * Setting ret to 0 will continue the event loop and
			 * do the right thing if there are no more events to
			 * read (i.e. return bytes read, -EAGAIN or wait).
			 */
			ret = 0;
		}

		/*
		 * Permission events get queued to wait for response.  Other
		 * events can be destroyed now.
		 */
		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
			fsnotify_destroy_event(group, kevent);
		} else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
			if (ret <= 0) {
				FANOTIFY_PE(kevent)->response = FAN_DENY;
				wake_up(&group->fanotify_data.access_waitq);
			} else {
				spin_lock(&group->notification_lock);
				list_add_tail(&kevent->list,
					&group->fanotify_data.access_list);
				spin_unlock(&group->notification_lock);
			}
#endif
		}
		if (ret < 0)
			break;
		buf += ret;
		count -= ret;
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

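/*
 * Writes to a fanotify file descriptor carry permission event responses
 * (struct fanotify_response).  Without CONFIG_FANOTIFY_ACCESS_PERMISSIONS
 * there is nothing to respond to, so writing is rejected.
 */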
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_perm_event_info *event, *next;
	struct fsnotify_event *fsn_event;

	/*
	 * Stop new events from arriving in the notification queue.  Since
	 * userspace cannot use the fanotify fd anymore, no event can enter
	 * or leave access_list by now either.
	 */
	fsnotify_group_stop_queueing(group);

	/*
	 * Process all permission events on access_list and notification queue
	 * and simulate reply from userspace.
	 */
	spin_lock(&group->notification_lock);
	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}

	/*
	 * Destroy all non-permission events. For permission events just
	 * dequeue them and set the response. They will be freed once the
	 * response is consumed and fanotify_get_response() returns.
	 */
	while (!fsnotify_notify_queue_is_empty(group)) {
		fsn_event = fsnotify_remove_first_event(group);
		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
			spin_unlock(&group->notification_lock);
			fsnotify_destroy_event(group, fsn_event);
			spin_lock(&group->notification_lock);
		} else
			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
	}
	spin_unlock(&group->notification_lock);

	/* The response for all permission events is set, wake up waiters */
	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

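/*
 * FIONREAD reports how many bytes a read() would currently return, i.e.
 * the fixed metadata length times the number of queued events.
 */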
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

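/*
 * Resolve the dfd/filename pair passed to fanotify_mark() into a struct
 * path, honouring FAN_MARK_DONT_FOLLOW and FAN_MARK_ONLYDIR.  On success
 * the caller holds a path reference and must path_put() it.
 */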
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission2(path->mnt, path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

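/*
 * Clear bits in a mark's event mask (or ignored mask, with
 * FAN_MARK_IGNORED_MASK).  Returns the event bits actually removed and
 * sets *destroy when both masks are now empty, so the caller can free
 * the mark.
 */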
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask = 0;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask & ~mask;

		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;

		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, tmask);
	} else {
		__u32 tmask = fsn_mark->ignored_mask & ~mask;
		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;

		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}
	*destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
	spin_unlock(&fsn_mark->lock);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

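/*
 * Set bits in a mark's event mask (or ignored mask, with
 * FAN_MARK_IGNORED_MASK).  Returns the event bits that were newly added.
 */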
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask | mask;

		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, tmask);
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

/* fanotify syscalls */
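/*
 * Userspace drives these syscalls roughly as follows.  This is an
 * illustrative sketch only (not part of this file); error handling is
 * omitted and the glibc wrappers from <sys/fanotify.h> are assumed:
 *
 *	int fan = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
 *	fanotify_mark(fan, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/");
 *
 *	struct fanotify_event_metadata ev;
 *	read(fan, &ev, sizeof(ev));	// one event; see fanotify_read()
 *	close(ev.fd);			// drop the fd create_fd() opened
 */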
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
		return -EINVAL;

	switch (event_f_flags & O_ACCMODE) {
	case O_RDONLY:
	case O_RDWR:
	case O_WRONLY:
		break;
	default:
		return -EINVAL;
	}

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}

SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		break;
	case FAN_MARK_FLUSH:
		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	if (flags & FAN_MARK_FLUSH) {
		ret = 0;
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		goto fput_and_out;
	}

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}
device_initcall(fanotify_user_setup);