// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  fs/eventpoll.c (Efficient event retrieval implementation)
 *  Copyright (C) 2001,...,2009	 Davide Libenzi
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
#include <net/busy_poll.h>

#include <trace/hooks/fs.h>

/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (rwlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a rwlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and it is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is also acquired when inserting an epoll fd onto another epoll
 * fd. We do this so that we walk the epoll tree and ensure that this
 * insertion does not create a cycle of epoll file descriptors, which
 * could lead to deadlock. We need a global mutex to prevent two
 * simultaneous inserts (A into B and B into A) from racing and
 * constructing a cycle without either insert observing that it is
 * going to.
 * It is necessary to acquire multiple "ep->mtx"es at once in the
 * case when one epoll fd is added to another. In this case, we
 * always acquire the locks in the order of nesting (i.e. after
 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
 * before e2->mtx). Since we disallow cycles of epoll file
 * descriptors, this ensures that the mutexes are well-ordered. In
 * order to communicate this nesting to lockdep, when walking a tree
 * of epoll file descriptors, we use the current recursion depth as
 * the lockdep subkey.
 * It is possible to drop the "ep->mtx" and to use the global
 * mutex "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" will make the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" will guarantee
 * a better scalability.
 */
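
/*
 * A minimal userspace sketch of the nesting case above:
 *
 *	int e1 = epoll_create1(0);
 *	int e2 = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(e1, EPOLL_CTL_ADD, e2, &ev);	(takes e1->mtx, then e2->mtx)
 *
 * A concurrent epoll_ctl(e2, EPOLL_CTL_ADD, e1, &ev) issued by another
 * task is serialized by epmutex, which is what keeps the two inserts
 * from racing into a cycle.
 */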

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)

#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)

#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
				EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)

/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))

struct epoll_filefd {
	struct file *file;
	int fd;
} __packed;

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct eppoll_entry *next;

	/* The "base" pointer is set to the container "struct epitem" */
	struct epitem *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_entry_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 * Avoid increasing the size of this struct, there can be many thousands
 * of these on a server and we do not want this to take another cache line.
 */
struct epitem {
	union {
		/* RB tree node links this structure to the eventpoll RB tree */
		struct rb_node rbn;
		/* Used to free the struct epitem */
		struct rcu_head rcu;
	};

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * single linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* List containing poll wait queues */
	struct eppoll_entry *pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct hlist_node fllink;

	/* wakeup_source used when EPOLLWAKEUP is set */
	struct wakeup_source __rcu *ws;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* Lock which protects rdllist and ovflist */
	rwlock_t lock;

	/* RB tree root used to store monitored fd structs */
	struct rb_root_cached rbr;

	/*
	 * This is a single linked list that chains all the "struct epitem"
	 * whose events arrived while transferring ready events to userspace
	 * without holding ->lock.
	 */
	struct epitem *ovflist;

	/* wakeup_source used when ep_scan_ready_list is running */
	struct wakeup_source *ws;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;

	struct file *file;

	/* used to optimize loop detection check */
	u64 gen;
	struct hlist_head refs;

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* used to track busy poll napi_id */
	unsigned int napi_id;
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* tracks wakeup nests for lockdep validation */
	u8 nests;
#endif
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);

static u64 loop_check_gen = 0;

/* Used to check for epoll file descriptor inclusion loops */
static struct eventpoll *inserting_into;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/*
 * List of files with newly added links, where we may need to limit the number
 * of emanating paths. Protected by the epmutex.
 */
struct epitems_head {
	struct hlist_head epitems;
	struct epitems_head *next;
};
static struct epitems_head *tfile_check_list = EP_UNACTIVE_PTR;

static struct kmem_cache *ephead_cache __read_mostly;

static inline void free_ephead(struct epitems_head *head)
{
	if (head)
		kmem_cache_free(ephead_cache, head);
}

static void list_file(struct file *file)
{
	struct epitems_head *head;

	head = container_of(file->f_ep, struct epitems_head, epitems);
	if (!head->next) {
		head->next = tfile_check_list;
		tfile_check_list = head;
	}
}

static void unlist_file(struct epitems_head *head)
{
	struct epitems_head *to_free = head;
	struct hlist_node *p = rcu_dereference(hlist_first_rcu(&head->epitems));
	if (p) {
		struct epitem *epi = container_of(p, struct epitem, fllink);
		spin_lock(&epi->ffd.file->f_lock);
		if (!hlist_empty(&head->epitems))
			to_free = NULL;
		head->next = NULL;
		spin_unlock(&epi->ffd.file->f_lock);
	}
	free_ephead(to_free);
}

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long long_zero;
static long long_max = LONG_MAX;

struct ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(max_user_watches),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &long_zero,
		.extra2		= &long_max,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static const struct file_operations eventpoll_fops;

static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
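
/*
 * For example: keys order first by file pointer and then by fd, so the
 * same struct file registered through two dup()ed descriptors (say fd 4
 * and fd 7) compares as two distinct keys and therefore occupies two
 * distinct nodes in the RB tree.
 */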

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct epitem *epi)
{
	return !list_empty(&epi->rdllink);
}

static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/**
 * ep_events_available - Checks if ready events might be available.
 *
 * @ep: Pointer to the eventpoll context.
 *
 * Return: a value different than %zero if ready events are available,
 *         or %zero otherwise.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
	return !list_empty_careful(&ep->rdllist) ||
		READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
	struct eventpoll *ep = p;

	return ep_events_available(ep) || busy_loop_timeout(start_time);
}

/*
 * Busy poll if busy polling is globally on, supporting sockets were found
 * and there are no events; the busy loop will return once need_resched()
 * is set or ep_events_available() reports events.
 *
 * We must do our busy polling with irqs enabled.
 */
static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
	unsigned int napi_id = READ_ONCE(ep->napi_id);

	if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) {
		napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep, false,
			       BUSY_POLL_BUDGET);
		if (ep_events_available(ep))
			return true;
		/*
		 * Busy poll timed out.  Drop NAPI ID for now, we can add
		 * it back in when we have moved a socket with a valid NAPI
		 * ID onto the ready list.
		 */
		ep->napi_id = 0;
		return false;
	}
	return false;
}

/*
 * Set epoll busy poll NAPI ID from sk.
 */
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
	struct eventpoll *ep;
	unsigned int napi_id;
	struct socket *sock;
	struct sock *sk;

	if (!net_busy_loop_on())
		return;

	sock = sock_from_file(epi->ffd.file);
	if (!sock)
		return;

	sk = sock->sk;
	if (!sk)
		return;

	napi_id = READ_ONCE(sk->sk_napi_id);
	ep = epi->ep;

	/* Non-NAPI IDs can be rejected
	 *	or
	 * Nothing to do if we already have this ID
	 */
	if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
		return;

	/* record NAPI ID for use in next busy poll */
	ep->napi_id = napi_id;
}

#else

static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
	return false;
}

static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

/*
 * As described in commit 0ccf831cb ("lockdep: annotate epoll"),
 * the use of wait queues used by epoll is done in a very controlled
 * manner. Wake ups can nest inside each other, but are never done
 * with the same locking. For example:
 *
 *   dfd = socket(...);
 *   efd1 = epoll_create();
 *   efd2 = epoll_create();
 *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 *
 * When a packet arrives to the device underneath "dfd", the net code will
 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 * callback wakeup entry on that queue, and the wake_up() performed by the
 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 * (efd1) notices that it may have some event ready, so it needs to wake up
 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
 * that ends up in another wake_up(), after having checked about the
 * recursion constraints. That is, no more than EP_MAX_POLLWAKE_NESTS, to
 * avoid stack blasting.
 *
 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 * this special case of epoll.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
			     unsigned pollflags)
{
	struct eventpoll *ep_src;
	unsigned long flags;
	u8 nests = 0;

	/*
	 * To set the subclass or nesting level for spin_lock_irqsave_nested()
	 * it might be natural to create a per-cpu nest count. However, since
	 * we can recurse on ep->poll_wait.lock, and a non-raw spinlock can
	 * schedule() in the -rt kernel, the per-cpu variables are no longer
	 * protected. Thus, we are introducing a per-eventpoll nest field.
	 * If we are not being called from ep_poll_callback(), epi is NULL and
	 * we are at the first level of nesting, 0. Otherwise, we are being
	 * called from ep_poll_callback() and if a previous wakeup source is
	 * not an epoll file itself, we are at depth 1 since the wakeup source
	 * is depth 0. If the wakeup source is a previous epoll file in the
	 * wakeup chain then we use its nests value and record ours as
	 * nests + 1. The previous epoll file's nests value is stable since it
	 * is already holding its own poll_wait.lock.
	 */
	if (epi) {
		if ((is_file_epoll(epi->ffd.file))) {
			ep_src = epi->ffd.file->private_data;
			nests = ep_src->nests;
		} else {
			nests = 1;
		}
	}
	spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
	ep->nests = nests + 1;
	wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
	ep->nests = 0;
	spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
}

#else

static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
			     unsigned pollflags)
{
	wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
}

#endif

static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
	wait_queue_head_t *whead;

	rcu_read_lock();
	/*
	 * If it is cleared by POLLFREE, it should be rcu-safe.
	 * If we read NULL we need a barrier paired with
	 * smp_store_release() in ep_poll_callback(), otherwise
	 * we rely on whead->lock.
	 */
	whead = smp_load_acquire(&pwq->whead);
	if (whead)
		remove_wait_queue(whead, &pwq->wait);
	rcu_read_unlock();
}

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor.  Must be called with "mtx" held (or "epmutex" if called from
 * ep_free).
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	struct eppoll_entry **p = &epi->pwqlist;
	struct eppoll_entry *pwq;

	while ((pwq = *p) != NULL) {
		*p = pwq->next;
		ep_remove_wait_queue(pwq);
		kmem_cache_free(pwq_cache, pwq);
	}
}

/* call only when ep->mtx is held */
static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
{
	return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
}

/* call only when ep->mtx is held */
static inline void ep_pm_stay_awake(struct epitem *epi)
{
	struct wakeup_source *ws = ep_wakeup_source(epi);

	if (ws)
		__pm_stay_awake(ws);
}

static inline bool ep_has_wakeup_source(struct epitem *epi)
{
	return rcu_access_pointer(epi->ws) ? true : false;
}

/* call when ep->mtx cannot be held (ep_poll_callback) */
static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
{
	struct wakeup_source *ws;

	rcu_read_lock();
	ws = rcu_dereference(epi->ws);
	if (ws)
		__pm_stay_awake(ws);
	rcu_read_unlock();
}


/*
 * ep->mtx needs to be held because we could be hit by
 * eventpoll_release_file() and epoll_ctl().
 */
static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
{
	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping without locks are not lost. We cannot
	 * have the poll callback queue directly on ep->rdllist,
	 * because we want the "sproc" callback to be able to do it
	 * in a lockless way.
	 */
	lockdep_assert_irqs_enabled();
	write_lock_irq(&ep->lock);
	list_splice_init(&ep->rdllist, txlist);
	WRITE_ONCE(ep->ovflist, NULL);
	write_unlock_irq(&ep->lock);
}

static void ep_done_scan(struct eventpoll *ep,
			 struct list_head *txlist)
{
	struct epitem *epi, *nepi;

	write_lock_irq(&ep->lock);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		if (!ep_is_linked(epi)) {
			/*
			 * ->ovflist is LIFO, so we have to reverse it in order
			 * to keep it in FIFO order.
			 */
			list_add(&epi->rdllink, &ep->rdllist);
			ep_pm_stay_awake(epi);
		}
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	list_splice(txlist, &ep->rdllist);
	__pm_relax(ep->ws);

	if (!list_empty(&ep->rdllist)) {
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
	}

	write_unlock_irq(&ep->lock);
}

static void epi_rcu_free(struct rcu_head *head)
{
	struct epitem *epi = container_of(head, struct epitem, rcu);
	kmem_cache_free(epi_cache, epi);
}

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	struct file *file = epi->ffd.file;
	struct epitems_head *to_free;
	struct hlist_head *head;

	lockdep_assert_irqs_enabled();

	/*
	 * Removes poll wait queue hooks.
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_lock);
	to_free = NULL;
	head = file->f_ep;
	if (head->first == &epi->fllink && !epi->fllink.next) {
		file->f_ep = NULL;
		if (!is_file_epoll(file)) {
			struct epitems_head *v;
			v = container_of(head, struct epitems_head, epitems);
			if (!smp_load_acquire(&v->next))
				to_free = v;
		}
	}
	hlist_del_rcu(&epi->fllink);
	spin_unlock(&file->f_lock);
	free_ephead(to_free);

	rb_erase_cached(&epi->rbn, &ep->rbr);

	write_lock_irq(&ep->lock);
	if (ep_is_linked(epi))
		list_del_init(&epi->rdllink);
	write_unlock_irq(&ep->lock);

	wakeup_source_unregister(ep_wakeup_source(epi));
	/*
	 * At this point it is safe to free the eventpoll item. Use the union
	 * field epi->rcu, since we are trying to minimize the size of
	 * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
	 * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
	 * use of the rbn field.
	 */
	call_rcu(&epi->rcu, epi_rcu_free);

	percpu_counter_dec(&ep->user->epoll_watches);

	return 0;
}

static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(ep, NULL, 0);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
		cond_resched();
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * holding "epmutex" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 * We do not need to lock ep->mtx, either, we only do it to prevent
	 * a lockdep warning.
	 */
	mutex_lock(&ep->mtx);
	while ((rbp = rb_first_cached(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
		cond_resched();
	}
	mutex_unlock(&ep->mtx);

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	wakeup_source_unregister(ep->ws);
	kfree(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	return 0;
}

static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth);

static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int depth)
{
	struct eventpoll *ep = file->private_data;
	LIST_HEAD(txlist);
	struct epitem *epi, *tmp;
	poll_table pt;
	__poll_t res = 0;

	init_poll_funcptr(&pt, NULL);

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/*
	 * Proceed to find out if wanted events are really available inside
	 * the ready list.
	 */
	mutex_lock_nested(&ep->mtx, depth);
	ep_start_scan(ep, &txlist);
	list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
		if (ep_item_poll(epi, &pt, depth + 1)) {
			res = EPOLLIN | EPOLLRDNORM;
			break;
		} else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as
			 * the caller-requested events go. We can remove it here.
			 */
			__pm_relax(ep_wakeup_source(epi));
			list_del_init(&epi->rdllink);
		}
	}
	ep_done_scan(ep, &txlist);
	mutex_unlock(&ep->mtx);
	return res;
}

/*
 * Differs from ep_eventpoll_poll() in that internal callers already have
 * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
 * is correctly annotated.
 */
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
				 int depth)
{
	struct file *file = epi->ffd.file;
	__poll_t res;

	pt->_key = epi->event.events;
	if (!is_file_epoll(file))
		res = vfs_poll(file, pt);
	else
		res = __ep_eventpoll_poll(file, pt, depth);
	return res & epi->event.events;
}

static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	return __ep_eventpoll_poll(file, wait, 0);
}

#ifdef CONFIG_PROC_FS
static void ep_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventpoll *ep = f->private_data;
	struct rb_node *rbp;

	mutex_lock(&ep->mtx);
	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
		struct inode *inode = file_inode(epi->ffd.file);

		seq_printf(m, "tfd: %8d events: %8x data: %16llx "
			   " pos:%lli ino:%lx sdev:%x\n",
			   epi->ffd.fd, epi->event.events,
			   (long long)epi->event.data,
			   (long long)epi->ffd.file->f_pos,
			   inode->i_ino, inode->i_sb->s_dev);
		if (seq_has_overflowed(m))
			break;
	}
	mutex_unlock(&ep->mtx);
}
#endif

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= ep_show_fdinfo,
#endif
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll,
	.llseek		= noop_llseek,
};

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct eventpoll *ep;
	struct epitem *epi;
	struct hlist_node *next;

	/*
	 * We don't want to get "file->f_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free() but holding the mutex
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 *
	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
	 */
	mutex_lock(&epmutex);
	if (unlikely(!file->f_ep)) {
		mutex_unlock(&epmutex);
		return;
	}
	hlist_for_each_entry_safe(epi, next, file->f_ep, fllink) {
		ep = epi->ep;
		mutex_lock_nested(&ep->mtx, 0);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}
	mutex_unlock(&epmutex);
}

static int ep_alloc(struct eventpoll **pep)
{
	int error;
	struct user_struct *user;
	struct eventpoll *ep;

	user = get_current_user();
	error = -ENOMEM;
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		goto free_uid;

	mutex_init(&ep->mtx);
	rwlock_init(&ep->lock);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT_CACHED;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = user;

	*pep = ep;

	return 0;

free_uid:
	free_uid(user);
	return error;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	return epir;
}

#ifdef CONFIG_KCMP
static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
{
	struct rb_node *rbp;
	struct epitem *epi;

	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);
		if (epi->ffd.fd == tfd) {
			if (toff == 0)
				return epi;
			else
				toff--;
		}
		cond_resched();
	}

	return NULL;
}

struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
				     unsigned long toff)
{
	struct file *file_raw;
	struct eventpoll *ep;
	struct epitem *epi;

	if (!is_file_epoll(file))
		return ERR_PTR(-EINVAL);

	ep = file->private_data;

	mutex_lock(&ep->mtx);
	epi = ep_find_tfd(ep, tfd, toff);
	if (epi)
		file_raw = epi->ffd.file;
	else
		file_raw = ERR_PTR(-ENOENT);
	mutex_unlock(&ep->mtx);

	return file_raw;
}
#endif /* CONFIG_KCMP */

/*
 * Adds a new entry to the tail of the list in a lockless way, i.e.
 * multiple CPUs are allowed to call this function concurrently.
 *
 * Beware: it is necessary to prevent any other modifications of the
 *         existing list until all changes are completed, in other words
 *         concurrent list_add_tail_lockless() calls should be protected
 *         with a read lock, where write lock acts as a barrier which
 *         makes sure all list_add_tail_lockless() calls are fully
 *         completed.
 *
 *         Also an element can be locklessly added to the list only in one
 *         direction, i.e. either to the tail or to the head, otherwise
 *         concurrent access will corrupt the list.
 *
 * Return: %false if element has been already added to the list, %true
 * otherwise.
 */
static inline bool list_add_tail_lockless(struct list_head *new,
					  struct list_head *head)
{
	struct list_head *prev;

	/*
	 * This is simple 'new->next = head' operation, but cmpxchg()
	 * is used in order to detect that same element has been just
	 * added to the list from another CPU: the winner observes
	 * new->next == new.
	 */
	if (cmpxchg(&new->next, new, head) != new)
		return false;

	/*
	 * Initially ->next of a new element must be updated with the head
	 * (we are inserting to the tail) and only then pointers are atomically
	 * exchanged.  XCHG guarantees memory ordering, thus ->next should be
	 * updated before pointers are actually swapped and pointers are
	 * swapped before prev->next is updated.
	 */

	prev = xchg(&head->prev, new);

	/*
	 * It is safe to modify prev->next and new->prev, because a new element
	 * is added only to the tail and new->next is updated before XCHG.
	 */

	prev->next = new;
	new->prev = prev;

	return true;
}
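
/*
 * A sketch of the race the cmpxchg() above detects: if two CPUs race to
 * queue the same 'new' entry, both attempt cmpxchg(&new->next, new, head),
 * but only one of them observes the initial new->next == new and wins;
 * the loser returns %false, so the entry is linked exactly once.
 */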

/*
 * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
 * i.e. multiple CPUs are allowed to call this function concurrently.
 *
 * Return: %false if epi element has been already chained, %true otherwise.
 */
static inline bool chain_epi_lockless(struct epitem *epi)
{
	struct eventpoll *ep = epi->ep;

	/* Fast preliminary check */
	if (epi->next != EP_UNACTIVE_PTR)
		return false;

	/* Check that the same epi has not been just chained from another CPU */
	if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
		return false;

	/* Atomically exchange tail */
	epi->next = xchg(&ep->ovflist, epi);

	return true;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 *
 * This callback takes a read lock in order not to contend with concurrent
 * events from another file descriptor, thus all modifications to ->rdllist
 * or ->ovflist are lockless.  Read lock is paired with the write lock from
 * ep_scan_ready_list(), which stops all list modifications and guarantees
 * that lists state is seen correctly.
 *
 * Another thing worth mentioning is that ep_poll_callback() can be called
 * concurrently for the same @epi from different CPUs if poll table was inited
 * with several wait queue entries.  Plural wakeup from different CPUs of a
 * single wait queue is serialized by wq.lock, but the case when multiple wait
 * queues are used should be detected accordingly.  This is detected using
 * cmpxchg() operation.
 */
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;
	__poll_t pollflags = key_to_poll(key);
	unsigned long flags;
	int ewake = 0;

	read_lock_irqsave(&ep->lock, flags);

	ep_set_busy_poll_napi_id(epi);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * Check the events coming with the callback. At this stage, not
	 * every device reports the events in the "key" parameter of the
	 * callback. We need to be able to handle both cases here, hence the
	 * test for "key" != NULL before the event match test.
	 */
	if (pollflags && !(pollflags & epi->event.events))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
		if (chain_epi_lockless(epi))
			ep_pm_stay_awake_rcu(epi);
	} else if (!ep_is_linked(epi)) {
		/* In the usual case, add event to ready list. */
		if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
			ep_pm_stay_awake_rcu(epi);
	}

	/*
	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq)) {
		if ((epi->event.events & EPOLLEXCLUSIVE) &&
					!(pollflags & POLLFREE)) {
			switch (pollflags & EPOLLINOUT_BITS) {
			case EPOLLIN:
				if (epi->event.events & EPOLLIN)
					ewake = 1;
				break;
			case EPOLLOUT:
				if (epi->event.events & EPOLLOUT)
					ewake = 1;
				break;
			case 0:
				ewake = 1;
				break;
			}
		}
		wake_up(&ep->wq);
	}
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	read_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);

	if (!(epi->event.events & EPOLLEXCLUSIVE))
		ewake = 1;

	if (pollflags & POLLFREE) {
		/*
		 * If we race with ep_remove_wait_queue() it can miss
		 * ->whead = NULL and do another remove_wait_queue() after
		 * us, so we can't use __remove_wait_queue().
		 */
		list_del_init(&wait->entry);
		/*
		 * ->whead != NULL protects us from the race with ep_free()
		 * or ep_remove(), ep_remove_wait_queue() takes whead->lock
		 * held by the caller. Once we nullify it, nothing protects
		 * ep/epi or even wait.
		 */
		smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
	}

	return ewake;
}
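
/*
 * For example: an entry registered with EPOLLEXCLUSIVE | EPOLLIN that is
 * woken with an EPOLLOUT-only key leaves ewake at 0. The wait queue walk
 * treats a zero return from an exclusive entry as "not woken" and keeps
 * going, so the event is offered to the next exclusive waiter instead.
 */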

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct ep_pqueue *epq = container_of(pt, struct ep_pqueue, pt);
	struct epitem *epi = epq->epi;
	struct eppoll_entry *pwq;

	if (unlikely(!epi))	/* an earlier allocation has failed */
		return;

	pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL);
	if (unlikely(!pwq)) {
		epq->epi = NULL;
		return;
	}

	init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
	pwq->whead = whead;
	pwq->base = epi;
	if (epi->event.events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(whead, &pwq->wait);
	else
		add_wait_queue(whead, &pwq->wait);
	pwq->next = epi->pwqlist;
	epi->pwqlist = pwq;
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
	struct epitem *epic;
	bool leftmost = true;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0) {
			p = &parent->rb_right;
			leftmost = false;
		} else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
}


#define PATH_ARR_SIZE 5
/*
 * These are the number of paths of length 1 to 5 that we are allowing to
 * emanate from a single file of interest. For example, we allow 1000 paths
 * of length 1 to emanate from each file of interest. This essentially
 * represents the potential wakeup paths, which need to be limited in order
 * to avoid massive uncontrolled wakeup storms. The common use case should be
 * a single ep which is connected to n file sources. In this case each file
 * source has 1 path of length 1. Thus, the numbers below should be more than
 * sufficient. These path limits are enforced during an EPOLL_CTL_ADD
 * operation, since a modify and delete can't add additional paths.
 * Protected by the epmutex.
 */
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];
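
/*
 * A worked example of the accounting below: a target file watched by a
 * single epoll fd that is itself watched by N other epoll fds yields N
 * wakeup paths counted at depth 1. path_limits[1] is 500, so the 501st
 * such link makes reverse_path_check() fail and the EPOLL_CTL_ADD is
 * rejected.
 */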

static int path_count_inc(int nests)
{
	/* Allow an arbitrary number of depth 1 paths */
	if (nests == 0)
		return 0;

	if (++path_count[nests] > path_limits[nests])
		return -1;
	return 0;
}

static void path_count_init(void)
{
	int i;

	for (i = 0; i < PATH_ARR_SIZE; i++)
		path_count[i] = 0;
}

static int reverse_path_check_proc(struct hlist_head *refs, int depth)
{
	int error = 0;
	struct epitem *epi;

	if (depth > EP_MAX_NESTS) /* too deep nesting */
		return -1;

	/* CTL_DEL can remove links here, but that can't increase our count */
	hlist_for_each_entry_rcu(epi, refs, fllink) {
		struct hlist_head *refs = &epi->ep->refs;
		if (hlist_empty(refs))
			error = path_count_inc(depth);
		else
			error = reverse_path_check_proc(refs, depth + 1);
		if (error != 0)
			break;
	}
	return error;
}

/**
 * reverse_path_check - The tfile_check_list is a list of epitems_head, which
 *                      have links that are proposed to be newly added. We need
 *                      to make sure that those added links don't add too many
 *                      paths such that we will spend all our time waking up
 *                      eventpoll objects.
 *
 * Return: %zero if the proposed links don't create too many paths,
 *	   %-1 otherwise.
 */
static int reverse_path_check(void)
{
	struct epitems_head *p;

	for (p = tfile_check_list; p != EP_UNACTIVE_PTR; p = p->next) {
		int error;

		path_count_init();
		rcu_read_lock();
		error = reverse_path_check_proc(&p->epitems, 0);
		rcu_read_unlock();
		if (error)
			return error;
	}
	return 0;
}

static int ep_create_wakeup_source(struct epitem *epi)
{
	struct name_snapshot n;
	struct wakeup_source *ws;
	char ws_name[64];

	strscpy(ws_name, "eventpoll", sizeof(ws_name));
	trace_android_vh_ep_create_wakeup_source(ws_name, sizeof(ws_name));
	if (!epi->ep->ws) {
		epi->ep->ws = wakeup_source_register(NULL, ws_name);
		if (!epi->ep->ws)
			return -ENOMEM;
	}

	take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
	strscpy(ws_name, n.name.name, sizeof(ws_name));
	trace_android_vh_ep_create_wakeup_source(ws_name, sizeof(ws_name));
	ws = wakeup_source_register(NULL, ws_name);
	release_dentry_name_snapshot(&n);

	if (!ws)
		return -ENOMEM;
	rcu_assign_pointer(epi->ws, ws);

	return 0;
}

/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
static noinline void ep_destroy_wakeup_source(struct epitem *epi)
{
	struct wakeup_source *ws = ep_wakeup_source(epi);

	RCU_INIT_POINTER(epi->ws, NULL);

	/*
	 * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
	 * used internally by wakeup_source_remove, too (called by
	 * wakeup_source_unregister), so we cannot use call_rcu
	 */
	synchronize_rcu();
	wakeup_source_unregister(ws);
}

static int attach_epitem(struct file *file, struct epitem *epi)
{
	struct epitems_head *to_free = NULL;
	struct hlist_head *head = NULL;
	struct eventpoll *ep = NULL;

	if (is_file_epoll(file))
		ep = file->private_data;

	if (ep) {
		head = &ep->refs;
	} else if (!READ_ONCE(file->f_ep)) {
allocate:
		to_free = kmem_cache_zalloc(ephead_cache, GFP_KERNEL);
		if (!to_free)
			return -ENOMEM;
		head = &to_free->epitems;
	}
	spin_lock(&file->f_lock);
	if (!file->f_ep) {
		if (unlikely(!head)) {
			spin_unlock(&file->f_lock);
			goto allocate;
		}
		file->f_ep = head;
		to_free = NULL;
	}
	hlist_add_head_rcu(&epi->fllink, file->f_ep);
	spin_unlock(&file->f_lock);
	free_ephead(to_free);
	return 0;
}

/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
		     struct file *tfile, int fd, int full_check)
{
	int error, pwake = 0;
	__poll_t revents;
	struct epitem *epi;
	struct ep_pqueue epq;
	struct eventpoll *tep = NULL;

	if (is_file_epoll(tfile))
		tep = tfile->private_data;

	lockdep_assert_irqs_enabled();

	if (unlikely(percpu_counter_compare(&ep->user->epoll_watches,
					    max_user_watches) >= 0))
		return -ENOSPC;
	percpu_counter_inc(&ep->user->epoll_watches);

	if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) {
		percpu_counter_dec(&ep->user->epoll_watches);
		return -ENOMEM;
	}

	/* Item initialization follows here ... */
	INIT_LIST_HEAD(&epi->rdllink);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->next = EP_UNACTIVE_PTR;

	if (tep)
		mutex_lock_nested(&tep->mtx, 1);
	/* Add the current item to the list of active epoll hooks for this file */
	if (unlikely(attach_epitem(tfile, epi) < 0)) {
		if (tep)
			mutex_unlock(&tep->mtx);
		kmem_cache_free(epi_cache, epi);
		percpu_counter_dec(&ep->user->epoll_watches);
		return -ENOMEM;
	}

	if (full_check && !tep)
		list_file(tfile);

	/*
	 * Add the current item to the RB tree. All RB tree operations are
	 * protected by "mtx", and ep_insert() is called with "mtx" held.
	 */
	ep_rbtree_insert(ep, epi);
	if (tep)
		mutex_unlock(&tep->mtx);

	/* now check if we've created too many backpaths */
	if (unlikely(full_check && reverse_path_check())) {
		ep_remove(ep, epi);
		return -EINVAL;
	}

	if (epi->event.events & EPOLLWAKEUP) {
		error = ep_create_wakeup_source(epi);
		if (error) {
			ep_remove(ep, epi);
			return error;
		}
	}

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function. Note that after
	 * this operation completes, the poll callback can start hitting
	 * the new item.
	 */
	revents = ep_item_poll(epi, &epq.pt, 1);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely, an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	if (unlikely(!epq.epi)) {
		ep_remove(ep, epi);
		return -ENOMEM;
	}

	/* We have to drop the new item inside our item list to keep track of it */
	write_lock_irq(&ep->lock);

	/* record NAPI ID of new item if present */
	ep_set_busy_poll_napi_id(epi);

	/* If the file is already "ready" we drop it inside the ready list */
	if (revents && !ep_is_linked(epi)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
		ep_pm_stay_awake(epi);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irq(&ep->lock);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(ep, NULL, 0);

	return 0;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi,
		     const struct epoll_event *event)
{
	int pwake = 0;
	poll_table pt;

	lockdep_assert_irqs_enabled();

	init_poll_funcptr(&pt, NULL);

	/*
	 * Set the new event interest mask before calling f_op->poll();
	 * otherwise we might miss an event that happens between the
	 * f_op->poll() call and the new event set registering.
	 */
	epi->event.events = event->events; /* need barrier below */
	epi->event.data = event->data; /* protected by mtx */
	if (epi->event.events & EPOLLWAKEUP) {
		if (!ep_has_wakeup_source(epi))
			ep_create_wakeup_source(epi);
	} else if (ep_has_wakeup_source(epi)) {
		ep_destroy_wakeup_source(epi);
	}

	/*
	 * The following barrier has two effects:
	 *
	 * 1) Flush epi changes above to other CPUs.  This ensures
	 *    we do not miss events from ep_poll_callback if an
	 *    event occurs immediately after we call f_op->poll().
	 *    We need this because we did not take ep->lock while
	 *    changing epi above (but ep_poll_callback does take
	 *    ep->lock).
	 *
	 * 2) We also need to ensure we do not miss _past_ events
	 *    when calling f_op->poll().  This barrier also
	 *    pairs with the barrier in wq_has_sleeper (see
	 *    comments for wq_has_sleeper).
	 *
	 * This barrier will now guarantee ep_poll_callback or f_op->poll
	 * (or both) will notice the readiness of an item.
	 */
	smp_mb();

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 * If the item is "hot" and it is not registered inside the ready
	 * list, push it inside.
	 */
	if (ep_item_poll(epi, &pt, 1)) {
		write_lock_irq(&ep->lock);
		if (!ep_is_linked(epi)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			ep_pm_stay_awake(epi);

			/* Notify waiting tasks that events are available */
			if (waitqueue_active(&ep->wq))
				wake_up(&ep->wq);
			if (waitqueue_active(&ep->poll_wait))
				pwake++;
		}
		write_unlock_irq(&ep->lock);
	}

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(ep, NULL, 0);

	return 0;
}
1639 
ep_send_events(struct eventpoll * ep,struct epoll_event __user * events,int maxevents)1640 static int ep_send_events(struct eventpoll *ep,
1641 			  struct epoll_event __user *events, int maxevents)
1642 {
1643 	struct epitem *epi, *tmp;
1644 	LIST_HEAD(txlist);
1645 	poll_table pt;
1646 	int res = 0;
1647 
1648 	/*
1649 	 * Always short-circuit for fatal signals to allow threads to make a
1650 	 * timely exit without the chance of finding more events available and
1651 	 * fetching repeatedly.
1652 	 */
1653 	if (fatal_signal_pending(current))
1654 		return -EINTR;
1655 
1656 	init_poll_funcptr(&pt, NULL);
1657 
1658 	mutex_lock(&ep->mtx);
1659 	ep_start_scan(ep, &txlist);
1660 
1661 	/*
1662 	 * We can loop without lock because we are passed a task private list.
1663 	 * Items cannot vanish during the loop we are holding ep->mtx.
1664 	 */
1665 	list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
1666 		struct wakeup_source *ws;
1667 		__poll_t revents;
1668 
1669 		if (res >= maxevents)
1670 			break;
1671 
1672 		/*
1673 		 * Activate ep->ws before deactivating epi->ws to prevent
1674 		 * triggering auto-suspend here (in case we reactive epi->ws
1675 		 * below).
1676 		 *
1677 		 * This could be rearranged to delay the deactivation of epi->ws
1678 		 * instead, but then epi->ws would temporarily be out of sync
1679 		 * with ep_is_linked().
1680 		 */
1681 		ws = ep_wakeup_source(epi);
1682 		if (ws) {
1683 			if (ws->active)
1684 				__pm_stay_awake(ep->ws);
1685 			__pm_relax(ws);
1686 		}
1687 
1688 		list_del_init(&epi->rdllink);
1689 
1690 		/*
1691 		 * If the event mask intersects the caller-requested one,
1692 		 * deliver the event to userspace. Again, we are holding ep->mtx,
1693 		 * so no operations coming from userspace can change the item.
1694 		 */
1695 		revents = ep_item_poll(epi, &pt, 1);
1696 		if (!revents)
1697 			continue;
1698 
1699 		events = epoll_put_uevent(revents, epi->event.data, events);
1700 		if (!events) {
1701 			list_add(&epi->rdllink, &txlist);
1702 			ep_pm_stay_awake(epi);
1703 			if (!res)
1704 				res = -EFAULT;
1705 			break;
1706 		}
1707 		res++;
1708 		if (epi->event.events & EPOLLONESHOT)
1709 			epi->event.events &= EP_PRIVATE_BITS;
1710 		else if (!(epi->event.events & EPOLLET)) {
1711 			/*
1712 			 * If this file has been added with Level
1713 			 * Trigger mode, we need to insert back inside
1714 			 * the ready list, so that the next call to
1715 			 * epoll_wait() will check again the events
1716 			 * availability. At this point, no one can insert
1717 			 * into ep->rdllist besides us. The epoll_ctl()
1718 			 * callers are locked out by ep_send_events()
1719 			 * holding "mtx" and the poll callback will queue
1720 			 * them in ep->ovflist.
1721 			 */
1722 			list_add_tail(&epi->rdllink, &ep->rdllist);
1723 			ep_pm_stay_awake(epi);
1724 		}
1725 	}
1726 	ep_done_scan(ep, &txlist);
1727 	mutex_unlock(&ep->mtx);
1728 
1729 	return res;
1730 }
1731 
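/*
 * Illustrative userspace sketch (not part of this file): the tail of
 * the loop above is why a level-triggered fd is reported by every
 * epoll_wait() while it stays ready, and why an EPOLLONESHOT fd is
 * reported once and must then be re-armed.  Names are arbitrary.
 *
 *	#include <sys/epoll.h>
 *
 *	struct epoll_event ev = { .events = EPOLLIN | EPOLLONESHOT,
 *				  .data.fd = fd };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
 *
 *	struct epoll_event out;
 *	epoll_wait(epfd, &out, 1, -1);		  // delivered once, then the
 *						  // item is disarmed (events
 *						  // &= EP_PRIVATE_BITS) ...
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);  // ... until re-armed here
 */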
1732 static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
1733 {
1734 	struct timespec64 now;
1735 
1736 	if (ms < 0)
1737 		return NULL;
1738 
1739 	if (!ms) {
1740 		to->tv_sec = 0;
1741 		to->tv_nsec = 0;
1742 		return to;
1743 	}
1744 
1745 	to->tv_sec = ms / MSEC_PER_SEC;
1746 	to->tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC);
1747 
1748 	ktime_get_ts64(&now);
1749 	*to = timespec64_add_safe(now, *to);
1750 	return to;
1751 }
1752 
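/*
 * Worked example for the conversion above (values chosen for
 * illustration): ms = 1250 gives tv_sec = 1250 / 1000 = 1 and
 * tv_nsec = 1000000 * (1250 % 1000) = 250000000, which is then added
 * to the current time to form an absolute expiry.  A negative ms
 * returns NULL ("block forever" for ep_poll()), and ms = 0 returns a
 * zeroed timespec (non-blocking).
 */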
1753 /*
1754  * autoremove_wake_function, but remove even on failure to wake up, because we
1755  * know that default_wake_function/ttwu will only fail if the thread is already
1756  * woken, and in that case the ep_poll loop will remove the entry anyway, not
1757  * try to reuse it.
1758  */
1759 static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
1760 				       unsigned int mode, int sync, void *key)
1761 {
1762 	int ret = default_wake_function(wq_entry, mode, sync, key);
1763 
1764 	/*
1765 	 * Pairs with list_empty_careful in ep_poll, and ensures future loop
1766 	 * iterations see the cause of this wakeup.
1767 	 */
1768 	list_del_init_careful(&wq_entry->entry);
1769 	return ret;
1770 }
1771 
1772 /**
1773  * ep_poll - Retrieves ready events, and delivers them to the caller-supplied
1774  *           event buffer.
1775  *
1776  * @ep: Pointer to the eventpoll context.
1777  * @events: Pointer to the userspace buffer where the ready events should be
1778  *          stored.
1779  * @maxevents: Size (in terms of number of events) of the caller event buffer.
1780  * @timeout: Maximum timeout for the ready events fetch operation, in
1781  *           timespec. If the timeout is zero, the function will not block,
1782  *           while if the @timeout ptr is NULL, the function will block
1783  *           until at least one event has been retrieved (or an error
1784  *           occurred).
1785  *
1786  * Return: the number of ready events which have been fetched, or an
1787  *          error code, in case of error.
1788  */
1789 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1790 		   int maxevents, struct timespec64 *timeout)
1791 {
1792 	int res, eavail, timed_out = 0;
1793 	u64 slack = 0;
1794 	wait_queue_entry_t wait;
1795 	ktime_t expires, *to = NULL;
1796 
1797 	lockdep_assert_irqs_enabled();
1798 
1799 	if (timeout && (timeout->tv_sec | timeout->tv_nsec)) {
1800 		slack = select_estimate_accuracy(timeout);
1801 		to = &expires;
1802 		*to = timespec64_to_ktime(*timeout);
1803 	} else if (timeout) {
1804 		/*
1805 		 * Avoid the unnecessary trip to the wait queue loop, if the
1806 		 * caller specified a non blocking operation.
1807 		 * caller specified a non-blocking operation.
1808 		timed_out = 1;
1809 	}
1810 
1811 	/*
1812 	 * This call is racy: We may or may not see events that are being added
1813 	 * to the ready list under the lock (e.g., in IRQ callbacks). For cases
1814 	 * with a non-zero timeout, this thread will check the ready list under
1815 	 * lock and will add to the wait queue.  For cases with a zero
1816 	 * timeout, the user by definition should not care and will have to
1817 	 * recheck again.
1818 	 */
1819 	eavail = ep_events_available(ep);
1820 
1821 	while (1) {
1822 		if (eavail) {
1823 			/*
1824 			 * Try to transfer events to user space. In case we get
1825 			 * 0 events and there's still timeout left over, we
1826 			 * try again in search of more luck.
1827 			 */
1828 			res = ep_send_events(ep, events, maxevents);
1829 			if (res)
1830 				return res;
1831 		}
1832 
1833 		if (timed_out)
1834 			return 0;
1835 
1836 		eavail = ep_busy_loop(ep, timed_out);
1837 		if (eavail)
1838 			continue;
1839 
1840 		if (signal_pending(current))
1841 			return -EINTR;
1842 
1843 		/*
1844 		 * Internally init_wait() uses autoremove_wake_function(),
1845 		 * thus wait entry is removed from the wait queue on each
1846 	 * wakeup. Why is this important? In case of several waiters,
1847 	 * each new wakeup will hit the next waiter, giving it the
1848 	 * chance to harvest new events. Otherwise a wakeup can be
1849 	 * lost. This is also good performance-wise, because on the
1850 	 * normal wakeup path there is no need to call
1851 	 * __remove_wait_queue() explicitly, thus ep->lock is not
1852 	 * taken, which would stall event delivery.
1853 		 *
1854 		 * In fact, we now use an even more aggressive function that
1855 		 * unconditionally removes, because we don't reuse the wait
1856 		 * entry between loop iterations. This lets us also avoid the
1857 		 * performance issue if a process is killed, causing all of its
1858 		 * threads to wake up without being removed normally.
1859 		 */
1860 		init_wait(&wait);
1861 		wait.func = ep_autoremove_wake_function;
1862 
1863 		write_lock_irq(&ep->lock);
1864 		/*
1865 		 * Barrierless variant, waitqueue_active() is called under
1866 		 * the same lock on wakeup ep_poll_callback() side, so it
1867 		 * is safe to avoid an explicit barrier.
1868 		 */
1869 		__set_current_state(TASK_INTERRUPTIBLE);
1870 
1871 		/*
1872 		 * Do the final check under the lock. ep_start/done_scan()
1873 		 * plays with two lists (->rdllist and ->ovflist) and there
1874 		 * is always a race when both lists are empty for short
1875 		 * period of time although events are pending, so lock is
1876 		 * important.
1877 		 */
1878 		eavail = ep_events_available(ep);
1879 		if (!eavail)
1880 			__add_wait_queue_exclusive(&ep->wq, &wait);
1881 
1882 		write_unlock_irq(&ep->lock);
1883 
1884 		if (!eavail)
1885 			timed_out = !freezable_schedule_hrtimeout_range(to, slack,
1886 									HRTIMER_MODE_ABS);
1887 		__set_current_state(TASK_RUNNING);
1888 
1889 		/*
1890 		 * We were woken up, thus go and try to harvest some events.
1891 		 * If timed out and still on the wait queue, recheck eavail
1892 		 * carefully under lock, below.
1893 		 */
1894 		eavail = 1;
1895 
1896 		if (!list_empty_careful(&wait.entry)) {
1897 			write_lock_irq(&ep->lock);
1898 			/*
1899 			 * If the thread timed out and is not on the wait queue,
1900 			 * it means that the thread was woken up after its
1901 			 * timeout expired before it could reacquire the lock.
1902 			 * Thus, when wait.entry is empty, it needs to harvest
1903 			 * events.
1904 			 */
1905 			if (timed_out)
1906 				eavail = list_empty(&wait.entry);
1907 			__remove_wait_queue(&ep->wq, &wait);
1908 			write_unlock_irq(&ep->lock);
1909 		}
1910 	}
1911 }
1912 
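/*
 * Illustrative userspace sketch (not part of this file) of the three
 * @timeout cases handled above, as seen through epoll_wait(2); the
 * epfd name is arbitrary.
 *
 *	#include <sys/epoll.h>
 *
 *	struct epoll_event out[8];
 *
 *	epoll_wait(epfd, out, 8, 0);	// zero timespec: poll once, never sleep
 *	epoll_wait(epfd, out, 8, 250);	// absolute hrtimer expiry, with slack
 *	epoll_wait(epfd, out, 8, -1);	// NULL timespec: sleep until an event
 */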
1913 /**
1914  * ep_loop_check_proc - verify that adding an epoll file inside another
1915  *                      epoll structure does not violate the constraints, in
1916  *                      terms of closed loops, or too deep chains (which can
1917  *                      result in excessive stack usage).
1918  *
1919  * @ep: the &struct eventpoll to be currently checked.
1920  * @depth: Current depth of the path being checked.
1921  *
1922  * Return: %zero if adding the epoll @file inside current epoll
1923  *          structure @ep does not violate the constraints, or %-1 otherwise.
1924  */
1925 static int ep_loop_check_proc(struct eventpoll *ep, int depth)
1926 {
1927 	int error = 0;
1928 	struct rb_node *rbp;
1929 	struct epitem *epi;
1930 
1931 	mutex_lock_nested(&ep->mtx, depth + 1);
1932 	ep->gen = loop_check_gen;
1933 	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1934 		epi = rb_entry(rbp, struct epitem, rbn);
1935 		if (unlikely(is_file_epoll(epi->ffd.file))) {
1936 			struct eventpoll *ep_tovisit;
1937 			ep_tovisit = epi->ffd.file->private_data;
1938 			if (ep_tovisit->gen == loop_check_gen)
1939 				continue;
1940 			if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
1941 				error = -1;
1942 			else
1943 				error = ep_loop_check_proc(ep_tovisit, depth + 1);
1944 			if (error != 0)
1945 				break;
1946 		} else {
1947 			/*
1948 			 * If we've reached a file that is not associated with
1949 			 * an ep, then we need to check if the newly added
1950 			 * links are going to add too many wakeup paths. We do
1951 			 * this by adding it to the tfile_check_list, if it's
1952 			 * not already there, and calling reverse_path_check()
1953 			 * during ep_insert().
1954 			 */
1955 			list_file(epi->ffd.file);
1956 		}
1957 	}
1958 	mutex_unlock(&ep->mtx);
1959 
1960 	return error;
1961 }
1962 
1963 /**
1964  * ep_loop_check - Performs a check to verify that adding an epoll file (@to)
1965  *                 into another epoll file (represented by @ep) does not create
1966  *                 closed loops or too deep chains.
1967  *
1968  * @ep: Pointer to the epoll we are inserting into.
1969  * @to: Pointer to the epoll to be inserted.
1970  *
1971  * Return: %zero if adding the epoll @to inside the epoll @ep
1972  * does not violate the constraints, or %-1 otherwise.
1973  */
1974 static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
1975 {
1976 	inserting_into = ep;
1977 	return ep_loop_check_proc(to, 0);
1978 }
1979 
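/*
 * Illustrative userspace sketch (not part of this file): the cycle
 * that the check above rejects.  After A watches B, inserting A into
 * B would close a loop, so the second EPOLL_CTL_ADD fails.
 *
 *	#include <sys/epoll.h>
 *
 *	int a = epoll_create1(0);
 *	int b = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);	// ok: A watches B
 *	epoll_ctl(b, EPOLL_CTL_ADD, a, &ev);	// fails, errno == ELOOP
 */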
1980 static void clear_tfile_check_list(void)
1981 {
1982 	rcu_read_lock();
1983 	while (tfile_check_list != EP_UNACTIVE_PTR) {
1984 		struct epitems_head *head = tfile_check_list;
1985 		tfile_check_list = head->next;
1986 		unlist_file(head);
1987 	}
1988 	rcu_read_unlock();
1989 }
1990 
1991 /*
1992  * Open an eventpoll file descriptor.
1993  */
1994 static int do_epoll_create(int flags)
1995 {
1996 	int error, fd;
1997 	struct eventpoll *ep = NULL;
1998 	struct file *file;
1999 
2000 	/* Check the EPOLL_* constant for consistency.  */
2001 	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
2002 
2003 	if (flags & ~EPOLL_CLOEXEC)
2004 		return -EINVAL;
2005 	/*
2006 	 * Create the internal data structure ("struct eventpoll").
2007 	 */
2008 	error = ep_alloc(&ep);
2009 	if (error < 0)
2010 		return error;
2011 	/*
2012 	 * Creates all the items needed to set up an eventpoll file. That is,
2013 	 * a file structure and a free file descriptor.
2014 	 */
2015 	fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
2016 	if (fd < 0) {
2017 		error = fd;
2018 		goto out_free_ep;
2019 	}
2020 	file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
2021 				 O_RDWR | (flags & O_CLOEXEC));
2022 	if (IS_ERR(file)) {
2023 		error = PTR_ERR(file);
2024 		goto out_free_fd;
2025 	}
2026 	ep->file = file;
2027 	fd_install(fd, file);
2028 	return fd;
2029 
2030 out_free_fd:
2031 	put_unused_fd(fd);
2032 out_free_ep:
2033 	ep_free(ep);
2034 	return error;
2035 }
2036 
2037 SYSCALL_DEFINE1(epoll_create1, int, flags)
2038 {
2039 	return do_epoll_create(flags);
2040 }
2041 
2042 SYSCALL_DEFINE1(epoll_create, int, size)
2043 {
2044 	if (size <= 0)
2045 		return -EINVAL;
2046 
2047 	return do_epoll_create(0);
2048 }
2049 
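/*
 * Illustrative userspace sketch (not part of this file): the legacy
 * size argument is only validated as positive and otherwise ignored,
 * so new code should prefer epoll_create1(), which also accepts
 * EPOLL_CLOEXEC (equal to O_CLOEXEC, as the BUILD_BUG_ON above
 * asserts).
 *
 *	#include <sys/epoll.h>
 *
 *	int e1 = epoll_create(42);		// 42 is checked, then ignored
 *	int e2 = epoll_create1(EPOLL_CLOEXEC);	// preferred
 */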
2050 static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
2051 				   bool nonblock)
2052 {
2053 	if (!nonblock) {
2054 		mutex_lock_nested(mutex, depth);
2055 		return 0;
2056 	}
2057 	if (mutex_trylock(mutex))
2058 		return 0;
2059 	return -EAGAIN;
2060 }
2061 
2062 int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
2063 		 bool nonblock)
2064 {
2065 	int error;
2066 	int full_check = 0;
2067 	struct fd f, tf;
2068 	struct eventpoll *ep;
2069 	struct epitem *epi;
2070 	struct eventpoll *tep = NULL;
2071 
2072 	error = -EBADF;
2073 	f = fdget(epfd);
2074 	if (!f.file)
2075 		goto error_return;
2076 
2077 	/* Get the "struct file *" for the target file */
2078 	tf = fdget(fd);
2079 	if (!tf.file)
2080 		goto error_fput;
2081 
2082 	/* The target file descriptor must support poll */
2083 	error = -EPERM;
2084 	if (!file_can_poll(tf.file))
2085 		goto error_tgt_fput;
2086 
2087 	/* Check if EPOLLWAKEUP is allowed */
2088 	if (ep_op_has_event(op))
2089 		ep_take_care_of_epollwakeup(epds);
2090 
2091 	/*
2092 	 * We have to check that the file structure underneath the file descriptor
2093 	 * the user passed to us _is_ an eventpoll file. We also do not permit
2094 	 * adding an epoll file descriptor to itself.
2095 	 */
2096 	error = -EINVAL;
2097 	if (f.file == tf.file || !is_file_epoll(f.file))
2098 		goto error_tgt_fput;
2099 
2100 	/*
2101 	 * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
2102 	 * so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
2103 	 * Also, we do not currently support nested exclusive wakeups.
2104 	 */
2105 	if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
2106 		if (op == EPOLL_CTL_MOD)
2107 			goto error_tgt_fput;
2108 		if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
2109 				(epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
2110 			goto error_tgt_fput;
2111 	}
2112 
2113 	/*
2114 	 * At this point it is safe to assume that the "private_data" contains
2115 	 * our own data structure.
2116 	 */
2117 	ep = f.file->private_data;
2118 
2119 	/*
2120 	 * When we insert an epoll file descriptor inside another epoll file
2121 	 * descriptor, there is the chance of creating closed loops, which are
2122 	 * better handled here than in more critical paths. While we are
2123 	 * checking for loops we also determine the list of files reachable
2124 	 * and hang them on the tfile_check_list, so we can check that we
2125 	 * haven't created too many possible wakeup paths.
2126 	 *
2127 	 * We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
2128 	 * the epoll file descriptor is attaching directly to a wakeup source,
2129 	 * unless the epoll file descriptor is nested. The purpose of taking the
2130 	 * 'epmutex' on add is to prevent complex topologies such as loops and
2131 	 * deep wakeup paths from forming in parallel through multiple
2132 	 * EPOLL_CTL_ADD operations.
2133 	 */
2134 	error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2135 	if (error)
2136 		goto error_tgt_fput;
2137 	if (op == EPOLL_CTL_ADD) {
2138 		if (READ_ONCE(f.file->f_ep) || ep->gen == loop_check_gen ||
2139 		    is_file_epoll(tf.file)) {
2140 			mutex_unlock(&ep->mtx);
2141 			error = epoll_mutex_lock(&epmutex, 0, nonblock);
2142 			if (error)
2143 				goto error_tgt_fput;
2144 			loop_check_gen++;
2145 			full_check = 1;
2146 			if (is_file_epoll(tf.file)) {
2147 				tep = tf.file->private_data;
2148 				error = -ELOOP;
2149 				if (ep_loop_check(ep, tep) != 0)
2150 					goto error_tgt_fput;
2151 			}
2152 			error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2153 			if (error)
2154 				goto error_tgt_fput;
2155 		}
2156 	}
2157 
2158 	/*
2159 	 * Try to lookup the file inside our RB tree. Since we grabbed "mtx"
2160 	 * above, we can be sure to be able to use the item looked up by
2161 	 * ep_find() till we release the mutex.
2162 	 */
2163 	epi = ep_find(ep, tf.file, fd);
2164 
2165 	error = -EINVAL;
2166 	switch (op) {
2167 	case EPOLL_CTL_ADD:
2168 		if (!epi) {
2169 			epds->events |= EPOLLERR | EPOLLHUP;
2170 			error = ep_insert(ep, epds, tf.file, fd, full_check);
2171 		} else
2172 			error = -EEXIST;
2173 		break;
2174 	case EPOLL_CTL_DEL:
2175 		if (epi)
2176 			error = ep_remove(ep, epi);
2177 		else
2178 			error = -ENOENT;
2179 		break;
2180 	case EPOLL_CTL_MOD:
2181 		if (epi) {
2182 			if (!(epi->event.events & EPOLLEXCLUSIVE)) {
2183 				epds->events |= EPOLLERR | EPOLLHUP;
2184 				error = ep_modify(ep, epi, epds);
2185 			}
2186 		} else
2187 			error = -ENOENT;
2188 		break;
2189 	}
2190 	mutex_unlock(&ep->mtx);
2191 
2192 error_tgt_fput:
2193 	if (full_check) {
2194 		clear_tfile_check_list();
2195 		loop_check_gen++;
2196 		mutex_unlock(&epmutex);
2197 	}
2198 
2199 	fdput(tf);
2200 error_fput:
2201 	fdput(f);
2202 error_return:
2203 
2204 	return error;
2205 }
2206 
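/*
 * Illustrative userspace sketch (not part of this file) of the
 * EPOLLEXCLUSIVE restrictions enforced above: the flag is honoured
 * only at ADD time, and only with bits from EPOLLEXCLUSIVE_OK_BITS.
 *
 *	#include <sys/epoll.h>
 *
 *	struct epoll_event ev = { .events = EPOLLIN | EPOLLEXCLUSIVE,
 *				  .data.fd = fd };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);  // ok
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);  // fails, errno == EINVAL
 */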
2207 /*
2208  * The following function implements the controller interface for
2209  * the eventpoll file that enables the insertion/removal/change of
2210  * file descriptors inside the interest set.
2211  */
2212 SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2213 		struct epoll_event __user *, event)
2214 {
2215 	struct epoll_event epds;
2216 
2217 	if (ep_op_has_event(op) &&
2218 	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
2219 		return -EFAULT;
2220 
2221 	return do_epoll_ctl(epfd, op, fd, &epds, false);
2222 }
2223 
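/*
 * Note (illustrative, not part of this file): userspace never needs
 * to ask for EPOLLERR or EPOLLHUP; do_epoll_ctl() ORs them into the
 * mask on ADD and MOD, so they are always reported.
 *
 *	#include <sys/epoll.h>
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
 *	// effective mask: EPOLLIN | EPOLLERR | EPOLLHUP
 */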
2224 /*
2225  * Implement the event wait interface for the eventpoll file. It is the kernel
2226  * part of the user space epoll_wait(2).
2227  */
2228 static int do_epoll_wait(int epfd, struct epoll_event __user *events,
2229 			 int maxevents, struct timespec64 *to)
2230 {
2231 	int error;
2232 	struct fd f;
2233 	struct eventpoll *ep;
2234 
2235 	/* The maximum number of events must be greater than zero */
2236 	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
2237 		return -EINVAL;
2238 
2239 	/* Verify that the area passed by the user is writeable */
2240 	if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
2241 		return -EFAULT;
2242 
2243 	/* Get the "struct file *" for the eventpoll file */
2244 	f = fdget(epfd);
2245 	if (!f.file)
2246 		return -EBADF;
2247 
2248 	/*
2249 	 * We have to check that the file structure underneath the fd
2250 	 * the user passed to us _is_ an eventpoll file.
2251 	 */
2252 	error = -EINVAL;
2253 	if (!is_file_epoll(f.file))
2254 		goto error_fput;
2255 
2256 	/*
2257 	 * At this point it is safe to assume that the "private_data" contains
2258 	 * our own data structure.
2259 	 */
2260 	ep = f.file->private_data;
2261 
2262 	/* Time to fish for events ... */
2263 	error = ep_poll(ep, events, maxevents, to);
2264 
2265 error_fput:
2266 	fdput(f);
2267 	return error;
2268 }
2269 
2270 SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
2271 		int, maxevents, int, timeout)
2272 {
2273 	struct timespec64 to;
2274 
2275 	return do_epoll_wait(epfd, events, maxevents,
2276 			     ep_timeout_to_timespec(&to, timeout));
2277 }
2278 
2279 /*
2280  * Implement the event wait interface for the eventpoll file. It is the kernel
2281  * part of the user space epoll_pwait(2).
2282  */
2283 static int do_epoll_pwait(int epfd, struct epoll_event __user *events,
2284 			  int maxevents, struct timespec64 *to,
2285 			  const sigset_t __user *sigmask, size_t sigsetsize)
2286 {
2287 	int error;
2288 
2289 	/*
2290 	 * If the caller wants a certain signal mask to be set during the wait,
2291 	 * we apply it here.
2292 	 */
2293 	error = set_user_sigmask(sigmask, sigsetsize);
2294 	if (error)
2295 		return error;
2296 
2297 	error = do_epoll_wait(epfd, events, maxevents, to);
2298 
2299 	restore_saved_sigmask_unless(error == -EINTR);
2300 
2301 	return error;
2302 }
2303 
2304 SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
2305 		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
2306 		size_t, sigsetsize)
2307 {
2308 	struct timespec64 to;
2309 
2310 	return do_epoll_pwait(epfd, events, maxevents,
2311 			      ep_timeout_to_timespec(&to, timeout),
2312 			      sigmask, sigsetsize);
2313 }
2314 
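/*
 * Illustrative userspace sketch (not part of this file): epoll_pwait()
 * closes the race between unblocking a signal and starting the wait,
 * the same way pselect(2) does for select(2).
 *
 *	#include <signal.h>
 *	#include <sys/epoll.h>
 *
 *	sigset_t mask;
 *	sigemptyset(&mask);		// allow all signals during the wait
 *
 *	struct epoll_event out;
 *	// mask is installed and the sleep entered atomically:
 *	epoll_pwait(epfd, &out, 1, -1, &mask);
 */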
2315 SYSCALL_DEFINE6(epoll_pwait2, int, epfd, struct epoll_event __user *, events,
2316 		int, maxevents, const struct __kernel_timespec __user *, timeout,
2317 		const sigset_t __user *, sigmask, size_t, sigsetsize)
2318 {
2319 	struct timespec64 ts, *to = NULL;
2320 
2321 	if (timeout) {
2322 		if (get_timespec64(&ts, timeout))
2323 			return -EFAULT;
2324 		to = &ts;
2325 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
2326 			return -EINVAL;
2327 	}
2328 
2329 	return do_epoll_pwait(epfd, events, maxevents, to,
2330 			      sigmask, sigsetsize);
2331 }
2332 
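/*
 * Illustrative userspace sketch (not part of this file; the glibc
 * >= 2.35 wrapper is assumed, which hides the sigsetsize argument):
 * epoll_pwait2() takes the timeout as a timespec, giving nanosecond
 * resolution instead of the millisecond granularity of
 * epoll_wait()/epoll_pwait().
 *
 *	#include <sys/epoll.h>
 *
 *	struct timespec ts = { .tv_sec = 0, .tv_nsec = 500000 };  // 500us
 *	struct epoll_event out;
 *	epoll_pwait2(epfd, &out, 1, &ts, NULL);
 */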
2333 #ifdef CONFIG_COMPAT
2334 static int do_compat_epoll_pwait(int epfd, struct epoll_event __user *events,
2335 				 int maxevents, struct timespec64 *timeout,
2336 				 const compat_sigset_t __user *sigmask,
2337 				 compat_size_t sigsetsize)
2338 {
2339 	long err;
2340 
2341 	/*
2342 	 * If the caller wants a certain signal mask to be set during the wait,
2343 	 * we apply it here.
2344 	 */
2345 	err = set_compat_user_sigmask(sigmask, sigsetsize);
2346 	if (err)
2347 		return err;
2348 
2349 	err = do_epoll_wait(epfd, events, maxevents, timeout);
2350 
2351 	restore_saved_sigmask_unless(err == -EINTR);
2352 
2353 	return err;
2354 }
2355 
2356 COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
2357 		       struct epoll_event __user *, events,
2358 		       int, maxevents, int, timeout,
2359 		       const compat_sigset_t __user *, sigmask,
2360 		       compat_size_t, sigsetsize)
2361 {
2362 	struct timespec64 to;
2363 
2364 	return do_compat_epoll_pwait(epfd, events, maxevents,
2365 				     ep_timeout_to_timespec(&to, timeout),
2366 				     sigmask, sigsetsize);
2367 }
2368 
2369 COMPAT_SYSCALL_DEFINE6(epoll_pwait2, int, epfd,
2370 		       struct epoll_event __user *, events,
2371 		       int, maxevents,
2372 		       const struct __kernel_timespec __user *, timeout,
2373 		       const compat_sigset_t __user *, sigmask,
2374 		       compat_size_t, sigsetsize)
2375 {
2376 	struct timespec64 ts, *to = NULL;
2377 
2378 	if (timeout) {
2379 		if (get_timespec64(&ts, timeout))
2380 			return -EFAULT;
2381 		to = &ts;
2382 		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
2383 			return -EINVAL;
2384 	}
2385 
2386 	return do_compat_epoll_pwait(epfd, events, maxevents, to,
2387 				     sigmask, sigsetsize);
2388 }
2389 
2390 #endif
2391 
2392 static int __init eventpoll_init(void)
2393 {
2394 	struct sysinfo si;
2395 
2396 	si_meminfo(&si);
2397 	/*
2398 	 * Allows top 4% of lowmem to be allocated for epoll watches (per user).
2399 	 */
2400 	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
2401 		EP_ITEM_COST;
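	/*
	 * Worked example (numbers assumed for illustration): with 1 GiB of
	 * lowmem the budget is 2^30 / 25 bytes, i.e. ~43 MB (4%); at an
	 * EP_ITEM_COST of roughly 200 bytes (sizeof(struct epitem) +
	 * sizeof(struct eppoll_entry), arch-dependent) that is about
	 * 214000 watches per user.
	 */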
2402 	BUG_ON(max_user_watches < 0);
2403 
2404 	/*
2405 	 * We can have many thousands of epitems, so prevent this from
2406 	 * using an extra cache line on 64-bit (and smaller) CPUs
2407 	 */
2408 	BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
2409 
2410 	/* Allocates slab cache used to allocate "struct epitem" items */
2411 	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
2412 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
2413 
2414 	/* Allocates slab cache used to allocate "struct eppoll_entry" */
2415 	pwq_cache = kmem_cache_create("eventpoll_pwq",
2416 		sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2417 
2418 	ephead_cache = kmem_cache_create("ep_head",
2419 		sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2420 
2421 	return 0;
2422 }
2423 fs_initcall(eventpoll_init);
2424