/*
 *  fs/eventpoll.c (Efficient event retrieval implementation)
 *  Copyright (C) 2001,...,2009	 Davide Libenzi
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (spinlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path, and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is also acquired when inserting an epoll fd onto another epoll
 * fd. We do this so that we walk the epoll tree and ensure that this
 * insertion does not create a cycle of epoll file descriptors, which
 * could lead to deadlock. We need a global mutex to prevent two
 * simultaneous inserts (A into B and B into A) from racing and
 * constructing a cycle without either insert observing that it is
 * going to.
 * It is necessary to acquire multiple "ep->mtx"es at once in the
 * case when one epoll fd is added to another. In this case, we
 * always acquire the locks in the order of nesting (i.e. after
 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
 * before e2->mtx). Since we disallow cycles of epoll file
 * descriptors, this ensures that the mutexes are well-ordered. In
 * order to communicate this nesting to lockdep, when walking a tree
 * of epoll file descriptors, we use the current recursion depth as
 * the lockdep subkey.
 * It would be possible to drop "ep->mtx" and use the global
 * mutex "epmutex" (together with "ep->lock") instead, but having
 * "ep->mtx" makes the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" guarantees
 * better scalability.
 */
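
/*
 * Illustrative sketch only (not a real call site in this file): the
 * nesting order described above, as it would look if all three locks
 * had to be taken together. "ep" and "flags" are hypothetical locals.
 *
 *	mutex_lock(&epmutex);			/1/ 1) global mutex /1/
 *	mutex_lock_nested(&ep->mtx, 0);		/1/ 2) per-instance mutex /1/
 *	spin_lock_irqsave(&ep->lock, flags);	/1/ 3) innermost spinlock /1/
 *	...
 *	spin_unlock_irqrestore(&ep->lock, flags);
 *	mutex_unlock(&ep->mtx);
 *	mutex_unlock(&epmutex);
 *
 * (The "/1/" markers stand in for nested comment delimiters, which C
 * does not allow.)
 */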

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET)

/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))

struct epoll_filefd {
	struct file *file;
	int fd;
};

/*
 * Structure used to track possible nested calls, for too deep recursions
 * and loop cycles.
 */
struct nested_call_node {
	struct list_head llink;
	void *cookie;
	void *ctx;
};

/*
 * This structure is used as a collector for nested calls, to check for
 * maximum recursion depth and loop cycles.
 */
struct nested_calls {
	struct list_head tasks_call_list;
	spinlock_t lock;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
	/* RB tree node used to link this structure to the eventpoll RB tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * singly linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* wakeup_source used when EPOLLWAKEUP is set */
	struct wakeup_source *ws;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protect the access to this structure */
	spinlock_t lock;

	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB tree root used to store monitored fd structs */
	struct rb_root rbr;

	/*
	 * This is a singly linked list that chains all the "struct epitem"
	 * that had events occur while ready events were being transferred
	 * to userspace without holding ->lock.
	 */
	struct epitem *ovflist;

	/* wakeup_source used when ep_scan_ready_list is running */
	struct wakeup_source *ws;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;

	struct file *file;

	/* used to optimize loop detection check */
	int visited;
	struct list_head visited_list_link;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	struct epitem *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
	int maxevents;
	struct epoll_event __user *events;
};

/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);

/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;

/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;

/* Used to call file's f_op->poll() under the nested calls boundaries */
static struct nested_calls poll_readywalk_ncalls;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
static LIST_HEAD(visited_list);

/*
 * List of files with newly added links, where we may need to limit the number
 * of emanating paths. Protected by the epmutex.
 */
static LIST_HEAD(tfile_check_list);

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long zero;
static long long_max = LONG_MAX;

ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(max_user_watches),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &zero,
		.extra2		= &long_max,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static const struct file_operations eventpoll_fops;

static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/* Set up the structure that is used as a key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1:
	        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
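
/*
 * Note: the key thus orders primarily by the file pointer and only falls
 * back to the fd number, so two epitems watching the same struct file
 * (e.g. via dup()ed descriptors) compare by fd.
 */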

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

/* Initialize the poll safe wake up structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
	INIT_LIST_HEAD(&ncalls->tasks_call_list);
	spin_lock_init(&ncalls->lock);
}

/**
 * ep_events_available - Checks if ready events might be available.
 *
 * @ep: Pointer to the eventpoll context.
 *
 * Returns: Returns a value different from zero if ready events are available,
 *          or zero otherwise.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
	return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
}

/**
 * ep_call_nested - Perform a bound (possibly) nested call, by checking
 *                  that the recursion limit is not exceeded, and that
 *                  the same nested call (identified by its cookie) is
 *                  not re-entered.
 *
 * @ncalls: Pointer to the nested_calls structure to be used for this call.
 * @max_nests: Maximum number of allowed nesting calls.
 * @nproc: Nested call core function pointer.
 * @priv: Opaque data to be passed to the @nproc callback.
 * @cookie: Cookie to be used to identify this nested call.
 * @ctx: This instance context.
 *
 * Returns: Returns the code returned by the @nproc callback, or -1 if
 *          the maximum recursion limit has been exceeded.
 */
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
			  int (*nproc)(void *, void *, int), void *priv,
			  void *cookie, void *ctx)
{
	int error, call_nests = 0;
	unsigned long flags;
	struct list_head *lsthead = &ncalls->tasks_call_list;
	struct nested_call_node *tncur;
	struct nested_call_node tnode;

	spin_lock_irqsave(&ncalls->lock, flags);

	/*
	 * Try to see if the current task is already inside this wakeup call.
	 * We use a list here, since the population inside this set is always
	 * very much limited.
	 */
	list_for_each_entry(tncur, lsthead, llink) {
		if (tncur->ctx == ctx &&
		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			error = -1;
			goto out_unlock;
		}
	}

	/* Add the current task and cookie to the list */
	tnode.ctx = ctx;
	tnode.cookie = cookie;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&ncalls->lock, flags);

	/* Call the nested function */
	error = (*nproc)(priv, cookie, call_nests);

	/* Remove the current task from the list */
	spin_lock_irqsave(&ncalls->lock, flags);
	list_del(&tnode.llink);
out_unlock:
	spin_unlock_irqrestore(&ncalls->lock, flags);

	return error;
}

/*
 * As described in commit 0ccf831cb ("lockdep: annotate epoll"),
 * the use of wait queues used by epoll is done in a very controlled
 * manner. Wake ups can nest inside each other, but are never done
 * with the same locking. For example:
 *
 *   dfd = socket(...);
 *   efd1 = epoll_create();
 *   efd2 = epoll_create();
 *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 *
 * When a packet arrives to the device underneath "dfd", the net code will
 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 * callback wakeup entry on that queue, and the wake_up() performed by the
 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 * (efd1) notices that it may have some event ready, so it needs to wake up
 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
 * that ends up in another wake_up(), after having checked the
 * recursion constraints. That is, no more than EP_MAX_NESTS, to
 * avoid stack blasting.
 *
 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 * this special case of epoll.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
	wake_up_locked_poll(wqueue, events);
	spin_unlock_irqrestore(&wqueue->lock, flags);
}
#else
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	wake_up_poll(wqueue, events);
}
#endif

static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
	ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
			  1 + call_nests);
	return 0;
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_NESTS times,
 * and we cannot reenter the same wait queue head at all. This allows
 * a hierarchy of epoll file descriptors no more than EP_MAX_NESTS
 * deep.
 */
static void ep_poll_safewake(wait_queue_head_t *wq)
{
	int this_cpu = get_cpu();

	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
		       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);

	put_cpu();
}

static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
	wait_queue_head_t *whead;

	rcu_read_lock();
	/* If it is cleared by POLLFREE, it should be rcu-safe */
	whead = rcu_dereference(pwq->whead);
	if (whead)
		remove_wait_queue(whead, &pwq->wait);
	rcu_read_unlock();
}

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor.  Must be called with "mtx" held (or "epmutex" if called from
 * ep_free).
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	while (!list_empty(lsthead)) {
		pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

		list_del(&pwq->llink);
		ep_remove_wait_queue(pwq);
		kmem_cache_free(pwq_cache, pwq);
	}
}

/**
 * ep_scan_ready_list - Scans the ready list in a way that makes it possible
 *                      for the scan code to call f_op->poll(). Also allows for
 *                      O(NumReady) performance.
 *
 * @ep: Pointer to the epoll private data structure.
 * @sproc: Pointer to the scan callback.
 * @priv: Private opaque data passed to the @sproc callback.
 * @depth: The current depth of recursive f_op->poll calls.
 *
 * Returns: The same integer error code returned by the @sproc callback.
 */
static int ep_scan_ready_list(struct eventpoll *ep,
			      int (*sproc)(struct eventpoll *,
					   struct list_head *, void *),
			      void *priv,
			      int depth)
{
	int error, pwake = 0;
	unsigned long flags;
	struct epitem *epi, *nepi;
	LIST_HEAD(txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl().
	 */
	mutex_lock_nested(&ep->mtx, depth);

	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping w/out locks are not lost. We cannot
	 * have the poll callback queue directly on ep->rdllist,
	 * because we want the "sproc" callback to be able to do it
	 * in a lockless way.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	list_splice_init(&ep->rdllist, &txlist);
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Now call the callback function.
	 */
	error = (*sproc)(ep, &txlist, priv);

	spin_lock_irqsave(&ep->lock, flags);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	for (nepi = ep->ovflist; (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			__pm_stay_awake(epi->ws);
		}
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	ep->ovflist = EP_UNACTIVE_PTR;

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	list_splice(&txlist, &ep->rdllist);
	__pm_relax(ep->ws);

	if (!list_empty(&ep->rdllist)) {
		/*
		 * Wake up (if active) both the eventpoll wait list and
		 * the ->poll() wait list (delayed after we release the lock).
		 */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	mutex_unlock(&ep->mtx);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return error;
}

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This is because of
	 * the sequence of the lock acquisition. Here we take "ep->lock" then
	 * the wait queue head lock when unregistering the wait queue. The
	 * wakeup callback runs holding the wait queue head lock and calls our
	 * callback, which will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&file->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	wakeup_source_unregister(epi->ws);

	/* At this point it is safe to free the eventpoll item */
	kmem_cache_free(epi_cache, epi);

	atomic_long_dec(&ep->user->epoll_watches);

	return 0;
}

static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting on this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At
	 * this point we are sure no poll callbacks will be lingering around,
	 * and also by holding "epmutex" we can be sure that no file cleanup
	 * code will hit us during this operation. So we can avoid the lock
	 * on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	wakeup_source_unregister(ep->ws);
	kfree(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	return 0;
}

static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct epitem *epi, *tmp;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);
	list_for_each_entry_safe(epi, tmp, head, rdllink) {
		pt._key = epi->event.events;
		if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
		    epi->event.events)
			return POLLIN | POLLRDNORM;
		else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as the
			 * caller-requested events go. We can remove it here.
			 */
			__pm_relax(epi->ws);
			list_del_init(&epi->rdllink);
		}
	}

	return 0;
}

static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
{
	return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	int pollflags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/*
	 * Proceed to find out if wanted events are really available inside
	 * the ready list. This needs to be done under ep_call_nested()
	 * supervision, since the call to f_op->poll() done on listed files
	 * could re-enter here.
	 */
	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
				   ep_poll_readyevents_proc, ep, ep, current);

	return pollflags != -1 ? pollflags : 0;
}

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll,
	.llseek		= noop_llseek,
};

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need to have this facility to cleanup correctly files that
 * are closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free(), but holding the mutex
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 *
	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
	 */
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_first_entry(lsthead, struct epitem, fllink);

		ep = epi->ep;
		list_del_init(&epi->fllink);
		mutex_lock_nested(&ep->mtx, 0);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}

	mutex_unlock(&epmutex);
}

static int ep_alloc(struct eventpoll **pep)
{
	int error;
	struct user_struct *user;
	struct eventpoll *ep;

	user = get_current_user();
	error = -ENOMEM;
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		goto free_uid;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->mtx);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = user;

	*pep = ep;

	return 0;

free_uid:
	free_uid(user);
	return error;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	return epir;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	if ((unsigned long)key & POLLFREE) {
		ep_pwq_from_wait(wait)->whead = NULL;
		/*
		 * whead = NULL above can race with ep_remove_wait_queue()
		 * which can do another remove_wait_queue() after us, so we
		 * can't use __remove_wait_queue(). whead->lock is held by
		 * the caller.
		 */
		list_del_init(&wait->task_list);
	}

	spin_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is
	 * received, until the next EPOLL_CTL_MOD is issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * Check the events coming with the callback. At this stage, not
	 * every device reports the events in the "key" parameter of the
	 * callback. We need to be able to handle both cases here, hence the
	 * test for "key" != NULL before the event match test.
	 */
	if (key && !((unsigned long) key & epi->event.events))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
			if (epi->ws) {
				/*
				 * Activate ep->ws since epi->ws may get
				 * deactivated at any time.
				 */
				__pm_stay_awake(ep->ws);
			}

		}
		goto out_unlock;
	}

	/* If this file is already in the ready list we exit soon */
	if (!ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
		__pm_stay_awake(epi->ws);
	}

	/*
	 * Wake up (if active) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up_locked(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 1;
}

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}



#define PATH_ARR_SIZE 5
/*
 * These are the numbers of paths of length 1 to 5 that we are allowing to
 * emanate from a single file of interest. For example, we allow 1000 paths
 * of length 1 to emanate from each file of interest. This essentially
 * represents the potential wakeup paths, which need to be limited in order
 * to avoid massive uncontrolled wakeup storms. The common use case should be
 * a single ep which is connected to n file sources. In this case each file
 * source has 1 path of length 1. Thus, the numbers below should be more than
 * sufficient. These path limits are enforced during an EPOLL_CTL_ADD
 * operation, since a modify and delete can't add additional paths.
 * Protected by the epmutex.
 */
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

static int path_count_inc(int nests)
{
	/* Allow an arbitrary number of depth 1 paths */
	if (nests == 0)
		return 0;

	if (++path_count[nests] > path_limits[nests])
		return -1;
	return 0;
}

static void path_count_init(void)
{
	int i;

	for (i = 0; i < PATH_ARR_SIZE; i++)
		path_count[i] = 0;
}

static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
{
	int error = 0;
	struct file *file = priv;
	struct file *child_file;
	struct epitem *epi;

	list_for_each_entry(epi, &file->f_ep_links, fllink) {
		child_file = epi->ep->file;
		if (is_file_epoll(child_file)) {
			if (list_empty(&child_file->f_ep_links)) {
				if (path_count_inc(call_nests)) {
					error = -1;
					break;
				}
			} else {
				error = ep_call_nested(&poll_loop_ncalls,
							EP_MAX_NESTS,
							reverse_path_check_proc,
							child_file, child_file,
							current);
			}
			if (error != 0)
				break;
		} else {
			printk(KERN_ERR "reverse_path_check_proc: "
				"file is not an ep!\n");
		}
	}
	return error;
}

/**
 * reverse_path_check - The tfile_check_list is a list of file *, which have
 *                      links that are proposed to be newly added. We need to
 *                      make sure that those added links don't add too many
 *                      paths such that we will spend all our time waking up
 *                      eventpoll objects.
 *
 * Returns: Returns zero if the proposed links don't create too many paths,
 *	    -1 otherwise.
 */
static int reverse_path_check(void)
{
	int error = 0;
	struct file *current_file;

	/* let's call this for all tfiles */
	list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
		path_count_init();
		error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
					reverse_path_check_proc, current_file,
					current_file, current);
		if (error)
			break;
	}
	return error;
}

static int ep_create_wakeup_source(struct epitem *epi)
{
	const char *name;

	if (!epi->ep->ws) {
		epi->ep->ws = wakeup_source_register("eventpoll");
		if (!epi->ep->ws)
			return -ENOMEM;
	}

	name = epi->ffd.file->f_path.dentry->d_name.name;
	epi->ws = wakeup_source_register(name);
	if (!epi->ws)
		return -ENOMEM;

	return 0;
}

static void ep_destroy_wakeup_source(struct epitem *epi)
{
	wakeup_source_unregister(epi->ws);
	epi->ws = NULL;
}

/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	long user_watches;
	struct epitem *epi;
	struct ep_pqueue epq;

	user_watches = atomic_long_read(&ep->user->epoll_watches);
	if (unlikely(user_watches >= max_user_watches))
		return -ENOSPC;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		return -ENOMEM;

	/* Item initialization follows here ... */
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->nwait = 0;
	epi->next = EP_UNACTIVE_PTR;
	if (epi->event.events & EPOLLWAKEUP) {
		error = ep_create_wakeup_source(epi);
		if (error)
			goto error_create_wakeup_source;
	} else {
		epi->ws = NULL;
	}

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
	epq.pt._key = event->events;

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function. Note that after
	 * this operation completes, the poll callback can start hitting
	 * the new item.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely, an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	error = -ENOMEM;
	if (epi->nwait < 0)
		goto error_unregister;

	/* Add the current item to the list of active epoll hooks for this file */
	spin_lock(&tfile->f_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_lock);

	/*
	 * Add the current item to the RB tree. All RB tree operations are
	 * protected by "mtx", and ep_insert() is called with "mtx" held.
	 */
	ep_rbtree_insert(ep, epi);

	/* now check if we've created too many backpaths */
	error = -EINVAL;
	if (reverse_path_check())
		goto error_remove_epi;

	/* We have to drop the new item inside our item list to keep track of it */
	spin_lock_irqsave(&ep->lock, flags);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
		__pm_stay_awake(epi->ws);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	atomic_long_inc(&ep->user->epoll_watches);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;

error_remove_epi:
	spin_lock(&tfile->f_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&tfile->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

error_unregister:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue. Note that we don't care about the ep->ovflist
	 * list, since that is used/cleaned only inside a section bound by "mtx".
	 * And ep_insert() is called with "mtx" held.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	wakeup_source_unregister(epi->ws);

error_create_wakeup_source:
	kmem_cache_free(epi_cache, epi);

	return error;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	/*
	 * Set the new event interest mask before calling f_op->poll();
	 * otherwise we might miss an event that happens between the
	 * f_op->poll() call and the new event set registering.
	 */
	epi->event.events = event->events; /* need barrier below */
	pt._key = event->events;
	epi->event.data = event->data; /* protected by mtx */
	if (epi->event.events & EPOLLWAKEUP) {
		if (!epi->ws)
			ep_create_wakeup_source(epi);
	} else if (epi->ws) {
		ep_destroy_wakeup_source(epi);
	}

	/*
	 * The following barrier has two effects:
	 *
	 * 1) Flush epi changes above to other CPUs.  This ensures
	 *    we do not miss events from ep_poll_callback if an
	 *    event occurs immediately after we call f_op->poll().
	 *    We need this because we did not take ep->lock while
	 *    changing epi above (but ep_poll_callback does take
	 *    ep->lock).
	 *
	 * 2) We also need to ensure we do not miss _past_ events
	 *    when calling f_op->poll().  This barrier also
	 *    pairs with the barrier in wq_has_sleeper (see
	 *    comments for wq_has_sleeper).
	 *
	 * This barrier will now guarantee ep_poll_callback or f_op->poll
	 * (or both) will notice the readiness of an item.
	 */
	smp_mb();

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt);

	/*
	 * If the item is "hot" and it is not registered inside the ready
	 * list, push it inside.
	 */
	if (revents & event->events) {
		spin_lock_irq(&ep->lock);
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			__pm_stay_awake(epi->ws);

			/* Notify waiting tasks that events are available */
			if (waitqueue_active(&ep->wq))
				wake_up_locked(&ep->wq);
			if (waitqueue_active(&ep->poll_wait))
				pwake++;
		}
		spin_unlock_irq(&ep->lock);
	}

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;
}

static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct ep_send_events_data *esed = priv;
	int eventcnt;
	unsigned int revents;
	struct epitem *epi;
	struct epoll_event __user *uevent;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	/*
	 * We can loop without lock because we are passed a task private list.
	 * Items cannot vanish during the loop because ep_scan_ready_list() is
	 * holding "mtx" during this call.
	 */
	for (eventcnt = 0, uevent = esed->events;
	     !list_empty(head) && eventcnt < esed->maxevents;) {
		epi = list_first_entry(head, struct epitem, rdllink);

		/*
		 * Activate ep->ws before deactivating epi->ws to prevent
		 * triggering auto-suspend here (in case we reactivate epi->ws
		 * below).
		 *
		 * This could be rearranged to delay the deactivation of epi->ws
		 * instead, but then epi->ws would temporarily be out of sync
		 * with ep_is_linked().
		 */
		if (epi->ws && epi->ws->active)
			__pm_stay_awake(ep->ws);
		__pm_relax(epi->ws);
		list_del_init(&epi->rdllink);

		pt._key = epi->event.events;
		revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
			epi->event.events;

		/*
		 * If the event mask intersects the caller-requested one,
		 * deliver the event to userspace. Again, ep_scan_ready_list()
		 * is holding "mtx", so no operations coming from userspace
		 * can change the item.
		 */
		if (revents) {
			if (__put_user(revents, &uevent->events) ||
			    __put_user(epi->event.data, &uevent->data)) {
				list_add(&epi->rdllink, head);
				__pm_stay_awake(epi->ws);
				return eventcnt ? eventcnt : -EFAULT;
			}
			eventcnt++;
			uevent++;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			else if (!(epi->event.events & EPOLLET)) {
				/*
				 * If this file has been added with Level
				 * Trigger mode, we need to insert it back
				 * inside the ready list, so that the next call
				 * to epoll_wait() will check again the events
				 * availability. At this point, no one can
				 * insert into ep->rdllist besides us. The
				 * epoll_ctl() callers are locked out by
				 * ep_scan_ready_list() holding "mtx" and the
				 * poll callback will queue them in ep->ovflist.
				 */
				list_add_tail(&epi->rdllink, &ep->rdllist);
				__pm_stay_awake(epi->ws);
			}
		}
	}

	return eventcnt;
}

static int ep_send_events(struct eventpoll *ep,
			  struct epoll_event __user *events, int maxevents)
{
	struct ep_send_events_data esed;

	esed.maxevents = maxevents;
	esed.events = events;

	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
}

static inline struct timespec ep_set_mstimeout(long ms)
{
	struct timespec now, ts = {
		.tv_sec = ms / MSEC_PER_SEC,
		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
	};

	ktime_get_ts(&now);
	return timespec_add_safe(now, ts);
}
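
/*
 * Worked example (MSEC_PER_SEC = 1000, NSEC_PER_MSEC = 1000000):
 * ms = 2500 yields ts = { .tv_sec = 2, .tv_nsec = 500000000 }, which is
 * then added to the current monotonic time to form an absolute expiry.
 */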

/**
 * ep_poll - Retrieves ready events, and delivers them to the caller supplied
 *           event buffer.
 *
 * @ep: Pointer to the eventpoll context.
 * @events: Pointer to the userspace buffer where the ready events should be
 *          stored.
 * @maxevents: Size (in terms of number of events) of the caller event buffer.
 * @timeout: Maximum timeout for the ready events fetch operation, in
 *           milliseconds. If the @timeout is zero, the function will not block,
 *           while if the @timeout is less than zero, the function will block
 *           until at least one event has been retrieved (or an error
 *           occurred).
 *
 * Returns: Returns the number of ready events which have been fetched, or an
 *          error code, in case of error.
 */
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res = 0, eavail, timed_out = 0;
	unsigned long flags;
	long slack = 0;
	wait_queue_t wait;
	ktime_t expires, *to = NULL;

	if (timeout > 0) {
		struct timespec end_time = ep_set_mstimeout(timeout);

		slack = select_estimate_accuracy(&end_time);
		to = &expires;
		*to = timespec_to_ktime(end_time);
	} else if (timeout == 0) {
		/*
		 * Avoid the unnecessary trip to the wait queue loop, if the
		 * caller specified a non-blocking operation.
		 */
		timed_out = 1;
		spin_lock_irqsave(&ep->lock, flags);
		goto check_events;
	}

fetch_events:
	spin_lock_irqsave(&ep->lock, flags);

	if (!ep_events_available(ep)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be woken up by
		 * ep_poll_callback() when events become available.
		 */
		init_waitqueue_entry(&wait, current);
		__add_wait_queue_exclusive(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (ep_events_available(ep) || timed_out)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			spin_unlock_irqrestore(&ep->lock, flags);
			if (!freezable_schedule_hrtimeout_range(to, slack,
								HRTIMER_MODE_ABS))
				timed_out = 1;

			spin_lock_irqsave(&ep->lock, flags);
		}
		__remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}
check_events:
	/* Is it worth it to try to dig for events? */
	eavail = ep_events_available(ep);

	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we try again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
		goto fetch_events;

	return res;
}

/**
 * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
 *                      API, to verify that adding an epoll file inside another
 *                      epoll structure does not violate the constraints, in
 *                      terms of closed loops, or too deep chains (which can
 *                      result in excessive stack usage).
 *
 * @priv: Pointer to the epoll file to be currently checked.
 * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
 *          data structure pointer.
 * @call_nests: Current depth of the @ep_call_nested() call stack.
 *
 * Returns: Returns zero if adding the epoll @file inside current epoll
 *          structure @ep does not violate the constraints, or -1 otherwise.
 */
static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
{
	int error = 0;
	struct file *file = priv;
	struct eventpoll *ep = file->private_data;
	struct eventpoll *ep_tovisit;
	struct rb_node *rbp;
	struct epitem *epi;

	mutex_lock_nested(&ep->mtx, call_nests + 1);
	ep->visited = 1;
	list_add(&ep->visited_list_link, &visited_list);
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);
		if (unlikely(is_file_epoll(epi->ffd.file))) {
			ep_tovisit = epi->ffd.file->private_data;
			if (ep_tovisit->visited)
				continue;
			error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
					ep_loop_check_proc, epi->ffd.file,
					ep_tovisit, current);
			if (error != 0)
				break;
		} else {
			/*
			 * If we've reached a file that is not associated with
			 * an ep, then we need to check if the newly added
			 * links are going to add too many wakeup paths. We do
			 * this by adding it to the tfile_check_list, if it's
			 * not already there, and calling reverse_path_check()
			 * during ep_insert().
			 */
			if (list_empty(&epi->ffd.file->f_tfile_llink))
				list_add(&epi->ffd.file->f_tfile_llink,
					 &tfile_check_list);
		}
	}
	mutex_unlock(&ep->mtx);

	return error;
}

/**
 * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
 *                 inside another epoll file (represented by @ep) does not
 *                 create closed loops or too deep chains.
 *
 * @ep: Pointer to the epoll private data structure.
 * @file: Pointer to the epoll file to be checked.
 *
 * Returns: Returns zero if adding the epoll @file inside current epoll
 *          structure @ep does not violate the constraints, or -1 otherwise.
 */
static int ep_loop_check(struct eventpoll *ep, struct file *file)
{
	int ret;
	struct eventpoll *ep_cur, *ep_next;

	ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
			      ep_loop_check_proc, file, ep, current);
	/* clear visited list */
	list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
							visited_list_link) {
		ep_cur->visited = 0;
		list_del(&ep_cur->visited_list_link);
	}
	return ret;
}

static void clear_tfile_check_list(void)
{
	struct file *file;

	/* first clear the tfile_check_list */
	while (!list_empty(&tfile_check_list)) {
		file = list_first_entry(&tfile_check_list, struct file,
					f_tfile_llink);
		list_del_init(&file->f_tfile_llink);
	}
	INIT_LIST_HEAD(&tfile_check_list);
}
1643 
/*
 * Open an eventpoll file descriptor.
 */
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
	int error, fd;
	struct eventpoll *ep = NULL;
	struct file *file;

	/* Check the EPOLL_* constant for consistency.  */
	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);

	if (flags & ~EPOLL_CLOEXEC)
		return -EINVAL;
	/*
	 * Create the internal data structure ("struct eventpoll").
	 */
	error = ep_alloc(&ep);
	if (error < 0)
		return error;
	/*
	 * Creates all the items needed to set up an eventpoll file. That is,
	 * a file structure and a free file descriptor.
	 */
	fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
	if (fd < 0) {
		error = fd;
		goto out_free_ep;
	}
	file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
				 O_RDWR | (flags & O_CLOEXEC));
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto out_free_fd;
	}
	fd_install(fd, file);
	ep->file = file;
	return fd;

out_free_fd:
	put_unused_fd(fd);
out_free_ep:
	ep_free(ep);
	return error;
}
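
/*
 * Minimal userspace usage sketch (not part of this file, assumes the glibc
 * wrapper and <sys/epoll.h>):
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	if (epfd < 0)
 *		perror("epoll_create1");
 */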
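/*
 * The size argument is ignored apart from the sanity check below; it is
 * kept positive-only for compatibility with the original epoll_create(2)
 * ABI, where it was a hint of how many file descriptors the caller
 * expected to watch.
 */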
SYSCALL_DEFINE1(epoll_create, int, size)
{
	if (size <= 0)
		return -EINVAL;

	return sys_epoll_create1(0);
}

/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set.
 */
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
		struct epoll_event __user *, event)
{
	int error;
	int did_lock_epmutex = 0;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	error = -EFAULT;
	if (ep_op_has_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto error_return;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto error_fput;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto error_tgt_fput;

	/* Check if EPOLLWAKEUP is allowed */
	if ((epds.events & EPOLLWAKEUP) && !capable(CAP_EPOLLWAKEUP))
		epds.events &= ~EPOLLWAKEUP;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !is_file_epoll(file))
		goto error_tgt_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/*
	 * When we insert an epoll file descriptor inside another epoll file
	 * descriptor, there is the chance of creating closed loops, which are
	 * better handled here than in more critical paths. While we are
	 * checking for loops we also determine the list of files reachable
	 * and hang them on the tfile_check_list, so we can check that we
	 * haven't created too many possible wakeup paths.
	 *
	 * We need to hold the epmutex across both ep_insert and ep_remove
	 * b/c we want to make sure we are looking at a coherent view of the
	 * epoll network.
	 */
	if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
		mutex_lock(&epmutex);
		did_lock_epmutex = 1;
	}
	if (op == EPOLL_CTL_ADD) {
		if (is_file_epoll(tfile)) {
			error = -ELOOP;
			if (ep_loop_check(ep, tfile) != 0) {
				clear_tfile_check_list();
				goto error_tgt_fput;
			}
		} else
			list_add(&tfile->f_tfile_llink, &tfile_check_list);
	}

	mutex_lock_nested(&ep->mtx, 0);

	/*
	 * Try to look up the file inside our RB tree. Since we grabbed "mtx"
	 * above, we can be sure to be able to use the item looked up by
	 * ep_find() till we release the mutex.
	 */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		clear_tfile_check_list();
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}
	mutex_unlock(&ep->mtx);

error_tgt_fput:
	if (did_lock_epmutex)
		mutex_unlock(&epmutex);

	fput(tfile);
error_fput:
	fput(file);
error_return:

	return error;
}
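
/*
 * Userspace usage sketch (not part of this file, assumes the glibc wrapper;
 * sockfd is a hypothetical connected socket):
 *
 *	struct epoll_event ev = { .events = EPOLLIN | EPOLLET };
 *	ev.data.fd = sockfd;
 *	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev) == -1)
 *		perror("epoll_ctl: add");
 *
 * Re-adding the same fd yields EEXIST, and modifying or deleting an fd that
 * was never added yields ENOENT, per the switch above.
 */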

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	/* The maximum number of events must be greater than zero */
	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
		return -EINVAL;

	/* Verify that the area passed by the user is writeable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
		error = -EFAULT;
		goto error_return;
	}

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!is_file_epoll(file))
		goto error_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

error_fput:
	fput(file);
error_return:

	return error;
}
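
/*
 * Typical userspace event loop sketch (not part of this file, assumes the
 * glibc wrapper; handle_io() is a hypothetical callback):
 *
 *	struct epoll_event events[64];
 *	for (;;) {
 *		int n = epoll_wait(epfd, events, 64, -1);
 *		if (n == -1) {
 *			if (errno == EINTR)
 *				continue;
 *			perror("epoll_wait");
 *			break;
 *		}
 *		for (int i = 0; i < n; i++)
 *			handle_io(events[i].data.fd, events[i].events);
 *	}
 */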

#ifdef HAVE_SET_RESTORE_SIGMASK

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	int error;
	sigset_t ksigmask, sigsaved;

	/*
	 * If the caller wants a certain signal mask to be set during the wait,
	 * we apply it here.
	 */
	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	error = sys_epoll_wait(epfd, events, maxevents, timeout);

	/*
	 * If we changed the signal mask, we need to restore the original one.
	 * In case we've got a signal while waiting, we do not restore the
	 * signal mask yet, and we allow do_signal() to deliver the signal on
	 * the way back to userspace, before the signal mask is restored.
	 */
	if (sigmask) {
		if (error == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		} else
			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	}

	return error;
}
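
/*
 * Userspace sketch of what epoll_pwait(2) buys over a separate
 * sigprocmask()/epoll_wait() pair (not part of this file, assumes the glibc
 * wrapper): the mask swap and the wait happen atomically, so a signal
 * cannot slip in between the two calls. Here SIGTERM is unblocked only
 * for the duration of the wait:
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGTERM);
 *	int n = epoll_pwait(epfd, events, 64, -1, &mask);
 */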

#endif /* HAVE_SET_RESTORE_SIGMASK */

static int __init eventpoll_init(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	/*
	 * Allows top 4% of lowmem to be allocated for epoll watches (per user).
	 */
	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
		EP_ITEM_COST;
	BUG_ON(max_user_watches < 0);
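
	/*
	 * Worked example (approximate, assuming 1 GiB of lowmem and an
	 * EP_ITEM_COST on the order of 200 bytes): the page count divided
	 * by 25 is 4% of lowmem, i.e. roughly 42 MiB, which at ~200 bytes
	 * per watch allows on the order of 200,000 watches per user.
	 */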

	/*
	 * Initialize the structure used to perform epoll file descriptor
	 * inclusion loop checks.
	 */
	ep_nested_calls_init(&poll_loop_ncalls);

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_nested_calls_init(&poll_safewake_ncalls);

	/* Initialize the structure used to perform file's f_op->poll() calls */
	ep_nested_calls_init(&poll_readywalk_ncalls);

	/* Allocates slab cache used to allocate "struct epitem" items */
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
			sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);

	return 0;
}
fs_initcall(eventpoll_init);