/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}
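
/*
 * Worked example (illustrative only): for a 2 s timeout a normal task
 * gets 2 s / 1000 = 2 ms of slack, while a "nice" task (divfactor 200)
 * gets 10 ms.  For a normal task any timeout longer than 100 s would
 * produce more than the 100 ms cap, so the early MAX_SLACK check
 * returns the cap without risking overflow in the multiplication above.
 */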

long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
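
/*
 * POLL_TABLE_FULL exploits the fact that each poll_table_page is a
 * single page: if advancing ->entry by one slot would step past the
 * page boundary (page start + PAGE_SIZE), the page has no room left.
 * For example, with 4 KiB pages a table at 0xffff880000001000 is full
 * once ->entry+1 would point beyond 0xffff880000002000.  (Addresses
 * are made up for illustration.)
 */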

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = freezable_schedule_hrtimeout_range(expires, slack,
							HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
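
/*
 * Worked example (illustrative): a caller passing sec=2, nsec=500000000
 * gets *to = now + 2.5 s as an absolute expiry on the monotonic clock;
 * sec=0, nsec=0 yields a zeroed *to, which the pollers below treat as
 * "don't block at all"; sec=1, nsec=1000000000 is rejected with
 * -EINVAL because the nanoseconds field is not normalized.
 */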

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit)
{
	wait->_key = POLLEX_SET;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out, bit);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
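
/*
 * Typical userspace call sequence served by the syscall above (a
 * hedged sketch, not part of this file's build; "sock_fd" is a
 * made-up name):
 *
 *	fd_set rfds;
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(sock_fd, &rfds);
 *	if (select(sock_fd + 1, &rfds, NULL, NULL, &tv) > 0 &&
 *	    FD_ISSET(sock_fd, &rfds))
 *		;	// sock_fd is readable
 *
 * Note that unless the task's personality has STICKY_TIMEOUTS set,
 * Linux writes the time remaining back into tv, which is exactly what
 * poll_select_copy_remaining() implements above.
 */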

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
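
/*
 * The sixth argument is read as if userspace (usually the C library on
 * the caller's behalf) had packed it like this -- a sketch assuming
 * only the layout described in the comment above:
 *
 *	struct {
 *		const sigset_t *ss;	// pointer to the signal mask
 *		size_t ss_len;		// its size, sizeof(sigset_t)
 *	} sig_arg = { &mask, sizeof(mask) };
 *
 *	syscall(__NR_pselect6, n, &rfds, NULL, NULL, &ts, &sig_arg);
 */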

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
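
/*
 * Worked example (assuming 4 KiB pages and a 64-bit kernel, where
 * sizeof(struct poll_list) is 16 and sizeof(struct pollfd) is 8):
 * POLLFD_PER_PAGE = (4096 - 16) / 8 = 510, so each additionally
 * allocated page carries up to 510 descriptors beyond those that fit
 * in the on-stack buffer of do_sys_poll() below.
 */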
714 
715 /*
716  * Fish for pollable events on the pollfd->fd file descriptor. We're only
717  * interested in events matching the pollfd->events mask, and the result
718  * matching that mask is both recorded in pollfd->revents and returned. The
719  * pwait poll_table will be used by the fd-provided poll handler for waiting,
720  * if pwait->_qproc is non-NULL.
721  */
do_pollfd(struct pollfd * pollfd,poll_table * pwait)722 static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
723 {
724 	unsigned int mask;
725 	int fd;
726 
727 	mask = 0;
728 	fd = pollfd->fd;
729 	if (fd >= 0) {
730 		struct fd f = fdget(fd);
731 		mask = POLLNVAL;
732 		if (f.file) {
733 			mask = DEFAULT_POLLMASK;
734 			if (f.file->f_op && f.file->f_op->poll) {
735 				pwait->_key = pollfd->events|POLLERR|POLLHUP;
736 				mask = f.file->f_op->poll(f.file, pwait);
737 			}
738 			/* Mask out unneeded events. */
739 			mask &= pollfd->events | POLLERR | POLLHUP;
740 			fdput(f);
741 		}
742 	}
743 	pollfd->revents = mask;
744 
745 	return mask;
746 }
747 
do_poll(unsigned int nfds,struct poll_list * list,struct poll_wqueues * wait,struct timespec * end_time)748 static int do_poll(unsigned int nfds,  struct poll_list *list,
749 		   struct poll_wqueues *wait, struct timespec *end_time)
750 {
751 	poll_table* pt = &wait->pt;
752 	ktime_t expire, *to = NULL;
753 	int timed_out = 0, count = 0;
754 	unsigned long slack = 0;
755 
756 	/* Optimise the no-wait case */
757 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
758 		pt->_qproc = NULL;
759 		timed_out = 1;
760 	}
761 
762 	if (end_time && !timed_out)
763 		slack = select_estimate_accuracy(end_time);
764 
765 	for (;;) {
766 		struct poll_list *walk;
767 
768 		for (walk = list; walk != NULL; walk = walk->next) {
769 			struct pollfd * pfd, * pfd_end;
770 
771 			pfd = walk->entries;
772 			pfd_end = pfd + walk->len;
773 			for (; pfd != pfd_end; pfd++) {
774 				/*
775 				 * Fish for events. If we found one, record it
776 				 * and kill poll_table->_qproc, so we don't
777 				 * needlessly register any other waiters after
778 				 * this. They'll get immediately deregistered
779 				 * when we break out and return.
780 				 */
781 				if (do_pollfd(pfd, pt)) {
782 					count++;
783 					pt->_qproc = NULL;
784 				}
785 			}
786 		}
787 		/*
788 		 * All waiters have already been registered, so don't provide
789 		 * a poll_table->_qproc to them on the next loop iteration.
790 		 */
791 		pt->_qproc = NULL;
792 		if (!count) {
793 			count = wait->error;
794 			if (signal_pending(current))
795 				count = -EINTR;
796 		}
797 		if (count || timed_out)
798 			break;
799 
800 		/*
801 		 * If this is the first loop and we have a timeout
802 		 * given, then we convert to ktime_t and set the to
803 		 * pointer to the expiry value.
804 		 */
805 		if (end_time && !to) {
806 			expire = timespec_to_ktime(*end_time);
807 			to = &expire;
808 		}
809 
810 		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
811 			timed_out = 1;
812 	}
813 	return count;
814 }
815 
816 #define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
817 			sizeof(struct pollfd))
818 
do_sys_poll(struct pollfd __user * ufds,unsigned int nfds,struct timespec * end_time)819 int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
820 		struct timespec *end_time)
821 {
822 	struct poll_wqueues table;
823  	int err = -EFAULT, fdcount, len, size;
824 	/* Allocate small arguments on the stack to save memory and be
825 	   faster - use long to make sure the buffer is aligned properly
826 	   on 64 bit archs to avoid unaligned access */
827 	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
828 	struct poll_list *const head = (struct poll_list *)stack_pps;
829  	struct poll_list *walk = head;
830  	unsigned long todo = nfds;
831 
832 	if (nfds > rlimit(RLIMIT_NOFILE))
833 		return -EINVAL;
834 
835 	len = min_t(unsigned int, nfds, N_STACK_PPS);
836 	for (;;) {
837 		walk->next = NULL;
838 		walk->len = len;
839 		if (!len)
840 			break;
841 
842 		if (copy_from_user(walk->entries, ufds + nfds-todo,
843 					sizeof(struct pollfd) * walk->len))
844 			goto out_fds;
845 
846 		todo -= walk->len;
847 		if (!todo)
848 			break;
849 
850 		len = min(todo, POLLFD_PER_PAGE);
851 		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
852 		walk = walk->next = kmalloc(size, GFP_KERNEL);
853 		if (!walk) {
854 			err = -ENOMEM;
855 			goto out_fds;
856 		}
857 	}
858 
859 	poll_initwait(&table);
860 	fdcount = do_poll(nfds, head, &table, end_time);
861 	poll_freewait(&table);
862 
863 	for (walk = head; walk; walk = walk->next) {
864 		struct pollfd *fds = walk->entries;
865 		int j;
866 
867 		for (j = 0; j < walk->len; j++, ufds++)
868 			if (__put_user(fds[j].revents, &ufds->revents))
869 				goto out_fds;
870   	}
871 
872 	err = fdcount;
873 out_fds:
874 	walk = head->next;
875 	while (walk) {
876 		struct poll_list *pos = walk;
877 		walk = walk->next;
878 		kfree(pos);
879 	}
880 
881 	return err;
882 }
883 
do_restart_poll(struct restart_block * restart_block)884 static long do_restart_poll(struct restart_block *restart_block)
885 {
886 	struct pollfd __user *ufds = restart_block->poll.ufds;
887 	int nfds = restart_block->poll.nfds;
888 	struct timespec *to = NULL, end_time;
889 	int ret;
890 
891 	if (restart_block->poll.has_timeout) {
892 		end_time.tv_sec = restart_block->poll.tv_sec;
893 		end_time.tv_nsec = restart_block->poll.tv_nsec;
894 		to = &end_time;
895 	}
896 
897 	ret = do_sys_poll(ufds, nfds, to);
898 
899 	if (ret == -EINTR) {
900 		restart_block->fn = do_restart_poll;
901 		ret = -ERESTART_RESTARTBLOCK;
902 	}
903 	return ret;
904 }
905 
SYSCALL_DEFINE3(poll,struct pollfd __user *,ufds,unsigned int,nfds,int,timeout_msecs)906 SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
907 		int, timeout_msecs)
908 {
909 	struct timespec end_time, *to = NULL;
910 	int ret;
911 
912 	if (timeout_msecs >= 0) {
913 		to = &end_time;
914 		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
915 			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
916 	}
917 
918 	ret = do_sys_poll(ufds, nfds, to);
919 
920 	if (ret == -EINTR) {
921 		struct restart_block *restart_block;
922 
923 		restart_block = &current_thread_info()->restart_block;
924 		restart_block->fn = do_restart_poll;
925 		restart_block->poll.ufds = ufds;
926 		restart_block->poll.nfds = nfds;
927 
928 		if (timeout_msecs >= 0) {
929 			restart_block->poll.tv_sec = end_time.tv_sec;
930 			restart_block->poll.tv_nsec = end_time.tv_nsec;
931 			restart_block->poll.has_timeout = 1;
932 		} else
933 			restart_block->poll.has_timeout = 0;
934 
935 		ret = -ERESTART_RESTARTBLOCK;
936 	}
937 	return ret;
938 }
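
/*
 * Typical userspace call served by the syscall above (a hedged sketch,
 * not part of this file's build; "sock_fd" is a made-up name):
 *
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *	int n = poll(&pfd, 1, 5000);	// block for at most 5 s
 *
 *	if (n > 0 && (pfd.revents & POLLIN))
 *		;	// sock_fd is readable
 *
 * A negative timeout_msecs means "block indefinitely", which is why
 * the code above only builds an end_time for timeout_msecs >= 0.
 */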

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
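
/*
 * ppoll() exists to close the race between unblocking a signal and
 * calling poll(): the mask swap and the wait happen atomically with
 * respect to signal delivery.  A hedged userspace sketch (names are
 * made up):
 *
 *	sigset_t mask;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	sigemptyset(&mask);	// allow everything while waiting
 *	ppoll(&pfd, 1, &ts, &mask);
 *
 * Note that, unlike POSIX's const-timeout convention, Linux may write
 * the time remaining back to *tsp via poll_select_copy_remaining()
 * above, unless the task has STICKY_TIMEOUTS in its personality.
 */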