// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */
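/*
 * Worked example (illustrative only): with the default 0.1% divisor a
 * 1 s timeout yields about 1 ms of slack and a 10 ms timeout about
 * 10 us; any timeout above roughly 100 s hits the 100 ms MAX_SLACK cap,
 * and "nice" tasks reach that cap five times sooner.
 */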

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
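/*
 * Illustrative sketch (not from this file; my_dev, my_poll and
 * my_readable() are hypothetical): a typical driver ->poll method
 * registers its wait queue with poll_wait() and then reports the
 * events that are ready right now, e.g.
 *
 *	static __poll_t my_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		poll_wait(file, &dev->read_wq, wait);
 *		return my_readable(dev) ? EPOLLIN | EPOLLRDNORM : 0;
 *	}
 */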
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !(key_to_poll(key) & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

static int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}

enum poll_time_type {
	PT_TIMEVAL = 0,
	PT_OLD_TIMEVAL = 1,
	PT_TIMESPEC = 2,
	PT_OLD_TIMESPEC = 3,
};

static int poll_select_finish(struct timespec64 *end_time,
			      void __user *p,
			      enum poll_time_type pt_type, int ret)
{
	struct timespec64 rts;

	restore_saved_sigmask_unless(ret == -ERESTARTNOHAND);

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts);
	rts = timespec64_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;


	switch (pt_type) {
	case PT_TIMEVAL:
		{
			struct timeval rtv;

			if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
				memset(&rtv, 0, sizeof(rtv));
			rtv.tv_sec = rts.tv_sec;
			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
			if (!copy_to_user(p, &rtv, sizeof(rtv)))
				return ret;
		}
		break;
	case PT_OLD_TIMEVAL:
		{
			struct old_timeval32 rtv;

			rtv.tv_sec = rts.tv_sec;
			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
			if (!copy_to_user(p, &rtv, sizeof(rtv)))
				return ret;
		}
		break;
	case PT_TIMESPEC:
		if (!put_timespec64(&rts, p))
			return ret;
		break;
	case PT_OLD_TIMESPEC:
		if (!put_old_timespec32(&rts, p))
			return ret;
		break;
	default:
		BUG();
	}
	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
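/*
 * Example (illustrative only): with 64-bit longs FDS_BITPERLONG is 64,
 * so FDS_LONGS(1024) == 16 and FDS_BYTES(1024) == 128, while even a
 * single descriptor still occupies one full long: FDS_BYTES(1) == 8.
 */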

/*
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;
	/* handle the last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\
			EPOLLNVAL)
#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\
			 EPOLLNVAL)
#define POLLEX_SET (EPOLLPRI | EPOLLNVAL)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				__poll_t ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			__poll_t mask;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				mask = EPOLLNVAL;
				f = fdget(i);
				if (f.file) {
					wait_key_set(wait, in, out, bit,
						     busy_flag);
					mask = vfs_poll(f.file, wait);

					fdput(f);
				}
				if ((mask & POLLIN_SET) && (in & bit)) {
					res_in |= bit;
					retval++;
					wait->_qproc = NULL;
				}
				if ((mask & POLLOUT_SET) && (out & bit)) {
					res_out |= bit;
					retval++;
					wait->_qproc = NULL;
				}
				if ((mask & POLLEX_SET) && (ex & bit)) {
					res_ex |= bit;
					retval++;
					wait->_qproc = NULL;
				}
				/* got something, stop busy polling */
				if (retval) {
					can_busy_loop = false;
					busy_flag = 0;

				/*
				 * only remember a returned
				 * POLL_BUSY_LOOP if we asked for it
				 */
				} else if (busy_flag & mask)
					can_busy_loop = true;

			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fdsets, we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kvmalloc(alloc_size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}

static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timeval __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tvp, PT_TIMEVAL, ret);
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	return kern_select(n, inp, outp, exp, tvp);
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, void __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize,
		       enum poll_time_type type)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		switch (type) {
		case PT_TIMESPEC:
			if (get_timespec64(&ts, tsp))
				return -EFAULT;
			break;
		case PT_OLD_TIMESPEC:
			if (get_old_timespec32(&ts, tsp))
				return -EFAULT;
			break;
		default:
			BUG();
		}

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tsp, type, ret);
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
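/*
 * Roughly, the sixth argument points at a pointer/size pair laid out
 * like the (hypothetical, for illustration only) struct below; the
 * kernel simply reads the two fields with __get_user() instead of
 * declaring such a struct:
 *
 *	struct sigset_argpack {
 *		const sigset_t __user *sigmask;
 *		size_t sigsetsize;
 *	};
 */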
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_TIMESPEC);
}

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct old_timespec32 __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_OLD_TIMESPEC);
}

#endif

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     __poll_t busy_flag)
{
	int fd = pollfd->fd;
	__poll_t mask = 0, filter;
	struct fd f;

	if (fd < 0)
		goto out;
	mask = EPOLLNVAL;
	f = fdget(fd);
	if (!f.file)
		goto out;

	/* userland u16 ->events contains POLL... bitmap */
	filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
	pwait->_key = filter | busy_flag;
	mask = vfs_poll(f.file, pwait);
	if (mask & busy_flag)
		*can_busy_poll = true;
	mask &= filter;		/* Mask out unneeded events. */
	fdput(f);

out:
	/* ... and so does ->revents */
	pollfd->revents = mangle_poll(mask);
	return mask;
}

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -ERESTARTNOHAND;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		walk = walk->next = kmalloc(struct_size(walk, entries, len),
					    GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -ERESTARTNOHAND)
		ret = set_restart_fn(restart_block, do_restart_poll);

	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -ERESTARTNOHAND) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = set_restart_fn(restart_block, do_restart_poll);
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
}

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
		struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_old_timespec32(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
}
#endif

#ifdef CONFIG_COMPAT
#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))

/*
 * Ooo, nasty.  We need here to frob 32-bit unsigned longs to
 * 64-bit unsigned longs.
 */
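/*
 * Minimal sketch (illustrative only; error handling and the partial
 * last word omitted) of what compat_get_bitmap() effectively does on
 * a 64-bit kernel:
 *
 *	for (i = 0; i < longs; i++) {
 *		compat_ulong_t lo, hi;
 *
 *		if (get_user(lo, &ufdset[2 * i]) ||
 *		    get_user(hi, &ufdset[2 * i + 1]))
 *			return -EFAULT;
 *		fdset[i] = (unsigned long)hi << 32 | lo;
 *	}
 *
 * Each native long is assembled from two consecutive 32-bit user words,
 * which keeps the bitmap layout correct on both endiannesses, whereas a
 * plain copy_from_user() would only be right on little-endian.
 */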
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
			unsigned long *fdset)
{
	if (ufdset) {
		return compat_get_bitmap(fdset, ufdset, nr);
	} else {
		zero_fd_set(nr, fdset);
		return 0;
	}
}

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	if (!ufdset)
		return 0;
	return compat_put_bitmap(ufdset, fdset, nr);
}


/*
 * This is a virtual copy of sys_select from fs/select.c and probably
 * should be compared to it from time to time
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int size, max_fds, ret = -EINVAL;
	struct fdtable *fdt;
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fdsets, we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		bits = kmalloc_array(6, size, GFP_KERNEL);
		ret = -ENOMEM;
		if (!bits)
			goto out_nofds;
	}
	fds.in      = (unsigned long *)  bits;
	fds.out     = (unsigned long *) (bits +   size);
	fds.ex      = (unsigned long *) (bits + 2*size);
	fds.res_in  = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex  = (unsigned long *) (bits + 5*size);

	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
	    (ret = compat_get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (compat_set_fd_set(n, inp, fds.res_in) ||
	    compat_set_fd_set(n, outp, fds.res_out) ||
	    compat_set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;
out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

static int do_compat_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct old_timeval32 __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct old_timeval32 tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tvp, PT_OLD_TIMEVAL, ret);
}

COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct old_timeval32 __user *, tvp)
{
	return do_compat_select(n, inp, outp, exp, tvp);
}

struct compat_sel_arg_struct {
	compat_ulong_t n;
	compat_uptr_t inp;
	compat_uptr_t outp;
	compat_uptr_t exp;
	compat_uptr_t tvp;
};

COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
	struct compat_sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				compat_ptr(a.exp), compat_ptr(a.tvp));
}

static long do_compat_pselect(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	void __user *tsp, compat_sigset_t __user *sigmask,
	compat_size_t sigsetsize, enum poll_time_type type)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		switch (type) {
		case PT_OLD_TIMESPEC:
			if (get_old_timespec32(&ts, tsp))
				return -EFAULT;
			break;
		case PT_TIMESPEC:
			if (get_timespec64(&ts, tsp))
				return -EFAULT;
			break;
		default:
			BUG();
		}

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_compat_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tsp, type, ret);
}

COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct __kernel_timespec __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
				__get_user(up, (compat_uptr_t __user *)sig) ||
				__get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}

	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize, PT_TIMESPEC);
}

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct old_timespec32 __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
				__get_user(up, (compat_uptr_t __user *)sig) ||
				__get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}

	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize, PT_OLD_TIMESPEC);
}

#endif

#if defined(CONFIG_COMPAT_32BIT_TIME)
COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
	unsigned int,  nfds, struct old_timespec32 __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_old_timespec32(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_compat_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
}
#endif

/* New compat syscall for 64 bit time_t */
COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
	unsigned int,  nfds, struct __kernel_timespec __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_compat_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
}

#endif