/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}
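
/*
 * Worked example (illustrative): with the default divfactor of 1000
 * (0.1%), a 100 ms timeout yields 100000000 ns / 1000 = 100 us of
 * slack, while any timeout whose seconds part exceeds 100 s is
 * clamped straight to the 100 ms MAX_SLACK cap.
 */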

long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}


struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() do all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
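
/*
 * A minimal sketch of how a driver-side poll method typically calls
 * poll_wait(); the wait queue my_wq and the predicate my_data_ready()
 * are hypothetical names used purely for illustration:
 *
 *	static unsigned int my_poll(struct file *file, poll_table *wait)
 *	{
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &my_wq, wait);
 *		if (my_data_ready())
 *			mask |= POLLIN | POLLRDNORM;
 *		return mask;
 *	}
 */
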
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	get_file(filp);
	entry->filp = filp;
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = freezable_schedule_hrtimeout_range(expires, slack,
							HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
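
/*
 * For illustration: a caller holding a relative 2.5 second timeout
 * from user space would invoke
 * poll_select_set_timeout(&end_time, 2, 500000000), leaving end_time
 * holding the absolute monotonic expiry (now + 2.5 s).
 */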

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}
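
/*
 * Worked example (illustrative): on a 64-bit machine, a call with n = 70
 * where only (open) fd 66 is set in any of the three sets first masks
 * the partial word covering fds 64..69, jumps to get_max, shifts bit 2
 * of that word down three times, and returns 3 + 1 * 64 = 67, i.e. the
 * highest set descriptor plus one.
 */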

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit)
{
	wait->_key = POLLEX_SET;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
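
/*
 * Example: a descriptor whose bit is set only in the "in" set gets a
 * key of POLLEX_SET | POLLIN_SET, so pollwake() above will filter out
 * wakeups carrying nothing but POLLOUT-class events for that entry.
 */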

int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			const struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				int fput_needed;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				file = fget_light(i, &fput_needed);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out, bit);
						mask = (*f_op->poll)(file, wait);
					}
					fput_light(file, fput_needed);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
		    fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fd sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
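
/*
 * Userspace sketch (illustrative only): because select() rewrites the
 * timeval with the time remaining (unless STICKY_TIMEOUTS is set), the
 * structure must be re-initialized before every call:
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	fd_set rfds;
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	ret = select(fd + 1, &rfds, NULL, NULL, &tv);
 */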

#ifdef HAVE_SET_RESTORE_SIGMASK
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
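
/*
 * An illustrative sketch of how a caller (e.g. a libc wrapper) might
 * pack that sixth argument; the struct tag and field names here are
 * hypothetical:
 *
 *	struct sigset_argpack {
 *		const sigset_t *ss;	// pointer to the signal mask
 *		size_t ss_len;		// sizeof(sigset_t)
 *	} sig_arg = { &mask, sizeof(mask) };
 *
 *	syscall(__NR_pselect6, n, &rfds, NULL, NULL, &ts, &sig_arg);
 */
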
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
#endif /* HAVE_SET_RESTORE_SIGMASK */

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
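
/*
 * For scale (illustrative, assuming 4 KiB pages and a 64-bit build where
 * sizeof(struct poll_list) == 16 and sizeof(struct pollfd) == 8): each
 * chunk after the on-stack one holds (4096 - 16) / 8 = 510 pollfds.
 */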

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		int fput_needed;
		struct file * file;

		file = fget_light(fd, &fput_needed);
		mask = POLLNVAL;
		if (file != NULL) {
			mask = DEFAULT_POLLMASK;
			if (file->f_op && file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				mask = file->f_op->poll(file, pwait);
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fput_light(file, fput_needed);
		}
	}
	pollfd->revents = mask;

	return mask;
}

static int do_poll(unsigned int nfds, struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt->_qproc = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
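
/*
 * Userspace sketch (illustrative only): waiting up to five seconds for
 * a single descriptor sock_fd to become readable:
 *
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *	int ret = poll(&pfd, 1, 5000);
 *
 *	if (ret > 0 && (pfd.revents & POLLIN))
 *		// data is ready on sock_fd
 */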

#ifdef HAVE_SET_RESTORE_SIGMASK
SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif /* HAVE_SET_RESTORE_SIGMASK */