#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H

#include <asm/poll.h>

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
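
/*
 * Worked example (illustrative, not part of the original header): the 832
 * bytes above split into a 256-byte frontend budget and a 576-byte budget
 * for inline poll_table_entry slots.  If, purely as an assumed figure,
 * sizeof(struct poll_table_entry) were 64 bytes on a given architecture,
 * N_INLINE_POLL_ENTRIES would be 576 / 64 = 9, so a small select()/poll()
 * call could queue up to 9 waiters without any heap allocation.
 */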

#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)

struct poll_table_struct;

/*
 * structures and helpers for f_op->poll implementations
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

typedef struct poll_table_struct {
	poll_queue_proc qproc;
} poll_table;

static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
	if (p && wait_address)
		p->qproc(filp, wait_address, p);
}

static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->qproc = qproc;
}
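
/*
 * Usage sketch (illustrative, not part of the original header): a driver's
 * f_op->poll method calls poll_wait() for every wait queue it may need a
 * wakeup from, then returns the events that are ready right now.  The
 * my_dev/my_chr_poll names below are hypothetical.
 *
 *	static unsigned int my_chr_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &dev->read_wait, wait);
 *		poll_wait(file, &dev->write_wait, wait);
 *
 *		if (dev->read_avail)
 *			mask |= POLLIN | POLLRDNORM;
 *		if (dev->write_space)
 *			mask |= POLLOUT | POLLWRNORM;
 *		return mask;
 *	}
 */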

struct poll_table_entry {
	struct file *filp;
	wait_queue_t wait;
	wait_queue_head_t *wait_address;
};

/*
 * Structures and helpers for sys_select/sys_poll
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;
	struct task_struct *polling_task;
	int triggered;
	int error;
	int inline_index;
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};

extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
				 ktime_t *expires, unsigned long slack);

static inline int poll_schedule(struct poll_wqueues *pwq, int state)
{
	return poll_schedule_timeout(pwq, state, NULL, 0);
}
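
/*
 * Lifecycle sketch (illustrative, not part of the original header): a poll
 * loop such as the one in fs/select.c typically drives these helpers as
 * follows; the loop body and the count/expires/slack variables are
 * hypothetical placeholders.
 *
 *	struct poll_wqueues table;
 *
 *	poll_initwait(&table);
 *	for (;;) {
 *		// call each file's f_op->poll(file, &table.pt); the queue
 *		// callback adds this task to the file's wait queues
 *		if (count || table.error)
 *			break;
 *		// nothing ready yet: sleep until a wakeup or the timeout
 *		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
 *					   expires, slack))
 *			break;	// timed out
 *	}
 *	poll_freewait(&table);
 */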

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG (8*sizeof(long))
#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
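
/*
 * Worked example (illustrative, not part of the original header): on a
 * 64-bit build where sizeof(long) is 8, FDS_BITPERLONG is 64, so for
 * nr = 100 descriptors FDS_LONGS(100) = (100 + 63) / 64 = 2 longwords and
 * FDS_BYTES(100) = 2 * 8 = 16 bytes per bitmap.
 */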

/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}
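
/*
 * Usage sketch (illustrative, not part of the original header): select()
 * style code points the six fd_set_bits bitmaps at one contiguous buffer
 * of 6 * FDS_BYTES(n) bytes, then copies in, clears, polls and copies
 * back out.  The fds/inp/outp/exp/end_time/ret names are hypothetical.
 *
 *	fd_set_bits fds;
 *	// fds.in/out/ex and fds.res_in/res_out/res_ex each point at an
 *	// FDS_BYTES(n)-sized bitmap inside the shared buffer
 *
 *	if (get_fd_set(n, inp, fds.in) ||
 *	    get_fd_set(n, outp, fds.out) ||
 *	    get_fd_set(n, exp, fds.ex))
 *		return -EFAULT;
 *	zero_fd_set(n, fds.res_in);
 *	zero_fd_set(n, fds.res_out);
 *	zero_fd_set(n, fds.res_ex);
 *
 *	ret = do_select(n, &fds, end_time);
 *
 *	if (set_fd_set(n, inp, fds.res_in) ||
 *	    set_fd_set(n, outp, fds.res_out) ||
 *	    set_fd_set(n, exp, fds.res_ex))
 *		ret = -EFAULT;
 */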

#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
		       struct timespec *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time);

extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
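
/*
 * Call-flow sketch (illustrative, not part of the original header): the
 * select() entry point turns a relative timeval into an absolute end_time
 * with poll_select_set_timeout() and then hands the user-space fd_set
 * pointers to core_sys_select().  The tv/tvp/to names are hypothetical.
 *
 *	struct timespec end_time, *to = NULL;
 *
 *	if (tvp) {
 *		// tv already copied in from user space
 *		to = &end_time;
 *		if (poll_select_set_timeout(to,
 *				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
 *				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
 *			return -EINVAL;
 *	}
 *
 *	ret = core_sys_select(n, inp, outp, exp, to);
 */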

#endif /* KERNEL */

#endif /* _LINUX_POLL_H */