• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_WAIT_H
3 #define _LINUX_WAIT_H
4 /*
5  * Linux wait queue related types and methods
6  */
7 #include <linux/list.h>
8 #include <linux/stddef.h>
9 #include <linux/spinlock.h>
10 
11 #include <asm/current.h>
12 #include <uapi/linux/wait.h>
13 
14 typedef struct wait_queue_entry wait_queue_entry_t;
15 
16 typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17 int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
18 
/*
 * wait_queue_entry::flags
 *
 * WQ_FLAG_EXCLUSIVE is set by the __add_wait_queue*_exclusive() helpers
 * below and by ___wait_event(..., exclusive=1, ...); the remaining flags
 * are managed by the wait/wake implementation itself (NOTE(review): see
 * kernel/sched/wait.c for their life cycle).
 */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits above */
	void			*private;	/* typically the waiting task (see init_waitqueue_entry()) */
	wait_queue_func_t	func;		/* wakeup callback, e.g. default_wake_function() */
	struct list_head	entry;		/* link on wait_queue_head::head */
};

struct wait_queue_head {
	spinlock_t		lock;		/* serializes modifications of ->head */
	struct list_head	head;		/* list of queued wait_queue_entry::entry */
};
typedef struct wait_queue_head wait_queue_head_t;
41 
42 struct task_struct;
43 
44 /*
 * Macros for declaration and initialisation of the datatypes
46  */
47 
/* Static initializer for an entry waking task @tsk; ->flags is implicitly 0. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a head: unlocked spinlock, empty (self-linked) list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Run-time initialization.  The function-local static lock_class_key gives
 * every init_waitqueue_head() call site its own lockdep class, and #wq_head
 * passes the variable's name for debugging.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
/* On-stack heads need run-time init so lockdep sees a fresh key per stack. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
80 
/*
 * init_waitqueue_entry - set up @wq_entry to wake task @p via
 * default_wake_function().  The entry is not queued on any list yet.
 */
static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags		= 0;
	wq_entry->private	= p;
	wq_entry->func		= default_wake_function;
}
87 
/*
 * init_waitqueue_func_entry - set up @wq_entry with a custom wakeup
 * callback @func and no owning task (->private == NULL).
 */
static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags		= 0;
	wq_entry->private	= NULL;
	wq_entry->func		= func;
}
95 
96 /**
97  * waitqueue_active -- locklessly test for waiters on the queue
98  * @wq_head: the waitqueue to test for waiters
99  *
100  * returns true if the wait list is not empty
101  *
102  * NOTE: this function is lockless and requires care, incorrect usage _will_
103  * lead to sporadic and non-obvious failure.
104  *
105  * Use either while holding wait_queue_head::lock or when used for wakeups
106  * with an extra smp_mb() like::
107  *
108  *      CPU0 - waker                    CPU1 - waiter
109  *
110  *                                      for (;;) {
111  *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
112  *      smp_mb();                         // smp_mb() from set_current_state()
113  *      if (waitqueue_active(wq_head))         if (@cond)
114  *        wake_up(wq_head);                      break;
115  *                                        schedule();
116  *                                      }
117  *                                      finish_wait(&wq_head, &wait);
118  *
119  * Because without the explicit smp_mb() it's possible for the
120  * waitqueue_active() load to get hoisted over the @cond store such that we'll
121  * observe an empty wait list while the waiter might not observe @cond.
122  *
123  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
124  * which (when the lock is uncontended) are of roughly equal cost.
125  */
waitqueue_active(struct wait_queue_head * wq_head)126 static inline int waitqueue_active(struct wait_queue_head *wq_head)
127 {
128 	return !list_empty(&wq_head->head);
129 }
130 
131 /**
132  * wq_has_single_sleeper - check if there is only one sleeper
133  * @wq_head: wait queue head
134  *
 * Returns true if wq_head has only one sleeper on the list.
136  *
137  * Please refer to the comment for waitqueue_active.
138  */
wq_has_single_sleeper(struct wait_queue_head * wq_head)139 static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
140 {
141 	return list_is_singular(&wq_head->head);
142 }
143 
144 /**
145  * wq_has_sleeper - check if there are any waiting processes
146  * @wq_head: wait queue head
147  *
148  * Returns true if wq_head has waiting processes
149  *
150  * Please refer to the comment for waitqueue_active.
151  */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side, e.g. the smp_mb() implied by set_current_state()
	 * in prepare_to_wait() (see the waitqueue_active() comment above).
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
164 
165 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
166 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
167 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
168 
/*
 * __add_wait_queue - add @wq_entry at the head of @wq_head's list.
 * Double-underscore variant: the caller is expected to hold wq_head->lock.
 */
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->entry, &wq_head->head);
}
173 
174 /*
175  * Used for wake-one threads:
176  */
177 static inline void
__add_wait_queue_exclusive(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)178 __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
179 {
180 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
181 	__add_wait_queue(wq_head, wq_entry);
182 }
183 
/*
 * __add_wait_queue_entry_tail - add @wq_entry at the tail of @wq_head's list.
 * Caller is expected to hold wq_head->lock.
 */
static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}
188 
189 static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)190 __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
191 {
192 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
193 	__add_wait_queue_entry_tail(wq_head, wq_entry);
194 }
195 
/*
 * __remove_wait_queue - unlink @wq_entry from its wait list.
 * Caller is expected to hold wq_head->lock.
 */
static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}
201 
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

/*
 * Wakeup shorthands.  @nr bounds how many exclusive waiters are woken
 * (the *_all variants pass 0, i.e. wake them all); the _locked variants
 * are for callers already holding wq_head->lock.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
#define wake_up_sync(x)			__wake_up_sync((x), TASK_NORMAL)
223 
/*
 * Wakeup macros to be used to report events to the targets.
 *
 * poll_to_key()/key_to_poll() shuttle a __poll_t event mask through the
 * void *key argument of the __wake_up*() calls above, so wakeup callbacks
 * can filter on the poll events they care about.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
239 
240 /**
241  * wake_up_pollfree - signal that a polled waitqueue is going away
242  * @wq_head: the wait queue head
243  *
244  * In the very rare cases where a ->poll() implementation uses a waitqueue whose
245  * lifetime is tied to a task rather than to the 'struct file' being polled,
246  * this function must be called before the waitqueue is freed so that
247  * non-blocking polls (e.g. epoll) are notified that the queue is going away.
248  *
249  * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
250  * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
251  */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.
	 */
	if (!waitqueue_active(wq_head))
		return;

	__wake_up_pollfree(wq_head);
}
264 
/*
 * ___wait_cond_timeout - wrap @condition for the *_timeout waiters.
 *
 * Relies on a __ret variable in scope holding the remaining jiffies:
 * when @condition turns true with __ret already 0 (timeout elapsed),
 * __ret is promoted to 1 so callers can tell "became true" apart from
 * "timed out" (see the wait_event_timeout() return value docs below).
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

/*
 * True unless @state is a compile-time constant other than
 * TASK_INTERRUPTIBLE/TASK_KILLABLE; non-constant states are checked
 * conservatively at run time by the signal test in ___wait_event().
 */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
278 
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 *
 * Flow: prepare_to_wait_event() queues __wq_entry and sets the task
 * state, @condition is then re-checked before actually sleeping via
 * @cmd.  In an interruptible @state a pending signal makes
 * prepare_to_wait_event() return non-zero; that value becomes the
 * result and the loop is left via __out, skipping finish_wait()
 * (NOTE(review): prepare_to_wait_event() is expected to have already
 * dequeued the entry in that case -- verify against kernel/sched/wait.c).
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
314 
/* Non-exclusive, uninterruptible wait; sleeps via schedule(). */
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * @condition is tested once up front so an already-true condition skips
 * the waitqueue setup entirely (fast path).
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
338 
/* As __wait_event() but sleeps via io_schedule() (accounts as I/O wait). */
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
353 
/* Interruptible wait that sleeps via freezable_schedule() (freezer-aware). */
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
378 
/* Uninterruptible timed wait; remaining jiffies are carried in __ret. */
#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

/* As __wait_event_timeout() but interruptible and freezer-aware. */
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})
429 
/* Exclusive (wake-one) variant of __wait_event_cmd() below. */
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

/* Runs @cmd1 before and @cmd2 after each schedule() inside the wait loop. */
#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)
467 
/* Non-exclusive, signal-interruptible wait; sleeps via schedule(). */
#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})

/* Interruptible timed wait; remaining jiffies are carried in __ret. */
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
530 
/*
 * __wait_event_hrtimeout - common implementation for the hrtimeout waiters.
 *
 * Arms an on-stack hrtimer sleeper unless @timeout is KTIME_MAX (wait
 * forever).  The wait loop treats a cleared __t.task as "timer fired"
 * and returns -ETIME (NOTE(review): expiry waking the task and NULLing
 * __t.task is hrtimer_sleeper behavior defined elsewhere -- confirm).
 * An interruptible @state may instead end the wait with the value
 * returned by prepare_to_wait_event() (see ___wait_event()).
 */
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX) {						\
		hrtimer_set_expires_range_ns(&__t.timer, timeout,		\
					current->timer_slack_ns);		\
		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);		\
	}									\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})
555 
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses; being
 * uninterruptible, the sleep is not cut short by signals.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
581 
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})
607 
/* Exclusive (wake-one) interruptible wait. */
#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

/*
 * Like wait_event_interruptible(), but queued with WQ_FLAG_EXCLUSIVE so a
 * wake-one wakeup stops at this waiter.  Returns 0 or -ERESTARTSYS.
 */
#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

/* Exclusive wait interruptible only by fatal signals (TASK_KILLABLE). */
#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})


/* Exclusive, interruptible and freezer-aware (freezable_schedule()). */
#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
			freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})
647 
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)
691 
/* TASK_IDLE timed wait; remaining jiffies are carried in __ret. */
#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

/* Exclusive (wake-one) variant of __wait_event_idle_timeout(). */
#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;									\
})
761 
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

/*
 * Common implementation of the *_locked waiters below.  @fn is
 * do_wait_intr() or do_wait_intr_irq(), called with wq.lock held; per
 * the kernel-doc below, the lock is dropped while sleeping and held
 * again on return.  A non-zero @fn result (signal; -ERESTARTSYS per the
 * docs below) ends the loop early.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})
780 
781 
782 /**
783  * wait_event_interruptible_locked - sleep until a condition gets true
784  * @wq: the waitqueue to wait on
785  * @condition: a C expression for the event to wait for
786  *
787  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
788  * @condition evaluates to true or a signal is received.
789  * The @condition is checked each time the waitqueue @wq is woken up.
790  *
791  * It must be called with wq.lock being held.  This spinlock is
792  * unlocked while sleeping but @condition testing is done while lock
793  * is held and when this macro exits the lock is held.
794  *
795  * The lock is locked/unlocked using spin_lock()/spin_unlock()
796  * functions which must match the way they are locked/unlocked outside
797  * of this macro.
798  *
799  * wake_up_locked() has to be called after changing any variable that could
800  * change the result of the wait condition.
801  *
802  * The function will return -ERESTARTSYS if it was interrupted by a
803  * signal and 0 if @condition evaluated to true.
804  */
805 #define wait_event_interruptible_locked(wq, condition)				\
806 	((condition)								\
807 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
808 
809 /**
810  * wait_event_interruptible_locked_irq - sleep until a condition gets true
811  * @wq: the waitqueue to wait on
812  * @condition: a C expression for the event to wait for
813  *
814  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
815  * @condition evaluates to true or a signal is received.
816  * The @condition is checked each time the waitqueue @wq is woken up.
817  *
818  * It must be called with wq.lock being held.  This spinlock is
819  * unlocked while sleeping but @condition testing is done while lock
820  * is held and when this macro exits the lock is held.
821  *
822  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
823  * functions which must match the way they are locked/unlocked outside
824  * of this macro.
825  *
826  * wake_up_locked() has to be called after changing any variable that could
827  * change the result of the wait condition.
828  *
829  * The function will return -ERESTARTSYS if it was interrupted by a
830  * signal and 0 if @condition evaluated to true.
831  */
832 #define wait_event_interruptible_locked_irq(wq, condition)			\
833 	((condition)								\
834 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
835 
836 /**
837  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
838  * @wq: the waitqueue to wait on
839  * @condition: a C expression for the event to wait for
840  *
841  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
842  * @condition evaluates to true or a signal is received.
843  * The @condition is checked each time the waitqueue @wq is woken up.
844  *
845  * It must be called with wq.lock being held.  This spinlock is
846  * unlocked while sleeping but @condition testing is done while lock
847  * is held and when this macro exits the lock is held.
848  *
849  * The lock is locked/unlocked using spin_lock()/spin_unlock()
850  * functions which must match the way they are locked/unlocked outside
851  * of this macro.
852  *
853  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
854  * set thus when other process waits process on the list if this
855  * process is awaken further processes are not considered.
856  *
857  * wake_up_locked() has to be called after changing any variable that could
858  * change the result of the wait condition.
859  *
860  * The function will return -ERESTARTSYS if it was interrupted by a
861  * signal and 0 if @condition evaluated to true.
862  */
863 #define wait_event_interruptible_exclusive_locked(wq, condition)		\
864 	((condition)								\
865 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
866 
867 /**
868  * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
869  * @wq: the waitqueue to wait on
870  * @condition: a C expression for the event to wait for
871  *
872  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
873  * @condition evaluates to true or a signal is received.
874  * The @condition is checked each time the waitqueue @wq is woken up.
875  *
876  * It must be called with wq.lock being held.  This spinlock is
877  * unlocked while sleeping but @condition testing is done while lock
878  * is held and when this macro exits the lock is held.
879  *
880  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
881  * functions which must match the way they are locked/unlocked outside
882  * of this macro.
883  *
884  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
885  * set thus when other process waits process on the list if this
886  * process is awaken further processes are not considered.
887  *
888  * wake_up_locked() has to be called after changing any variable that could
889  * change the result of the wait condition.
890  *
891  * The function will return -ERESTARTSYS if it was interrupted by a
892  * signal and 0 if @condition evaluated to true.
893  */
894 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
895 	((condition)								\
896 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
897 
898 
/* Sleep in TASK_KILLABLE until @condition is true; see wait_event_killable(). */
#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
901 
902 /**
903  * wait_event_killable - sleep until a condition gets true
904  * @wq_head: the waitqueue to wait on
905  * @condition: a C expression for the event to wait for
906  *
907  * The process is put to sleep (TASK_KILLABLE) until the
908  * @condition evaluates to true or a signal is received.
909  * The @condition is checked each time the waitqueue @wq_head is woken up.
910  *
911  * wake_up() has to be called after changing any variable that could
912  * change the result of the wait condition.
913  *
914  * The function will return -ERESTARTSYS if it was interrupted by a
915  * signal and 0 if @condition evaluated to true.
916  */
917 #define wait_event_killable(wq_head, condition)					\
918 ({										\
919 	int __ret = 0;								\
920 	might_sleep();								\
921 	if (!(condition))							\
922 		__ret = __wait_event_killable(wq_head, condition);		\
923 	__ret;									\
924 })
925 
/* Timeout variant of __wait_event_killable(); see wait_event_killable_timeout(). */
#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))
930 
931 /**
932  * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
933  * @wq_head: the waitqueue to wait on
934  * @condition: a C expression for the event to wait for
935  * @timeout: timeout, in jiffies
936  *
937  * The process is put to sleep (TASK_KILLABLE) until the
938  * @condition evaluates to true or a kill signal is received.
939  * The @condition is checked each time the waitqueue @wq_head is woken up.
940  *
941  * wake_up() has to be called after changing any variable that could
942  * change the result of the wait condition.
943  *
944  * Returns:
945  * 0 if the @condition evaluated to %false after the @timeout elapsed,
946  * 1 if the @condition evaluated to %true after the @timeout elapsed,
947  * the remaining jiffies (at least 1) if the @condition evaluated
948  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
949  * interrupted by a kill signal.
950  *
951  * Only kill signals interrupt this process.
952  */
953 #define wait_event_killable_timeout(wq_head, condition, timeout)		\
954 ({										\
955 	long __ret = timeout;							\
956 	might_sleep();								\
957 	if (!___wait_cond_timeout(condition))					\
958 		__ret = __wait_event_killable_timeout(wq_head,			\
959 						condition, timeout);		\
960 	__ret;									\
961 })
962 
963 
/*
 * Uninterruptible wait with @lock dropped (spin_unlock_irq()) around @cmd
 * and schedule(), and re-taken before @condition is re-checked.  The
 * ___wait_event() result is cast to void: callers use this as a statement.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))
970 
971 /**
972  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
973  *			     condition is checked under the lock. This
974  *			     is expected to be called with the lock
975  *			     taken.
976  * @wq_head: the waitqueue to wait on
977  * @condition: a C expression for the event to wait for
978  * @lock: a locked spinlock_t, which will be released before cmd
979  *	  and schedule() and reacquired afterwards.
980  * @cmd: a command which is invoked outside the critical section before
981  *	 sleep
982  *
983  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
984  * @condition evaluates to true. The @condition is checked each time
985  * the waitqueue @wq_head is woken up.
986  *
987  * wake_up() has to be called after changing any variable that could
988  * change the result of the wait condition.
989  *
990  * This is supposed to be called while holding the lock. The lock is
991  * dropped before invoking the cmd and going to sleep and is reacquired
992  * afterwards.
993  */
994 #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
995 do {										\
996 	if (condition)								\
997 		break;								\
998 	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
999 } while (0)
1000 
1001 /**
1002  * wait_event_lock_irq - sleep until a condition gets true. The
1003  *			 condition is checked under the lock. This
1004  *			 is expected to be called with the lock
1005  *			 taken.
1006  * @wq_head: the waitqueue to wait on
1007  * @condition: a C expression for the event to wait for
1008  * @lock: a locked spinlock_t, which will be released before schedule()
1009  *	  and reacquired afterwards.
1010  *
1011  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
1012  * @condition evaluates to true. The @condition is checked each time
1013  * the waitqueue @wq_head is woken up.
1014  *
1015  * wake_up() has to be called after changing any variable that could
1016  * change the result of the wait condition.
1017  *
1018  * This is supposed to be called while holding the lock. The lock is
1019  * dropped before going to sleep and is reacquired afterwards.
1020  */
1021 #define wait_event_lock_irq(wq_head, condition, lock)				\
1022 do {										\
1023 	if (condition)								\
1024 		break;								\
1025 	__wait_event_lock_irq(wq_head, condition, lock, );			\
1026 } while (0)
1027 
1028 
/*
 * Interruptible counterpart of __wait_event_lock_irq(): evaluates to
 * ___wait_event()'s result (0 when @condition became true, -ERESTARTSYS
 * on a signal; see the callers' kernel-doc below).
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))
1035 
1036 /**
1037  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
1038  *		The condition is checked under the lock. This is expected to
1039  *		be called with the lock taken.
1040  * @wq_head: the waitqueue to wait on
1041  * @condition: a C expression for the event to wait for
1042  * @lock: a locked spinlock_t, which will be released before cmd and
1043  *	  schedule() and reacquired afterwards.
1044  * @cmd: a command which is invoked outside the critical section before
1045  *	 sleep
1046  *
1047  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1048  * @condition evaluates to true or a signal is received. The @condition is
1049  * checked each time the waitqueue @wq_head is woken up.
1050  *
1051  * wake_up() has to be called after changing any variable that could
1052  * change the result of the wait condition.
1053  *
1054  * This is supposed to be called while holding the lock. The lock is
1055  * dropped before invoking the cmd and going to sleep and is reacquired
1056  * afterwards.
1057  *
1058  * The macro will return -ERESTARTSYS if it was interrupted by a signal
1059  * and 0 if @condition evaluated to true.
1060  */
1061 #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
1062 ({										\
1063 	int __ret = 0;								\
1064 	if (!(condition))							\
1065 		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1066 						condition, lock, cmd);		\
1067 	__ret;									\
1068 })
1069 
1070 /**
1071  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
1072  *		The condition is checked under the lock. This is expected
1073  *		to be called with the lock taken.
1074  * @wq_head: the waitqueue to wait on
1075  * @condition: a C expression for the event to wait for
1076  * @lock: a locked spinlock_t, which will be released before schedule()
1077  *	  and reacquired afterwards.
1078  *
1079  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1080  * @condition evaluates to true or signal is received. The @condition is
1081  * checked each time the waitqueue @wq_head is woken up.
1082  *
1083  * wake_up() has to be called after changing any variable that could
1084  * change the result of the wait condition.
1085  *
1086  * This is supposed to be called while holding the lock. The lock is
1087  * dropped before going to sleep and is reacquired afterwards.
1088  *
1089  * The macro will return -ERESTARTSYS if it was interrupted by a signal
1090  * and 0 if @condition evaluated to true.
1091  */
1092 #define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
1093 ({										\
1094 	int __ret = 0;								\
1095 	if (!(condition))							\
1096 		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1097 						condition, lock,);		\
1098 	__ret;									\
1099 })
1100 
/*
 * Wait in @state (with @lock dropped around schedule_timeout() via
 * spin_unlock_irq()/spin_lock_irq()) until ___wait_cond_timeout(condition)
 * is satisfied; evaluates to the remaining-jiffies result.
 *
 * Note: the trailing semicolon was removed — this macro is used as an
 * expression (see wait_event_*_lock_irq_timeout() below), and the stray
 * ';' produced an empty statement at every call site and would break any
 * un-braced if/else use, unlike its siblings __wait_event_lock_irq() and
 * __wait_event_interruptible_lock_irq().
 */
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      state, 0, timeout,					\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock))
1107 
1108 /**
1109  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
1110  *		true or a timeout elapses. The condition is checked under
1111  *		the lock. This is expected to be called with the lock taken.
1112  * @wq_head: the waitqueue to wait on
1113  * @condition: a C expression for the event to wait for
1114  * @lock: a locked spinlock_t, which will be released before schedule()
1115  *	  and reacquired afterwards.
1116  * @timeout: timeout, in jiffies
1117  *
1118  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1119  * @condition evaluates to true or signal is received. The @condition is
1120  * checked each time the waitqueue @wq_head is woken up.
1121  *
1122  * wake_up() has to be called after changing any variable that could
1123  * change the result of the wait condition.
1124  *
1125  * This is supposed to be called while holding the lock. The lock is
1126  * dropped before going to sleep and is reacquired afterwards.
1127  *
1128  * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
1129  * was interrupted by a signal, and the remaining jiffies otherwise
1130  * if the condition evaluated to true before the timeout elapsed.
1131  */
1132 #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
1133 						  timeout)			\
1134 ({										\
1135 	long __ret = timeout;							\
1136 	if (!___wait_cond_timeout(condition))					\
1137 		__ret = __wait_event_lock_irq_timeout(				\
1138 					wq_head, condition, lock, timeout,	\
1139 					TASK_INTERRUPTIBLE);			\
1140 	__ret;									\
1141 })
1142 
/**
 * wait_event_lock_irq_timeout - sleep until a condition gets true or a
 *		timeout elapses. The condition is checked under the lock.
 *		This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * Uninterruptible (TASK_UNINTERRUPTIBLE) variant of
 * wait_event_interruptible_lock_irq_timeout(): same locking contract,
 * but signals are ignored, so -ERESTARTSYS is never returned.
 *
 * Returns 0 if the @timeout elapsed with @condition still false, and
 * otherwise the remaining jiffies (at least 1) if @condition became true.
 */
#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})
1152 
1153 /*
1154  * Waitqueues which are removed from the waitqueue_head at wakeup time
1155  */
1156 void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1157 bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1158 long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1159 void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1160 long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
1161 int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1162 int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1163 
/*
 * Declare an on-stack wait_queue_entry for the current task with @function
 * as its wake callback.  DEFINE_WAIT() uses autoremove_wake_function();
 * .flags is implicitly zero-initialized.
 */
#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1172 
/*
 * Initialize an existing wait_queue_entry at run time: equivalent to the
 * state DEFINE_WAIT() produces (current task, autoremove_wake_function,
 * empty list entry, flags cleared).
 */
#define init_wait(wait)								\
	do {									\
		(wait)->private = current;					\
		(wait)->func = autoremove_wake_function;			\
		INIT_LIST_HEAD(&(wait)->entry);					\
		(wait)->flags = 0;						\
	} while (0)
1180 
/* NOTE(review): defined in the scheduler core; appears to invoke @func(@p, @arg) with @p held "locked down" — confirm exact locking guarantees at the definition site. */
bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
1182 
1183 #endif /* _LINUX_WAIT_H */
1184