• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_WAIT_H
3 #define _LINUX_WAIT_H
4 /*
5  * Linux wait queue related types and methods
6  */
7 #include <linux/list.h>
8 #include <linux/stddef.h>
9 #include <linux/spinlock.h>
10 
11 #include <asm/current.h>
12 #include <uapi/linux/wait.h>
13 
14 typedef struct wait_queue_entry wait_queue_entry_t;
15 
16 typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17 int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
18 
/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake-one semantics; see __add_wait_queue_exclusive() */
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10
#define WQ_FLAG_PRIORITY	0x20	/* kept ahead of normal entries; see __add_wait_queue() */
26 
27 /*
28  * A single wait-queue entry structure:
29  */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits */
	void			*private;	/* typically the waiting task_struct; see init_waitqueue_entry() */
	wait_queue_func_t	func;		/* wake-up callback */
	struct list_head	entry;		/* link on wait_queue_head::head */
};

struct wait_queue_head {
	spinlock_t		lock;		/* protects @head; see waitqueue_active() comment */
	struct list_head	head;		/* list of queued wait_queue_entry::entry */
};
typedef struct wait_queue_head wait_queue_head_t;
42 
43 struct task_struct;
44 
45 /*
 * Macros for declaration and initialisation of the datatypes
47  */
48 
/*
 * Static initializer for a wait_queue_entry that wakes task @tsk via
 * default_wake_function(); .flags is implicitly zero-initialized and
 * .entry is linked in later by the add_wait_queue*() helpers.
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

/* Define and initialize a wait-queue entry waking task @tsk. */
#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
56 
/* Static initializer for a wait_queue_head: unlocked lock, empty list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= LIST_HEAD_INIT(name.head) }

/* Define and statically initialize a wait-queue head. */
#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
63 
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initialization of a wait-queue head. The function-local static
 * lock_class_key gives each init site its own lockdep class, and the
 * stringified #wq_head names it.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)
72 
#ifdef CONFIG_LOCKDEP
/*
 * Under lockdep, on-stack heads must be initialized at runtime so the
 * lock class key gets registered; otherwise the static initializer is
 * sufficient and the ONSTACK variant degenerates to the plain one.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
81 
init_waitqueue_entry(struct wait_queue_entry * wq_entry,struct task_struct * p)82 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
83 {
84 	wq_entry->flags		= 0;
85 	wq_entry->private	= p;
86 	wq_entry->func		= default_wake_function;
87 }
88 
89 static inline void
init_waitqueue_func_entry(struct wait_queue_entry * wq_entry,wait_queue_func_t func)90 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
91 {
92 	wq_entry->flags		= 0;
93 	wq_entry->private	= NULL;
94 	wq_entry->func		= func;
95 }
96 
97 /**
98  * waitqueue_active -- locklessly test for waiters on the queue
99  * @wq_head: the waitqueue to test for waiters
100  *
101  * returns true if the wait list is not empty
102  *
103  * NOTE: this function is lockless and requires care, incorrect usage _will_
104  * lead to sporadic and non-obvious failure.
105  *
106  * Use either while holding wait_queue_head::lock or when used for wakeups
107  * with an extra smp_mb() like::
108  *
109  *      CPU0 - waker                    CPU1 - waiter
110  *
111  *                                      for (;;) {
112  *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
113  *      smp_mb();                         // smp_mb() from set_current_state()
114  *      if (waitqueue_active(wq_head))         if (@cond)
115  *        wake_up(wq_head);                      break;
116  *                                        schedule();
117  *                                      }
118  *                                      finish_wait(&wq_head, &wait);
119  *
120  * Because without the explicit smp_mb() it's possible for the
121  * waitqueue_active() load to get hoisted over the @cond store such that we'll
122  * observe an empty wait list while the waiter might not observe @cond.
123  *
124  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
125  * which (when the lock is uncontended) are of roughly equal cost.
126  */
waitqueue_active(struct wait_queue_head * wq_head)127 static inline int waitqueue_active(struct wait_queue_head *wq_head)
128 {
129 	return !list_empty(&wq_head->head);
130 }
131 
132 /**
133  * wq_has_single_sleeper - check if there is only one sleeper
134  * @wq_head: wait queue head
135  *
 * Returns true if wq_head has only one sleeper on the list.
137  *
138  * Please refer to the comment for waitqueue_active.
139  */
wq_has_single_sleeper(struct wait_queue_head * wq_head)140 static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
141 {
142 	return list_is_singular(&wq_head->head);
143 }
144 
145 /**
146  * wq_has_sleeper - check if there are any waiting processes
147  * @wq_head: wait queue head
148  *
149  * Returns true if wq_head has waiting processes
150  *
151  * Please refer to the comment for waitqueue_active.
152  */
wq_has_sleeper(struct wait_queue_head * wq_head)153 static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
154 {
155 	/*
156 	 * We need to be sure we are in sync with the
157 	 * add_wait_queue modifications to the wait queue.
158 	 *
159 	 * This memory barrier should be paired with one on the
160 	 * waiting side.
161 	 */
162 	smp_mb();
163 	return waitqueue_active(wq_head);
164 }
165 
166 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
167 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
168 extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
169 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
170 
__add_wait_queue(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)171 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
172 {
173 	struct list_head *head = &wq_head->head;
174 	struct wait_queue_entry *wq;
175 
176 	list_for_each_entry(wq, &wq_head->head, entry) {
177 		if (!(wq->flags & WQ_FLAG_PRIORITY))
178 			break;
179 		head = &wq->entry;
180 	}
181 	list_add(&wq_entry->entry, head);
182 }
183 
184 /*
185  * Used for wake-one threads:
186  */
187 static inline void
__add_wait_queue_exclusive(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)188 __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
189 {
190 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
191 	__add_wait_queue(wq_head, wq_entry);
192 }
193 
__add_wait_queue_entry_tail(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)194 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
195 {
196 	list_add_tail(&wq_entry->entry, &wq_head->head);
197 }
198 
199 static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)200 __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
201 {
202 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
203 	__add_wait_queue_entry_tail(wq_head, wq_entry);
204 }
205 
206 static inline void
__remove_wait_queue(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)207 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
208 {
209 	list_del(&wq_entry->entry);
210 }
211 
212 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
213 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
214 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
215 		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
216 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
217 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
218 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
219 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
220 void __wake_up_pollfree(struct wait_queue_head *wq_head);
221 
/*
 * Wake waiters in TASK_NORMAL (interruptible or uninterruptible) state.
 * The numeric argument to __wake_up() is the number of exclusive
 * waiters to wake; 0 means wake all of them.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
/* _locked variants: caller already holds wait_queue_head::lock. */
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

/* Same as above, but only TASK_INTERRUPTIBLE waiters are eligible. */
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
#define wake_up_sync(x)			__wake_up_sync((x), TASK_NORMAL)
233 
/*
 * Wakeup macros to be used to report events to the targets.
 */
/* Pack a __poll_t event mask into the opaque 'key' pointer, and back. */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
/* Wake-up variants that pass a poll event mask as the wake key. */
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
249 
250 /**
251  * wake_up_pollfree - signal that a polled waitqueue is going away
252  * @wq_head: the wait queue head
253  *
254  * In the very rare cases where a ->poll() implementation uses a waitqueue whose
255  * lifetime is tied to a task rather than to the 'struct file' being polled,
256  * this function must be called before the waitqueue is freed so that
257  * non-blocking polls (e.g. epoll) are notified that the queue is going away.
258  *
259  * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
260  * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
261  */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * Cheap lockless test first; only take the __wake_up_pollfree()
	 * slow path when the queue looks non-empty.  We may race with a
	 * waiter removing the last entry while it still holds the queue
	 * lock, but since the caller must RCU-delay freeing the head
	 * (see the comment above), proceeding is safe.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}
274 
/*
 * Re-check @condition against the remaining timeout in the enclosing
 * __ret variable: if the condition became true exactly as the timeout
 * expired (__ret == 0), force __ret to 1 so the caller reports success.
 * Evaluates @condition exactly once; yields "stop waiting" as a bool.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

/*
 * True when @state may be woken by signals; a non-compile-time-constant
 * state is conservatively treated as interruptible.
 */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\
287 extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
288 
289 /*
290  * The below macro ___wait_event() has an explicit shadow of the __ret
291  * variable when used from the wait_event_*() macros.
292  *
293  * This is so that both can use the ___wait_cond_timeout() construct
294  * to wrap the condition.
295  *
296  * The type inconsistency of the wait_event_*() __ret variable is also
297  * on purpose; we use long where we can return timeout values and int
298  * otherwise.
299  */
300 
/*
 * Core wait loop: queue an entry (exclusive if requested), then
 * alternate between checking @condition and running @cmd (normally some
 * schedule() flavour) until the condition holds or, for interruptible
 * states, prepare_to_wait_event() returns a nonzero error (__int) which
 * becomes the result.  @ret seeds the __ret result variable.
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		/* nonzero __int: error from prepare_to_wait_event() */		\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
324 
/* Uninterruptible wait with no timeout; result discarded. */
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
348 
/* Like __wait_event() but the sleep is accounted via io_schedule(). */
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
363 
/* Interruptible wait that uses freezable_schedule() so the freezer can
 * freeze the task while it sleeps. */
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
388 
/* Timed uninterruptible wait; __ret carries the remaining jiffies and is
 * updated by schedule_timeout() each iteration. */
#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})
421 
/* Timed interruptible + freezable wait; __ret carries remaining jiffies. */
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})
439 
/* Exclusive uninterruptible wait running @cmd1 before and @cmd2 after
 * each schedule(). */
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

/* Non-exclusive variant of the above. */
#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)
477 
/* Interruptible wait; result is 0 or a signal error from ___wait_event(). */
#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})
505 
/* Timed interruptible wait; __ret carries the remaining jiffies. */
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
540 
/*
 * Sleep in @state until @condition or an on-stack hrtimer expires.
 * KTIME_MAX means "no timeout" (the timer is never armed).  Returns 0
 * on success, -ETIME when the expired timer cleared __t.task, or an
 * error propagated out of ___wait_event() for interruptible @state.
 */
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX) {						\
		hrtimer_set_expires_range_ns(&__t.timer, timeout,		\
					current->timer_slack_ns);		\
		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);		\
	}									\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})
565 
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses; signals are
 * ignored in this state.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
591 
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})
617 
/* Exclusive (wake-one) interruptible wait. */
#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

/* Returns 0 when @condition holds, or a signal error (see
 * wait_event_interruptible()). */
#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

/* Exclusive wait interruptible only by fatal signals (TASK_KILLABLE). */
#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})


/* Exclusive, interruptible and freezable wait. */
#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
			freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})
657 
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)
701 
/* Timed TASK_IDLE wait; __ret carries the remaining jiffies. */
#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

/* Exclusive (wake-one) variant of the timed TASK_IDLE wait. */
#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;									\
})
771 
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

/*
 * Common body for the *_locked waiters: loop on @fn (do_wait_intr or
 * do_wait_intr_irq), which sleeps with wq.lock dropped (see the
 * kernel-doc of the wrappers below), until @condition holds or @fn
 * reports an error; that error (or 0) is the result.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})
790 
791 
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 *
 * This is the non-exclusive variant built on
 * __wait_event_interruptible_locked() with do_wait_intr().
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
818 
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.  If @condition is
 * already true when called, 0 is returned without sleeping.
 */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
845 
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if several processes wait on the list and this process is
 * woken, the wakeup is satisfied and further waiters are not
 * considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
876 
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if several processes wait on the list and this process is
 * woken, the wakeup is satisfied and further waiters are not
 * considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
907 
908 
/* Sleep in TASK_KILLABLE: only fatal (kill) signals interrupt the wait. */
#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
911 
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * kill signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})
935 
/* TASK_KILLABLE sleep bounded by a jiffies timeout; __ret (declared by
 * ___wait_event()) carries the remaining jiffies across iterations. */
#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))
940 
/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
/* NOTE(review): the local must keep the name __ret -- ___wait_cond_timeout()
 * appears to reference it by name; confirm against its definition above. */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						condition, timeout);		\
	__ret;									\
})
972 
973 
/*
 * Sleep in TASK_UNINTERRUPTIBLE with the irq-disabling spinlock @lock
 * dropped around @cmd and schedule() and reacquired afterwards.  The
 * (void) cast discards ___wait_event()'s result, which is unused by
 * the uninterruptible wrappers below.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))
980 
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * This macro is a statement, not an expression: it does not return a
 * value.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
} while (0)
1010 
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * This macro is a statement, not an expression: it does not return a
 * value.
 */
#define wait_event_lock_irq(wq_head, condition, lock)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, );			\
} while (0)
1037 
1038 
/*
 * Like __wait_event_lock_irq() but sleeps in TASK_INTERRUPTIBLE, so
 * ___wait_event()'s result (0 or -ERESTARTSYS on signal) is propagated
 * to the wrappers below.
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))
1045 
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.  If @condition is already true
 * when called, 0 is returned without sleeping.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock, cmd);		\
	__ret;									\
})
1079 
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.  If @condition is already true
 * when called, 0 is returned without sleeping.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock,);		\
	__ret;									\
})
1110 
/*
 * Common helper for the *_lock_irq_timeout() wrappers below: sleep in
 * @state until ___wait_cond_timeout(@condition) is satisfied, dropping
 * the irq-disabling @lock around each schedule_timeout() and reacquiring
 * it afterwards.  __ret (declared by ___wait_event()) carries the
 * remaining jiffies.
 *
 * Note: no trailing semicolon -- callers terminate the statement
 * themselves, matching every other __wait_event_*() helper; the old
 * stray ';' merely expanded to an empty statement at each use site.
 */
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      state, 0, timeout,					\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock))
1117 
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
/* NOTE(review): the local must keep the name __ret -- ___wait_cond_timeout()
 * appears to reference it by name; confirm against its definition above. */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_INTERRUPTIBLE);			\
	__ret;									\
})
1152 
/**
 * wait_event_lock_irq_timeout - sleep until a condition gets true or a
 *		timeout elapses. The condition is checked under the lock.
 *		This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * Same as wait_event_interruptible_lock_irq_timeout() except the sleep
 * is TASK_UNINTERRUPTIBLE, so it cannot return -ERESTARTSYS: the result
 * is 0 if the @timeout elapsed, or the remaining jiffies (at least 1)
 * if @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})
1162 
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 * (paired with the autoremove_wake_function() based DEFINE_WAIT() below).
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
/* NOTE(review): wait_woken()/woken_wake_function() presumably coordinate via
 * WQ_FLAG_WOKEN (defined above) -- confirm in kernel/sched/wait.c. */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1173 
/*
 * Define an on-stack wait queue entry for the current task with the given
 * wakeup callback, for use with prepare_to_wait()/finish_wait().
 */
#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}

/* Common case: autoremove_wake_function() dequeues the entry at wakeup. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1182 
/*
 * Run-time initialisation of an existing wait_queue_entry for the current
 * task -- the DEFINE_WAIT() equivalent for storage not declared on the stack.
 */
#define init_wait(wait)								\
	do {									\
		(wait)->private = current;					\
		(wait)->func = autoremove_wake_function;			\
		INIT_LIST_HEAD(&(wait)->entry);					\
		(wait)->flags = 0;						\
	} while (0)
1190 
/* Invoke @func(@p, @arg) and return its result; NOTE(review): the exact
 * locking/pinning guarantees are in the definition (kernel/sched) -- confirm
 * there before relying on them. */
bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
1192 
1193 #endif /* _LINUX_WAIT_H */
1194