/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	entry;
};

struct wait_queue_head {
	spinlock_t		lock;
	struct list_head	head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)
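
/*
 * Usage sketch (illustrative, not part of this header): a waitqueue head
 * can either be declared statically or embedded in a structure and
 * initialized at runtime. The struct 'foo' below is hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);	// static initialization
 *
 *	struct foo {
 *		struct wait_queue_head	wq;
 *		bool			ready;
 *	};
 *
 *	void foo_setup(struct foo *f)
 *	{
 *		init_waitqueue_head(&f->wq);	// dynamic initialization
 *		f->ready = false;
 *	}
 */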

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags		= 0;
	wq_entry->private	= p;
	wq_entry->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags		= 0;
	wq_entry->private	= NULL;
	wq_entry->func		= func;
}
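
/*
 * Illustrative sketch: an entry with a custom wake function lets the waker
 * run a callback instead of directly waking a task; this is how constructs
 * such as poll tables hook into waitqueues. 'my_wake' below is a
 * hypothetical example of the wait_queue_func_t signature.
 *
 *	static int my_wake(struct wait_queue_entry *wq_entry, unsigned mode,
 *			   int flags, void *key)
 *	{
 *		// inspect 'key' (e.g. a poll event mask), then wake or defer
 *		return default_wake_function(wq_entry, mode, flags, key);
 *	}
 *
 *	struct wait_queue_entry entry;
 *
 *	init_waitqueue_func_entry(&entry, my_wake);
 *	add_wait_queue(&wq_head, &entry);
 */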

/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))    if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}

/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
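
/*
 * Waker-side sketch of the barrier pairing described above ('cond' is any
 * shared condition variable; names are illustrative):
 *
 *	cond = true;			// publish the condition
 *	if (wq_has_sleeper(&wq_head))	// smp_mb() inside orders the
 *		wake_up(&wq_head);	// store above vs. the list load
 *
 * This avoids taking wait_queue_head::lock in wake_up() when the list is
 * observed empty, at the cost of the smp_mb().
 */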

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->entry, &wq_head->head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
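
/*
 * Illustrative sketch (hypothetical names): a driver's write path reporting
 * readable data to pollers. The event mask is packed into the wakeup key so
 * that callbacks registered via poll_wait() can filter on it:
 *
 *	// data was just queued; wake readers and poll waiters
 *	wake_up_interruptible_poll(&dev_wq, EPOLLIN | EPOLLRDNORM);
 */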

/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}

#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})
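
/*
 * Note on ___wait_cond_timeout(): __ret carries the remaining timeout in
 * the enclosing macro. If the condition turned true just as the timeout
 * decayed to 0, __ret is forced to 1 so callers can still distinguish
 * "condition met" (>= 1) from "timed out" (0). The resulting expression
 * is true (stop waiting) when the condition holds or the timeout expired.
 */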

#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})

#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
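
/*
 * Usage sketch for wait_event() (names are hypothetical):
 *
 *	// waiter
 *	wait_event(work_wq, work_pending);
 *
 *	// waker: update the condition *before* waking
 *	work_pending = true;
 *	wake_up(&work_wq);
 */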

#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)

#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})

#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})
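
/*
 * Return-value handling sketch for wait_event_timeout() (hypothetical
 * names; HZ jiffies == 1 second):
 *
 *	long ret = wait_event_timeout(dev_wq, dev_ready, HZ);
 *	if (!ret)
 *		return -ETIMEDOUT;	// condition still false after 1s
 *	// ret >= 1: condition became true, 'ret' jiffies were left
 */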

#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})
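
/*
 * Usage sketch for wait_event_interruptible() (hypothetical names): the
 * -ERESTARTSYS return must be propagated so the signal can be handled or
 * the syscall transparently restarted:
 *
 *	err = wait_event_interruptible(dev_wq, dev_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: interrupted by a signal
 *	// dev_ready was observed true
 */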

#define __wait_event_interruptible_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})

#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX) {						\
		hrtimer_set_expires_range_ns(&__t.timer, timeout,		\
					     current->timer_slack_ns);		\
		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);		\
	}									\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true; being uninterruptible, it does not react to signals.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
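
/*
 * Usage sketch for wait_event_hrtimeout() (hypothetical names): the
 * timeout is a ktime_t, so sub-jiffy precision is possible:
 *
 *	int err = wait_event_hrtimeout(dev_wq, dev_ready,
 *				       ms_to_ktime(5));	// 5 ms deadline
 *	if (err == -ETIME)
 *		return err;	// timed out before dev_ready turned true
 */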

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})

#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})


#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})

/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the same list, a wakeup that
 * wakes this process will not consider any further processes.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the same list, a wakeup that
 * wakes this process will not consider any further processes.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;									\
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)	\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
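
/*
 * Usage sketch (hypothetical names): the waitqueue lock doubles as the
 * lock protecting the condition, so no separate spinlock is needed:
 *
 *	spin_lock(&dev_wq.lock);
 *	err = wait_event_interruptible_locked(dev_wq, dev_ready);
 *	if (!err)
 *		consume_data();		// still under dev_wq.lock
 *	spin_unlock(&dev_wq.lock);
 *
 * The matching waker updates the condition and calls wake_up_locked()
 * while holding dev_wq.lock.
 */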

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list, a wakeup that
 * wakes this process will not consider any further processes.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list, a wakeup that
 * wakes this process will not consider any further processes.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))


#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})
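
/*
 * Usage sketch for wait_event_killable() (hypothetical names): useful
 * where an ordinary signal must not abort the wait, but a fatal signal
 * (e.g. SIGKILL) still should, so the task cannot become unkillable:
 *
 *	err = wait_event_killable(io_wq, io_done);
 *	if (err)
 *		return err;	// -ERESTARTSYS: fatal signal pending
 */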

#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						      condition, timeout);	\
	__ret;									\
})


#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, );			\
} while (0)
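
/*
 * Usage sketch for wait_event_lock_irq() (all names hypothetical,
 * including 'struct request'): the condition is protected by 'dev_lock',
 * which is dropped across schedule() and re-taken before each recheck:
 *
 *	spin_lock_irq(&dev_lock);
 *	wait_event_lock_irq(dev_wq, !list_empty(&dev_queue), dev_lock);
 *	req = list_first_entry(&dev_queue, struct request, list);
 *	spin_unlock_irq(&dev_lock);
 */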


#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock, cmd);		\
	__ret;									\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock,);		\
	__ret;									\
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      state, 0, timeout,					\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_INTERRUPTIBLE);			\
	__ret;									\
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
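
/*
 * Open-coded wait loop sketch using DEFINE_WAIT() with the autoremove
 * wake function ('condition' is hypothetical), for cases the
 * wait_event*() macros cannot express:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current))
 *			break;			// caller handles -ERESTARTSYS
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */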

#define init_wait(wait)								\
do {										\
	(wait)->private = current;						\
	(wait)->func = autoremove_wake_function;				\
	INIT_LIST_HEAD(&(wait)->entry);						\
	(wait)->flags = 0;							\
} while (0)

#endif /* _LINUX_WAIT_H */