/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
#define WQ_FLAG_BOOKMARK 0x04
#define WQ_FLAG_CUSTOM 0x08
#define WQ_FLAG_DONE 0x10

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int flags;
	void *private;
	wait_queue_func_t func;
	struct list_head entry;
};

struct wait_queue_head {
	spinlock_t lock;
	struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private = tsk, \
	.func = default_wake_function, \
	.entry = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
	.head = { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((wq_head), #wq_head, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
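
/*
 * Illustrative sketch (not part of this header's API surface): the usual ways
 * to obtain an initialised wait queue head. The names freezer_wq and my_dev
 * are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(freezer_wq);	// file-scope, static init
 *
 *	struct my_dev {
 *		struct wait_queue_head wq;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);		// embedded/dynamic init
 *		return 0;
 *	}
 *
 * DECLARE_WAIT_QUEUE_HEAD_ONSTACK() is the variant to use for a head living
 * on the declaring function's stack, so lockdep gets a usable lock class.
 */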

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags = 0;
	wq_entry->private = p;
	wq_entry->func = default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags = 0;
	wq_entry->private = NULL;
	wq_entry->func = func;
}

/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *        @cond = true;                   prepare_to_wait(&wq_head, &wait, state);
 *        smp_mb();                       // smp_mb() from set_current_state()
 *        if (waitqueue_active(wq_head))  if (@cond)
 *          wake_up(wq_head);               break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}

/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
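
/*
 * Illustrative sketch (hypothetical names my_wq and my_data_ready): a waker
 * that publishes the condition first and then uses wq_has_sleeper() to skip
 * the wakeup when nobody is waiting. The smp_mb() inside wq_has_sleeper()
 * orders the store to my_data_ready before the load of the wait list, pairing
 * with the barrier issued on the waiting side by prepare_to_wait() /
 * set_current_state().
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_data_ready;
 *
 *	static void my_producer(void)
 *	{
 *		my_data_ready = true;
 *		if (wq_has_sleeper(&my_wq))
 *			wake_up(&my_wq);
 *	}
 */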

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->entry, &wq_head->head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m) \
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
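
/*
 * Illustrative sketch (hypothetical driver state struct my_dev with fields wq
 * and rx_ready, and the .c file including <linux/poll.h>): a ->poll()
 * implementation plus the matching wakeup that reports EPOLLIN to pollers.
 *
 *	static __poll_t my_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		poll_wait(file, &dev->wq, wait);
 *		return dev->rx_ready ? (EPOLLIN | EPOLLRDNORM) : 0;
 *	}
 *
 *	// in the receive path, after setting dev->rx_ready:
 *	wake_up_interruptible_poll(&dev->wq, EPOLLIN | EPOLLRDNORM);
 */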

/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}

#define ___wait_cond_timeout(condition) \
({ \
	bool __cond = (condition); \
	if (__cond && !__ret) \
		__ret = 1; \
	__cond || !__ret; \
})

#define ___wait_is_interruptible(state) \
	(!__builtin_constant_p(state) || \
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
({ \
	__label__ __out; \
	struct wait_queue_entry __wq_entry; \
	long __ret = ret; /* explicit shadow */ \
	\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
	for (;;) { \
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state); \
		\
		if (condition) \
			break; \
		\
		if (___wait_is_interruptible(state) && __int) { \
			__ret = __int; \
			goto __out; \
		} \
		\
		cmd; \
	} \
	finish_wait(&wq_head, &__wq_entry); \
__out:	__ret; \
})

#define __wait_event(wq_head, condition) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition) \
do { \
	might_sleep(); \
	if (condition) \
		break; \
	__wait_event(wq_head, condition); \
} while (0)
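
/*
 * Illustrative sketch (hypothetical names my_wq, my_done): the basic
 * producer/consumer pairing wait_event() is designed for. The waiter sleeps
 * in TASK_UNINTERRUPTIBLE until my_done is observed true; the producer must
 * update the condition *before* calling wake_up().
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_done;
 *
 *	static void my_waiter(void)
 *	{
 *		wait_event(my_wq, my_done);
 *		// my_done was observed true at least once when we get here
 *	}
 *
 *	static void my_completer(void)
 *	{
 *		my_done = true;
 *		wake_up(&my_wq);
 *	}
 */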

#define __io_wait_event(wq_head, condition) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition) \
do { \
	might_sleep(); \
	if (condition) \
		break; \
	__io_wait_event(wq_head, condition); \
} while (0)

#define __wait_event_freezable(wq_head, condition) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_freezable(wq_head, condition); \
	__ret; \
})

#define __wait_event_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_UNINTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_timeout(wq_head, condition, timeout); \
	__ret; \
})
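
/*
 * Illustrative sketch (hypothetical names my_wq, my_done): interpreting the
 * wait_event_timeout() return value. A zero result is the only case in which
 * the condition was still false when the wait gave up.
 *
 *	static long my_wait_for_done(void)
 *	{
 *		long left = wait_event_timeout(my_wq, my_done,
 *					       msecs_to_jiffies(500));
 *
 *		if (!left)
 *			return -ETIMEDOUT;	// condition still false
 *		return 0;			// condition true, 'left' jiffies remained (>= 1)
 *	}
 */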

#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret; \
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_interruptible(wq_head, condition) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_interruptible(wq_head, condition); \
	__ret; \
})
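
/*
 * Illustrative sketch (hypothetical names my_wq, my_done): a typical caller
 * in syscall context propagating the -ERESTARTSYS result so that signal
 * delivery can restart or abort the call.
 *
 *	static int my_wait_interruptible(void)
 *	{
 *		int ret = wait_event_interruptible(my_wq, my_done);
 *
 *		if (ret)
 *			return ret;	// -ERESTARTSYS: a signal arrived first
 *		// my_done was observed true
 *		return 0;
 *	}
 */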

#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_interruptible_timeout(wq_head, \
						condition, timeout); \
	__ret; \
})

#define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
({ \
	int __ret = 0; \
	struct hrtimer_sleeper __t; \
	\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
				      HRTIMER_MODE_REL); \
	if ((timeout) != KTIME_MAX) \
		hrtimer_start_range_ns(&__t.timer, timeout, \
				       current->timer_slack_ns, \
				       HRTIMER_MODE_REL); \
	\
	__ret = ___wait_event(wq_head, condition, state, 0, 0, \
		if (!__t.task) { \
			__ret = -ETIME; \
			break; \
		} \
		schedule()); \
	\
	hrtimer_cancel(&__t.timer); \
	destroy_hrtimer_on_stack(&__t.timer); \
	__ret; \
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
					       TASK_UNINTERRUPTIBLE); \
	__ret; \
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
	long __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_INTERRUPTIBLE); \
	__ret; \
})

#define __wait_event_interruptible_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_interruptible_exclusive(wq, condition); \
	__ret; \
})

#define __wait_event_killable_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
		      schedule())

#define wait_event_killable_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_killable_exclusive(wq, condition); \
	__ret; \
})


#define __wait_event_freezable_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_freezable_exclusive(wq, condition); \
	__ret; \
})

/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition) \
do { \
	might_sleep(); \
	if (!(condition)) \
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken, further processes waiting on the
 * same list are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition) \
do { \
	might_sleep(); \
	if (!(condition)) \
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_IDLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
	__ret; \
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_IDLE, 1, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken, further processes waiting on the
 * same list are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout); \
	__ret; \
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({ \
	int __ret; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		__ret = fn(&(wq), &__wait); \
		if (__ret) \
			break; \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
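
/*
 * Illustrative sketch (hypothetical struct my_dev with fields wq and count):
 * the _locked variants are for callers that serialise the condition with
 * wq.lock itself. The macro drops and retakes that lock around the actual
 * sleep, so both sides use plain spin_lock()/spin_unlock() and the
 * wake_up_locked() flavour.
 *
 *	static int my_consume(struct my_dev *dev)
 *	{
 *		int ret;
 *
 *		spin_lock(&dev->wq.lock);
 *		ret = wait_event_interruptible_locked(dev->wq, dev->count > 0);
 *		if (!ret)
 *			dev->count--;		// still holding wq.lock here
 *		spin_unlock(&dev->wq.lock);
 *		return ret;
 *	}
 *
 *	static void my_produce(struct my_dev *dev)
 *	{
 *		spin_lock(&dev->wq.lock);
 *		dev->count++;
 *		wake_up_locked(&dev->wq);
 *		spin_unlock(&dev->wq.lock);
 *	}
 */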

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken, further processes waiting on the
 * same list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken, further processes waiting on the
 * same list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))


#define __wait_event_killable(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_killable(wq_head, condition); \
	__ret; \
})

#define __wait_event_killable_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_KILLABLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_killable_timeout(wq_head, \
						      condition, timeout); \
	__ret; \
})


#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock); \
			    cmd; \
			    schedule(); \
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq_head, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq_head, condition, lock, ); \
} while (0)
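
/*
 * Illustrative sketch (hypothetical names my_wq, my_lock, my_count): waiting
 * for a condition protected by a spinlock taken with spin_lock_irq(). The
 * macro releases my_lock around schedule() and retakes it before re-checking,
 * so the condition is always evaluated under the lock.
 *
 *	static void my_wait_for_work(void)
 *	{
 *		spin_lock_irq(&my_lock);
 *		wait_event_lock_irq(my_wq, my_count > 0, my_lock);
 *		my_count--;			// still holding my_lock here
 *		spin_unlock_irq(&my_lock);
 *	}
 *
 *	// waker, typically from process context:
 *	spin_lock_irq(&my_lock);
 *	my_count++;
 *	wake_up(&my_wq);
 *	spin_unlock_irq(&my_lock);
 */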


#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      spin_unlock_irq(&lock); \
		      cmd; \
		      schedule(); \
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq_head, \
						condition, lock, cmd); \
	__ret; \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq_head, \
							    condition, lock,); \
	__ret; \
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      state, 0, timeout, \
		      spin_unlock_irq(&lock); \
		      __ret = schedule_timeout(__ret); \
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed and the @condition was
 * still false, -ERESTARTSYS if it was interrupted by a signal, or the
 * remaining jiffies (at least 1) if the @condition evaluated to true
 * before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
						  timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_lock_irq_timeout( \
					wq_head, condition, lock, timeout, \
					TASK_INTERRUPTIBLE); \
	__ret; \
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_lock_irq_timeout( \
					wq_head, condition, lock, timeout, \
					TASK_UNINTERRUPTIBLE); \
	__ret; \
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
	struct wait_queue_entry name = { \
		.private = current, \
		.func = function, \
		.entry = LIST_HEAD_INIT((name).entry), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define init_wait(wait) \
do { \
	(wait)->private = current; \
	(wait)->func = autoremove_wake_function; \
	INIT_LIST_HEAD(&(wait)->entry); \
	(wait)->flags = 0; \
} while (0)
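
/*
 * Illustrative sketch (hypothetical names my_wq, my_ready): the open-coded
 * wait loop that the wait_event*() family expands to, for callers that need
 * extra work between the condition check and the sleep. DEFINE_WAIT() uses
 * autoremove_wake_function, so a wakeup removes the entry from the queue;
 * finish_wait() also handles the case where no wakeup ever happened.
 *
 *	static void my_custom_wait(void)
 *	{
 *		DEFINE_WAIT(wait);
 *
 *		for (;;) {
 *			prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *			if (my_ready)
 *				break;
 *			schedule();
 *		}
 *		finish_wait(&my_wq, &wait);
 *	}
 */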

bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);

#endif /* _LINUX_WAIT_H */