// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <trace/hooks/sched.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 * the list and any non-exclusive tasks will be woken first. A priority task
 * may be at the head of the list, and can consume the event without any other
 * tasks being woken.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_entry_t *curr, *next;

	lockdep_assert_held(&wq_head->lock);

	curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}

	return nr_exclusive;
}

static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	int remaining;

	spin_lock_irqsave(&wq_head->lock, flags);
	remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags,
			key);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return nr_exclusive - remaining;
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state. Returns the number of exclusive
 * tasks that were woken up.
 */
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
	      int nr_exclusive, void *key)
{
	return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
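
/*
 * Example (an illustrative sketch, not part of this file): __wake_up() is
 * normally reached through the wake_up*() macros in <linux/wait.h>. The
 * waitqueue "my_wq" and flag "done" below are hypothetical names.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool done;
 *
 *	// Waiter: sleeps until "done" becomes true.
 *	wait_event(my_wq, done);
 *
 *	// Waker: publish the condition, then wake one exclusive waiter
 *	// (and any non-exclusive ones).
 *	done = true;
 *	wake_up(&my_wq);	// expands to __wake_up(&my_wq, TASK_NORMAL, 1, NULL)
 */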

void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common_lock(wq_head, mode, 1, WF_CURRENT_CPU, key);
}

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	int wake_flags = WF_SYNC;

	if (unlikely(!wq_head))
		return;

	trace_android_vh_set_wake_flags(&wake_flags, &mode);
	__wake_up_common_lock(wq_head, mode, 1, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
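
/*
 * Example (a minimal sketch of the classic open-coded wait loop this
 * function supports; "wq_head" and "condition" stand in for the caller's
 * own waitqueue and wakeup condition):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */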

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
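
/*
 * Example (an illustrative sketch): an exclusive waiter queues at the tail,
 * so a wake_up() that reaches it wakes only that one waiter instead of the
 * whole queue. The loop mirrors the prepare_to_wait() example above:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait_exclusive(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */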

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup; it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up. We cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition + wakeup after
		 * that can't see us; it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
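
/*
 * Sketch of how the wait_event*() macros drive this function (a simplified
 * rendering of the ___wait_event() loop in <linux/wait.h>, not a verbatim
 * copy; "wq" and "condition" are the caller's own):
 *
 *	struct wait_queue_entry wq_entry;
 *	long ret;
 *
 *	init_wait_entry(&wq_entry, 0);
 *	for (;;) {
 *		ret = prepare_to_wait_event(&wq, &wq_entry, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (ret)	// -ERESTARTSYS: a signal is pending
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wq_entry);
 */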

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
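
/*
 * These helpers back the wait_event_interruptible_locked*() macros: the
 * caller already holds wq->lock while testing its condition, and the helper
 * drops the lock only around schedule(). A simplified sketch of such a
 * caller (modeled on __wait_event_interruptible_locked(); "condition" is
 * the caller's own test):
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&wq, &wait);
 *		if (err)
 *			break;		// -ERESTARTSYS: interrupted by a signal
 *	}
 *	__remove_wait_queue(&wq, &wait);	// lock is held, no finish_wait()
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */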

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !kthread_should_stop_or_park())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
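
/*
 * Example (a sketch of the woken-wait pattern the diagram above describes;
 * "wq_head" and "condition" are the caller's own, and timeout bookkeeping
 * is the caller's responsibility):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeout = msecs_to_jiffies(500);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!condition && timeout)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq_head, &wait);
 */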