// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"
#include <trace/hooks/sched.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
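/*
 * Illustrative sketch (not part of this file): exclusive entries are
 * queued at the tail, so non-exclusive waiters at the head are always
 * woken, while wake_up() stops after the first exclusive waiter it
 * successfully wakes. The queue head and loop below are hypothetical;
 * this is the usual wake-one worker pattern that avoids a thundering
 * herd on a shared request queue.
 *
 *	DECLARE_WAIT_QUEUE_HEAD(req_wq);
 *
 *	// in each worker thread:
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue_exclusive(&req_wq, &wait);
 *	// ... sleep/consume requests ...
 *	remove_wait_queue(&req_wq, &wait);
 */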

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == a small
 * positive number) then we wake all the non-exclusive tasks and one exclusive
 * task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
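/*
 * For reference: the common wake_up*() macros in <linux/wait.h> are thin
 * wrappers around __wake_up(), e.g.:
 *
 *	wake_up(&wq)			-> __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *	wake_up_all(&wq)		-> __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *	wake_up_interruptible(&wq)	-> __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 *
 * nr_exclusive only limits WQ_FLAG_EXCLUSIVE waiters; non-exclusive
 * waiters are always woken regardless of its value.
 */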

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	int wake_flags = WF_SYNC;

	if (unlikely(!wq_head))
		return;

	trace_android_vh_set_wake_flags(&wake_flags, &mode);
	__wake_up_common_lock(wq_head, mode, 1, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
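/*
 * Illustrative sketch (assuming the mainline <linux/wait.h> wrappers): a
 * typical caller is a poll-style notification path, e.g.
 *
 *	wake_up_interruptible_sync_poll(&sk_wq, EPOLLIN)
 *		-> __wake_up_sync_key(&sk_wq, TASK_INTERRUPTIBLE,
 *				      poll_to_key(EPOLLIN))
 *
 * where the waker (say, a socket data-ready callback) knows it is about
 * to block or return to user space, so WF_SYNC hints the scheduler not
 * to migrate the wakee to another CPU.
 */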

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}
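/*
 * For reference (assuming the mainline wrapper): callers normally use
 * wake_up_pollfree() from <linux/wait.h>, which only calls here when the
 * queue is non-empty, roughly:
 *
 *	static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
 *	{
 *		if (waitqueue_active(wq_head))
 *			__wake_up_pollfree(wq_head);
 *	}
 *
 * POLLFREE tells poll/epoll/aio waiters that the waitqueue itself is
 * about to be freed, so their callbacks must unregister rather than
 * requeue.
 */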

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to either see the waitqueue addition _or_
 * subsequent tests in this thread will see the wakeup having
 * taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops it from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
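/*
 * Illustrative sketch (not part of this file): the canonical open-coded
 * wait loop built from these primitives. The queue head and condition
 * are hypothetical.
 *
 *	DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 *
 * prepare_to_wait() re-adds the entry only if a wakeup auto-removed it
 * (see autoremove_wake_function() below), so the loop is safe against
 * spurious wakeups.
 */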

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * An exclusive waiter must not fail if it was selected by
		 * wakeup; it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us; it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
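/*
 * Illustrative sketch (simplified from the ___wait_event() macro in
 * <linux/wait.h>; details vary by kernel version): a call like
 * wait_event_interruptible(wq, condition) drives prepare_to_wait_event()
 * in a loop roughly like:
 *
 *	struct wait_queue_entry wq_entry;
 *	long ret = 0;
 *
 *	init_wait_entry(&wq_entry, 0);
 *	for (;;) {
 *		long intr = prepare_to_wait_event(&wq, &wq_entry,
 *						  TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (intr) {		// -ERESTARTSYS: signal pending
 *			ret = intr;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq, &wq_entry);
 */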

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);
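/*
 * Illustrative sketch (assuming these helpers back the
 * wait_event_interruptible_locked() family in <linux/wait.h>): the
 * caller holds wq.lock around both the condition check and the sleep,
 * roughly:
 *
 *	DEFINE_WAIT(wait);
 *
 *	spin_lock(&wq.lock);
 *	do {
 *		err = do_wait_intr(&wq, &wait);
 *		if (err)
 *			break;		// -ERESTARTSYS: signal pending
 *	} while (!condition);
 *	__remove_wait_queue(&wq, &wait);	// still under wq.lock
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 *
 * do_wait_intr() drops and retakes the lock only across schedule(), so
 * there is no window in which the condition can change unobserved.
 */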

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

__sched int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
				     int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
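/*
 * For reference: DEFINE_WAIT() in <linux/wait.h> installs this function
 * as ->func, which is why open-coded prepare_to_wait()/finish_wait()
 * loops need no explicit remove_wait_queue() on the wakeup path: a
 * successful wakeup already unlinked the entry, and finish_wait() only
 * has to handle the timeout/signal case.
 */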

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
__sched long wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

__sched int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
				int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
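/*
 * Illustrative sketch (not part of this file): the pattern documented
 * above as used by, e.g., network receive paths. The queue head and
 * condition are hypothetical.
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&my_wq, &wait);
 *	while (!my_condition) {
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *		if (!timeout || signal_pending(current))
 *			break;
 *	}
 *	remove_wait_queue(&my_wq, &wait);
 *
 * Unlike prepare_to_wait(), the entry stays on the queue for the whole
 * loop; WQ_FLAG_WOKEN (managed by wait_woken()/woken_wake_function())
 * is what closes the race between the condition check and sleeping.
 */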