• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/locking/mutex.c
4  *
5  * Mutexes: blocking mutual exclusion locks
6  *
7  * Started by Ingo Molnar:
8  *
9  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10  *
11  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12  * David Howells for suggestions and improvements.
13  *
14  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15  *    from the -rt tree, where it was originally implemented for rtmutexes
16  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
17  *    and Sven Dietrich.
18  *
19  * Also see Documentation/locking/mutex-design.rst.
20  */
21 #include <linux/mutex.h>
22 #include <linux/ww_mutex.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/wake_q.h>
26 #include <linux/sched/debug.h>
27 #include <linux/export.h>
28 #include <linux/spinlock.h>
29 #include <linux/interrupt.h>
30 #include <linux/debug_locks.h>
31 #include <linux/osq_lock.h>
32 
33 #ifdef CONFIG_DEBUG_MUTEXES
34 # include "mutex-debug.h"
35 #else
36 # include "mutex.h"
37 #endif
38 
39 #include <trace/hooks/dtask.h>
40 
41 void
__mutex_init(struct mutex * lock,const char * name,struct lock_class_key * key)42 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
43 {
44 	atomic_long_set(&lock->owner, 0);
45 	spin_lock_init(&lock->wait_lock);
46 	INIT_LIST_HEAD(&lock->wait_list);
47 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
48 	osq_lock_init(&lock->osq);
49 #endif
50 
51 	debug_mutex_init(lock, name, key);
52 }
53 EXPORT_SYMBOL(__mutex_init);
54 
55 /*
56  * @owner: contains: 'struct task_struct *' to the current lock owner,
57  * NULL means not owned. Since task_struct pointers are aligned at
58  * at least L1_CACHE_BYTES, we have low bits to store extra state.
59  *
60  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
61  * Bit1 indicates unlock needs to hand the lock to the top-waiter
62  * Bit2 indicates handoff has been done and we're waiting for pickup.
63  */
64 #define MUTEX_FLAG_WAITERS	0x01
65 #define MUTEX_FLAG_HANDOFF	0x02
66 #define MUTEX_FLAG_PICKUP	0x04
67 
68 #define MUTEX_FLAGS		0x07
69 
70 /*
71  * Internal helper function; C doesn't allow us to hide it :/
72  *
73  * DO NOT USE (outside of mutex code).
74  */
__mutex_owner(struct mutex * lock)75 static inline struct task_struct *__mutex_owner(struct mutex *lock)
76 {
77 	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
78 }
79 
__owner_task(unsigned long owner)80 static inline struct task_struct *__owner_task(unsigned long owner)
81 {
82 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
83 }
84 
mutex_is_locked(struct mutex * lock)85 bool mutex_is_locked(struct mutex *lock)
86 {
87 	return __mutex_owner(lock) != NULL;
88 }
89 EXPORT_SYMBOL(mutex_is_locked);
90 
91 __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex * lock)92 mutex_trylock_recursive(struct mutex *lock)
93 {
94 	if (unlikely(__mutex_owner(lock) == current))
95 		return MUTEX_TRYLOCK_RECURSIVE;
96 
97 	return mutex_trylock(lock);
98 }
99 EXPORT_SYMBOL(mutex_trylock_recursive);
100 
__owner_flags(unsigned long owner)101 static inline unsigned long __owner_flags(unsigned long owner)
102 {
103 	return owner & MUTEX_FLAGS;
104 }
105 
106 /*
107  * Trylock variant that retuns the owning task on failure.
108  */
__mutex_trylock_or_owner(struct mutex * lock)109 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
110 {
111 	unsigned long owner, curr = (unsigned long)current;
112 
113 	owner = atomic_long_read(&lock->owner);
114 	for (;;) { /* must loop, can race against a flag */
115 		unsigned long old, flags = __owner_flags(owner);
116 		unsigned long task = owner & ~MUTEX_FLAGS;
117 
118 		if (task) {
119 			if (likely(task != curr))
120 				break;
121 
122 			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
123 				break;
124 
125 			flags &= ~MUTEX_FLAG_PICKUP;
126 		} else {
127 #ifdef CONFIG_DEBUG_MUTEXES
128 			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
129 #endif
130 		}
131 
132 		/*
133 		 * We set the HANDOFF bit, we must make sure it doesn't live
134 		 * past the point where we acquire it. This would be possible
135 		 * if we (accidentally) set the bit on an unlocked mutex.
136 		 */
137 		flags &= ~MUTEX_FLAG_HANDOFF;
138 
139 		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
140 		if (old == owner)
141 			return NULL;
142 
143 		owner = old;
144 	}
145 
146 	return __owner_task(owner);
147 }
148 
149 /*
150  * Actual trylock that will work on any unlocked state.
151  */
__mutex_trylock(struct mutex * lock)152 static inline bool __mutex_trylock(struct mutex *lock)
153 {
154 	return !__mutex_trylock_or_owner(lock);
155 }
156 
157 #ifndef CONFIG_DEBUG_LOCK_ALLOC
158 /*
159  * Lockdep annotations are contained to the slow paths for simplicity.
160  * There is nothing that would stop spreading the lockdep annotations outwards
161  * except more code.
162  */
163 
164 /*
165  * Optimistic trylock that only works in the uncontended case. Make sure to
166  * follow with a __mutex_trylock() before failing.
167  */
__mutex_trylock_fast(struct mutex * lock)168 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
169 {
170 	unsigned long curr = (unsigned long)current;
171 	unsigned long zero = 0UL;
172 
173 	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
174 		return true;
175 
176 	return false;
177 }
178 
__mutex_unlock_fast(struct mutex * lock)179 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
180 {
181 	unsigned long curr = (unsigned long)current;
182 
183 	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
184 		return true;
185 
186 	return false;
187 }
188 #endif
189 
__mutex_set_flag(struct mutex * lock,unsigned long flag)190 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
191 {
192 	atomic_long_or(flag, &lock->owner);
193 }
194 
__mutex_clear_flag(struct mutex * lock,unsigned long flag)195 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
196 {
197 	atomic_long_andnot(flag, &lock->owner);
198 }
199 
__mutex_waiter_is_first(struct mutex * lock,struct mutex_waiter * waiter)200 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
201 {
202 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
203 }
204 
205 /*
206  * Add @waiter to a given location in the lock wait_list and set the
207  * FLAG_WAITERS flag if it's the first waiter.
208  */
209 static void
__mutex_add_waiter(struct mutex * lock,struct mutex_waiter * waiter,struct list_head * list)210 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
211 		   struct list_head *list)
212 {
213 	debug_mutex_add_waiter(lock, waiter, current);
214 
215 	list_add_tail(&waiter->list, list);
216 	if (__mutex_waiter_is_first(lock, waiter))
217 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
218 }
219 
220 static void
__mutex_remove_waiter(struct mutex * lock,struct mutex_waiter * waiter)221 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
222 {
223 	list_del(&waiter->list);
224 	if (likely(list_empty(&lock->wait_list)))
225 		__mutex_clear_flag(lock, MUTEX_FLAGS);
226 
227 	debug_mutex_remove_waiter(lock, waiter, current);
228 }
229 
230 /*
231  * Give up ownership to a specific task, when @task = NULL, this is equivalent
232  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
233  * WAITERS. Provides RELEASE semantics like a regular unlock, the
234  * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
235  */
__mutex_handoff(struct mutex * lock,struct task_struct * task)236 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
237 {
238 	unsigned long owner = atomic_long_read(&lock->owner);
239 
240 	for (;;) {
241 		unsigned long old, new;
242 
243 #ifdef CONFIG_DEBUG_MUTEXES
244 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
245 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
246 #endif
247 
248 		new = (owner & MUTEX_FLAG_WAITERS);
249 		new |= (unsigned long)task;
250 		if (task)
251 			new |= MUTEX_FLAG_PICKUP;
252 
253 		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
254 		if (old == owner)
255 			break;
256 
257 		owner = old;
258 	}
259 }
260 
261 #ifndef CONFIG_DEBUG_LOCK_ALLOC
262 /*
263  * We split the mutex lock/unlock logic into separate fastpath and
264  * slowpath functions, to reduce the register pressure on the fastpath.
265  * We also put the fastpath first in the kernel image, to make sure the
266  * branch is predicted by the CPU as default-untaken.
267  */
268 static void __sched __mutex_lock_slowpath(struct mutex *lock);
269 
270 /**
271  * mutex_lock - acquire the mutex
272  * @lock: the mutex to be acquired
273  *
274  * Lock the mutex exclusively for this task. If the mutex is not
275  * available right now, it will sleep until it can get it.
276  *
277  * The mutex must later on be released by the same task that
278  * acquired it. Recursive locking is not allowed. The task
279  * may not exit without first unlocking the mutex. Also, kernel
280  * memory where the mutex resides must not be freed with
281  * the mutex still locked. The mutex must first be initialized
282  * (or statically defined) before it can be locked. memset()-ing
283  * the mutex to 0 is not allowed.
284  *
285  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
286  * checks that will enforce the restrictions and will also do
287  * deadlock debugging)
288  *
289  * This function is similar to (but not equivalent to) down().
290  */
mutex_lock(struct mutex * lock)291 void __sched mutex_lock(struct mutex *lock)
292 {
293 	might_sleep();
294 
295 	if (!__mutex_trylock_fast(lock))
296 		__mutex_lock_slowpath(lock);
297 }
298 EXPORT_SYMBOL(mutex_lock);
299 #endif
300 
301 /*
302  * Wait-Die:
303  *   The newer transactions are killed when:
304  *     It (the new transaction) makes a request for a lock being held
305  *     by an older transaction.
306  *
307  * Wound-Wait:
308  *   The newer transactions are wounded when:
309  *     An older transaction makes a request for a lock being held by
310  *     the newer transaction.
311  */
312 
313 /*
314  * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
315  * it.
316  */
317 static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex * ww,struct ww_acquire_ctx * ww_ctx)318 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
319 {
320 #ifdef CONFIG_DEBUG_MUTEXES
321 	/*
322 	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
323 	 * but released with a normal mutex_unlock in this call.
324 	 *
325 	 * This should never happen, always use ww_mutex_unlock.
326 	 */
327 	DEBUG_LOCKS_WARN_ON(ww->ctx);
328 
329 	/*
330 	 * Not quite done after calling ww_acquire_done() ?
331 	 */
332 	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
333 
334 	if (ww_ctx->contending_lock) {
335 		/*
336 		 * After -EDEADLK you tried to
337 		 * acquire a different ww_mutex? Bad!
338 		 */
339 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
340 
341 		/*
342 		 * You called ww_mutex_lock after receiving -EDEADLK,
343 		 * but 'forgot' to unlock everything else first?
344 		 */
345 		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
346 		ww_ctx->contending_lock = NULL;
347 	}
348 
349 	/*
350 	 * Naughty, using a different class will lead to undefined behavior!
351 	 */
352 	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
353 #endif
354 	ww_ctx->acquired++;
355 	ww->ctx = ww_ctx;
356 }
357 
358 /*
359  * Determine if context @a is 'after' context @b. IOW, @a is a younger
360  * transaction than @b and depending on algorithm either needs to wait for
361  * @b or die.
362  */
363 static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx * a,struct ww_acquire_ctx * b)364 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
365 {
366 
367 	return (signed long)(a->stamp - b->stamp) > 0;
368 }
369 
370 /*
371  * Wait-Die; wake a younger waiter context (when locks held) such that it can
372  * die.
373  *
374  * Among waiters with context, only the first one can have other locks acquired
375  * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
376  * __ww_mutex_check_kill() wake any but the earliest context.
377  */
378 static bool __sched
__ww_mutex_die(struct mutex * lock,struct mutex_waiter * waiter,struct ww_acquire_ctx * ww_ctx)379 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
380 	       struct ww_acquire_ctx *ww_ctx)
381 {
382 	if (!ww_ctx->is_wait_die)
383 		return false;
384 
385 	if (waiter->ww_ctx->acquired > 0 &&
386 			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
387 		debug_mutex_wake_waiter(lock, waiter);
388 		wake_up_process(waiter->task);
389 	}
390 
391 	return true;
392 }
393 
394 /*
395  * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
396  *
397  * Wound the lock holder if there are waiters with older transactions than
398  * the lock holders. Even if multiple waiters may wound the lock holder,
399  * it's sufficient that only one does.
400  */
__ww_mutex_wound(struct mutex * lock,struct ww_acquire_ctx * ww_ctx,struct ww_acquire_ctx * hold_ctx)401 static bool __ww_mutex_wound(struct mutex *lock,
402 			     struct ww_acquire_ctx *ww_ctx,
403 			     struct ww_acquire_ctx *hold_ctx)
404 {
405 	struct task_struct *owner = __mutex_owner(lock);
406 
407 	lockdep_assert_held(&lock->wait_lock);
408 
409 	/*
410 	 * Possible through __ww_mutex_add_waiter() when we race with
411 	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
412 	 * through __ww_mutex_check_waiters().
413 	 */
414 	if (!hold_ctx)
415 		return false;
416 
417 	/*
418 	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
419 	 * it cannot go away because we'll have FLAG_WAITERS set and hold
420 	 * wait_lock.
421 	 */
422 	if (!owner)
423 		return false;
424 
425 	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
426 		hold_ctx->wounded = 1;
427 
428 		/*
429 		 * wake_up_process() paired with set_current_state()
430 		 * inserts sufficient barriers to make sure @owner either sees
431 		 * it's wounded in __ww_mutex_check_kill() or has a
432 		 * wakeup pending to re-read the wounded state.
433 		 */
434 		if (owner != current)
435 			wake_up_process(owner);
436 
437 		return true;
438 	}
439 
440 	return false;
441 }
442 
443 /*
444  * We just acquired @lock under @ww_ctx, if there are later contexts waiting
445  * behind us on the wait-list, check if they need to die, or wound us.
446  *
447  * See __ww_mutex_add_waiter() for the list-order construction; basically the
448  * list is ordered by stamp, smallest (oldest) first.
449  *
450  * This relies on never mixing wait-die/wound-wait on the same wait-list;
451  * which is currently ensured by that being a ww_class property.
452  *
453  * The current task must not be on the wait list.
454  */
455 static void __sched
__ww_mutex_check_waiters(struct mutex * lock,struct ww_acquire_ctx * ww_ctx)456 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
457 {
458 	struct mutex_waiter *cur;
459 
460 	lockdep_assert_held(&lock->wait_lock);
461 
462 	list_for_each_entry(cur, &lock->wait_list, list) {
463 		if (!cur->ww_ctx)
464 			continue;
465 
466 		if (__ww_mutex_die(lock, cur, ww_ctx) ||
467 		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
468 			break;
469 	}
470 }
471 
472 /*
473  * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
474  * and wake up any waiters so they can recheck.
475  */
476 static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)477 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
478 {
479 	ww_mutex_lock_acquired(lock, ctx);
480 
481 	/*
482 	 * The lock->ctx update should be visible on all cores before
483 	 * the WAITERS check is done, otherwise contended waiters might be
484 	 * missed. The contended waiters will either see ww_ctx == NULL
485 	 * and keep spinning, or it will acquire wait_lock, add itself
486 	 * to waiter list and sleep.
487 	 */
488 	smp_mb(); /* See comments above and below. */
489 
490 	/*
491 	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
492 	 *     MB		        MB
493 	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
494 	 *
495 	 * The memory barrier above pairs with the memory barrier in
496 	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
497 	 * and/or !empty list.
498 	 */
499 	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
500 		return;
501 
502 	/*
503 	 * Uh oh, we raced in fastpath, check if any of the waiters need to
504 	 * die or wound us.
505 	 */
506 	spin_lock(&lock->base.wait_lock);
507 	__ww_mutex_check_waiters(&lock->base, ctx);
508 	spin_unlock(&lock->base.wait_lock);
509 }
510 
511 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
512 
513 static inline
ww_mutex_spin_on_owner(struct mutex * lock,struct ww_acquire_ctx * ww_ctx,struct mutex_waiter * waiter)514 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
515 			    struct mutex_waiter *waiter)
516 {
517 	struct ww_mutex *ww;
518 
519 	ww = container_of(lock, struct ww_mutex, base);
520 
521 	/*
522 	 * If ww->ctx is set the contents are undefined, only
523 	 * by acquiring wait_lock there is a guarantee that
524 	 * they are not invalid when reading.
525 	 *
526 	 * As such, when deadlock detection needs to be
527 	 * performed the optimistic spinning cannot be done.
528 	 *
529 	 * Check this in every inner iteration because we may
530 	 * be racing against another thread's ww_mutex_lock.
531 	 */
532 	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
533 		return false;
534 
535 	/*
536 	 * If we aren't on the wait list yet, cancel the spin
537 	 * if there are waiters. We want  to avoid stealing the
538 	 * lock from a waiter with an earlier stamp, since the
539 	 * other thread may already own a lock that we also
540 	 * need.
541 	 */
542 	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
543 		return false;
544 
545 	/*
546 	 * Similarly, stop spinning if we are no longer the
547 	 * first waiter.
548 	 */
549 	if (waiter && !__mutex_waiter_is_first(lock, waiter))
550 		return false;
551 
552 	return true;
553 }
554 
555 /*
556  * Look out! "owner" is an entirely speculative pointer access and not
557  * reliable.
558  *
559  * "noinline" so that this function shows up on perf profiles.
560  */
561 static noinline
mutex_spin_on_owner(struct mutex * lock,struct task_struct * owner,struct ww_acquire_ctx * ww_ctx,struct mutex_waiter * waiter)562 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
563 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
564 {
565 	bool ret = true;
566 
567 	rcu_read_lock();
568 	while (__mutex_owner(lock) == owner) {
569 		/*
570 		 * Ensure we emit the owner->on_cpu, dereference _after_
571 		 * checking lock->owner still matches owner. If that fails,
572 		 * owner might point to freed memory. If it still matches,
573 		 * the rcu_read_lock() ensures the memory stays valid.
574 		 */
575 		barrier();
576 
577 		/*
578 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
579 		 */
580 		if (!owner->on_cpu || need_resched() ||
581 				vcpu_is_preempted(task_cpu(owner))) {
582 			ret = false;
583 			break;
584 		}
585 
586 		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
587 			ret = false;
588 			break;
589 		}
590 
591 		cpu_relax();
592 	}
593 	rcu_read_unlock();
594 
595 	return ret;
596 }
597 
598 /*
599  * Initial check for entering the mutex spinning loop
600  */
mutex_can_spin_on_owner(struct mutex * lock)601 static inline int mutex_can_spin_on_owner(struct mutex *lock)
602 {
603 	struct task_struct *owner;
604 	int retval = 1;
605 
606 	if (need_resched())
607 		return 0;
608 
609 	rcu_read_lock();
610 	owner = __mutex_owner(lock);
611 
612 	/*
613 	 * As lock holder preemption issue, we both skip spinning if task is not
614 	 * on cpu or its cpu is preempted
615 	 */
616 	if (owner)
617 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
618 	rcu_read_unlock();
619 
620 	/*
621 	 * If lock->owner is not set, the mutex has been released. Return true
622 	 * such that we'll trylock in the spin path, which is a faster option
623 	 * than the blocking slow path.
624 	 */
625 	return retval;
626 }
627 
628 /*
629  * Optimistic spinning.
630  *
631  * We try to spin for acquisition when we find that the lock owner
632  * is currently running on a (different) CPU and while we don't
633  * need to reschedule. The rationale is that if the lock owner is
634  * running, it is likely to release the lock soon.
635  *
636  * The mutex spinners are queued up using MCS lock so that only one
637  * spinner can compete for the mutex. However, if mutex spinning isn't
638  * going to happen, there is no point in going through the lock/unlock
639  * overhead.
640  *
641  * Returns true when the lock was taken, otherwise false, indicating
642  * that we need to jump to the slowpath and sleep.
643  *
644  * The waiter flag is set to true if the spinner is a waiter in the wait
645  * queue. The waiter-spinner will spin on the lock directly and concurrently
646  * with the spinner at the head of the OSQ, if present, until the owner is
647  * changed to itself.
648  */
649 static __always_inline bool
mutex_optimistic_spin(struct mutex * lock,struct ww_acquire_ctx * ww_ctx,struct mutex_waiter * waiter)650 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
651 		      struct mutex_waiter *waiter)
652 {
653 	if (!waiter) {
654 		/*
655 		 * The purpose of the mutex_can_spin_on_owner() function is
656 		 * to eliminate the overhead of osq_lock() and osq_unlock()
657 		 * in case spinning isn't possible. As a waiter-spinner
658 		 * is not going to take OSQ lock anyway, there is no need
659 		 * to call mutex_can_spin_on_owner().
660 		 */
661 		if (!mutex_can_spin_on_owner(lock))
662 			goto fail;
663 
664 		/*
665 		 * In order to avoid a stampede of mutex spinners trying to
666 		 * acquire the mutex all at once, the spinners need to take a
667 		 * MCS (queued) lock first before spinning on the owner field.
668 		 */
669 		if (!osq_lock(&lock->osq))
670 			goto fail;
671 	}
672 
673 	for (;;) {
674 		struct task_struct *owner;
675 
676 		/* Try to acquire the mutex... */
677 		owner = __mutex_trylock_or_owner(lock);
678 		if (!owner)
679 			break;
680 
681 		/*
682 		 * There's an owner, wait for it to either
683 		 * release the lock or go to sleep.
684 		 */
685 		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
686 			goto fail_unlock;
687 
688 		/*
689 		 * The cpu_relax() call is a compiler barrier which forces
690 		 * everything in this loop to be re-loaded. We don't need
691 		 * memory barriers as we'll eventually observe the right
692 		 * values at the cost of a few extra spins.
693 		 */
694 		cpu_relax();
695 	}
696 
697 	if (!waiter)
698 		osq_unlock(&lock->osq);
699 
700 	return true;
701 
702 
703 fail_unlock:
704 	if (!waiter)
705 		osq_unlock(&lock->osq);
706 
707 fail:
708 	/*
709 	 * If we fell out of the spin path because of need_resched(),
710 	 * reschedule now, before we try-lock the mutex. This avoids getting
711 	 * scheduled out right after we obtained the mutex.
712 	 */
713 	if (need_resched()) {
714 		/*
715 		 * We _should_ have TASK_RUNNING here, but just in case
716 		 * we do not, make it so, otherwise we might get stuck.
717 		 */
718 		__set_current_state(TASK_RUNNING);
719 		schedule_preempt_disabled();
720 	}
721 
722 	return false;
723 }
724 #else
725 static __always_inline bool
mutex_optimistic_spin(struct mutex * lock,struct ww_acquire_ctx * ww_ctx,struct mutex_waiter * waiter)726 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
727 		      struct mutex_waiter *waiter)
728 {
729 	return false;
730 }
731 #endif
732 
733 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
734 
735 /**
736  * mutex_unlock - release the mutex
737  * @lock: the mutex to be released
738  *
739  * Unlock a mutex that has been locked by this task previously.
740  *
741  * This function must not be used in interrupt context. Unlocking
742  * of a not locked mutex is not allowed.
743  *
744  * This function is similar to (but not equivalent to) up().
745  */
mutex_unlock(struct mutex * lock)746 void __sched mutex_unlock(struct mutex *lock)
747 {
748 #ifndef CONFIG_DEBUG_LOCK_ALLOC
749 	if (__mutex_unlock_fast(lock))
750 		return;
751 #endif
752 	__mutex_unlock_slowpath(lock, _RET_IP_);
753 }
754 EXPORT_SYMBOL(mutex_unlock);
755 
756 /**
757  * ww_mutex_unlock - release the w/w mutex
758  * @lock: the mutex to be released
759  *
760  * Unlock a mutex that has been locked by this task previously with any of the
761  * ww_mutex_lock* functions (with or without an acquire context). It is
762  * forbidden to release the locks after releasing the acquire context.
763  *
764  * This function must not be used in interrupt context. Unlocking
765  * of a unlocked mutex is not allowed.
766  */
ww_mutex_unlock(struct ww_mutex * lock)767 void __sched ww_mutex_unlock(struct ww_mutex *lock)
768 {
769 	/*
770 	 * The unlocking fastpath is the 0->1 transition from 'locked'
771 	 * into 'unlocked' state:
772 	 */
773 	if (lock->ctx) {
774 #ifdef CONFIG_DEBUG_MUTEXES
775 		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
776 #endif
777 		if (lock->ctx->acquired > 0)
778 			lock->ctx->acquired--;
779 		lock->ctx = NULL;
780 	}
781 
782 	mutex_unlock(&lock->base);
783 }
784 EXPORT_SYMBOL(ww_mutex_unlock);
785 
786 
787 static __always_inline int __sched
__ww_mutex_kill(struct mutex * lock,struct ww_acquire_ctx * ww_ctx)788 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
789 {
790 	if (ww_ctx->acquired > 0) {
791 #ifdef CONFIG_DEBUG_MUTEXES
792 		struct ww_mutex *ww;
793 
794 		ww = container_of(lock, struct ww_mutex, base);
795 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
796 		ww_ctx->contending_lock = ww;
797 #endif
798 		return -EDEADLK;
799 	}
800 
801 	return 0;
802 }
803 
804 
805 /*
806  * Check the wound condition for the current lock acquire.
807  *
808  * Wound-Wait: If we're wounded, kill ourself.
809  *
810  * Wait-Die: If we're trying to acquire a lock already held by an older
811  *           context, kill ourselves.
812  *
813  * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
814  * look at waiters before us in the wait-list.
815  */
816 static inline int __sched
__ww_mutex_check_kill(struct mutex * lock,struct mutex_waiter * waiter,struct ww_acquire_ctx * ctx)817 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
818 		      struct ww_acquire_ctx *ctx)
819 {
820 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
821 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
822 	struct mutex_waiter *cur;
823 
824 	if (ctx->acquired == 0)
825 		return 0;
826 
827 	if (!ctx->is_wait_die) {
828 		if (ctx->wounded)
829 			return __ww_mutex_kill(lock, ctx);
830 
831 		return 0;
832 	}
833 
834 	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
835 		return __ww_mutex_kill(lock, ctx);
836 
837 	/*
838 	 * If there is a waiter in front of us that has a context, then its
839 	 * stamp is earlier than ours and we must kill ourself.
840 	 */
841 	cur = waiter;
842 	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
843 		if (!cur->ww_ctx)
844 			continue;
845 
846 		return __ww_mutex_kill(lock, ctx);
847 	}
848 
849 	return 0;
850 }
851 
852 /*
853  * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
854  * first. Such that older contexts are preferred to acquire the lock over
855  * younger contexts.
856  *
857  * Waiters without context are interspersed in FIFO order.
858  *
859  * Furthermore, for Wait-Die kill ourself immediately when possible (there are
860  * older contexts already waiting) to avoid unnecessary waiting and for
861  * Wound-Wait ensure we wound the owning context when it is younger.
862  */
863 static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter * waiter,struct mutex * lock,struct ww_acquire_ctx * ww_ctx)864 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
865 		      struct mutex *lock,
866 		      struct ww_acquire_ctx *ww_ctx)
867 {
868 	struct mutex_waiter *cur;
869 	struct list_head *pos;
870 	bool is_wait_die;
871 
872 	if (!ww_ctx) {
873 		__mutex_add_waiter(lock, waiter, &lock->wait_list);
874 		return 0;
875 	}
876 
877 	is_wait_die = ww_ctx->is_wait_die;
878 
879 	/*
880 	 * Add the waiter before the first waiter with a higher stamp.
881 	 * Waiters without a context are skipped to avoid starving
882 	 * them. Wait-Die waiters may die here. Wound-Wait waiters
883 	 * never die here, but they are sorted in stamp order and
884 	 * may wound the lock holder.
885 	 */
886 	pos = &lock->wait_list;
887 	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
888 		if (!cur->ww_ctx)
889 			continue;
890 
891 		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
892 			/*
893 			 * Wait-Die: if we find an older context waiting, there
894 			 * is no point in queueing behind it, as we'd have to
895 			 * die the moment it would acquire the lock.
896 			 */
897 			if (is_wait_die) {
898 				int ret = __ww_mutex_kill(lock, ww_ctx);
899 
900 				if (ret)
901 					return ret;
902 			}
903 
904 			break;
905 		}
906 
907 		pos = &cur->list;
908 
909 		/* Wait-Die: ensure younger waiters die. */
910 		__ww_mutex_die(lock, cur, ww_ctx);
911 	}
912 
913 	__mutex_add_waiter(lock, waiter, pos);
914 
915 	/*
916 	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
917 	 * wound that such that we might proceed.
918 	 */
919 	if (!is_wait_die) {
920 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
921 
922 		/*
923 		 * See ww_mutex_set_context_fastpath(). Orders setting
924 		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
925 		 * such that either we or the fastpath will wound @ww->ctx.
926 		 */
927 		smp_mb();
928 		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
929 	}
930 
931 	return 0;
932 }
933 
934 /*
935  * Lock a mutex (possibly interruptible), slowpath:
936  */
937 static __always_inline int __sched
__mutex_lock_common(struct mutex * lock,long state,unsigned int subclass,struct lockdep_map * nest_lock,unsigned long ip,struct ww_acquire_ctx * ww_ctx,const bool use_ww_ctx)938 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
939 		    struct lockdep_map *nest_lock, unsigned long ip,
940 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
941 {
942 	struct mutex_waiter waiter;
943 	struct ww_mutex *ww;
944 	int ret;
945 
946 	if (!use_ww_ctx)
947 		ww_ctx = NULL;
948 
949 	might_sleep();
950 
951 #ifdef CONFIG_DEBUG_MUTEXES
952 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
953 #endif
954 
955 	ww = container_of(lock, struct ww_mutex, base);
956 	if (ww_ctx) {
957 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
958 			return -EALREADY;
959 
960 		/*
961 		 * Reset the wounded flag after a kill. No other process can
962 		 * race and wound us here since they can't have a valid owner
963 		 * pointer if we don't have any locks held.
964 		 */
965 		if (ww_ctx->acquired == 0)
966 			ww_ctx->wounded = 0;
967 	}
968 
969 	preempt_disable();
970 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
971 
972 	if (__mutex_trylock(lock) ||
973 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
974 		/* got the lock, yay! */
975 		lock_acquired(&lock->dep_map, ip);
976 		if (ww_ctx)
977 			ww_mutex_set_context_fastpath(ww, ww_ctx);
978 		preempt_enable();
979 		return 0;
980 	}
981 
982 	spin_lock(&lock->wait_lock);
983 	/*
984 	 * After waiting to acquire the wait_lock, try again.
985 	 */
986 	if (__mutex_trylock(lock)) {
987 		if (ww_ctx)
988 			__ww_mutex_check_waiters(lock, ww_ctx);
989 
990 		goto skip_wait;
991 	}
992 
993 	debug_mutex_lock_common(lock, &waiter);
994 
995 	lock_contended(&lock->dep_map, ip);
996 
997 	if (!use_ww_ctx) {
998 		/* add waiting tasks to the end of the waitqueue (FIFO): */
999 		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
1000 
1001 
1002 #ifdef CONFIG_DEBUG_MUTEXES
1003 		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
1004 #endif
1005 	} else {
1006 		/*
1007 		 * Add in stamp order, waking up waiters that must kill
1008 		 * themselves.
1009 		 */
1010 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
1011 		if (ret)
1012 			goto err_early_kill;
1013 
1014 		waiter.ww_ctx = ww_ctx;
1015 	}
1016 
1017 	waiter.task = current;
1018 
1019 	trace_android_vh_mutex_wait_start(lock);
1020 	set_current_state(state);
1021 	for (;;) {
1022 		bool first;
1023 
1024 		/*
1025 		 * Once we hold wait_lock, we're serialized against
1026 		 * mutex_unlock() handing the lock off to us, do a trylock
1027 		 * before testing the error conditions to make sure we pick up
1028 		 * the handoff.
1029 		 */
1030 		if (__mutex_trylock(lock))
1031 			goto acquired;
1032 
1033 		/*
1034 		 * Check for signals and kill conditions while holding
1035 		 * wait_lock. This ensures the lock cancellation is ordered
1036 		 * against mutex_unlock() and wake-ups do not go missing.
1037 		 */
1038 		if (signal_pending_state(state, current)) {
1039 			ret = -EINTR;
1040 			goto err;
1041 		}
1042 
1043 		if (ww_ctx) {
1044 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
1045 			if (ret)
1046 				goto err;
1047 		}
1048 
1049 		spin_unlock(&lock->wait_lock);
1050 		schedule_preempt_disabled();
1051 
1052 		first = __mutex_waiter_is_first(lock, &waiter);
1053 		if (first)
1054 			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
1055 
1056 		set_current_state(state);
1057 		/*
1058 		 * Here we order against unlock; we must either see it change
1059 		 * state back to RUNNING and fall through the next schedule(),
1060 		 * or we must see its unlock and acquire.
1061 		 */
1062 		if (__mutex_trylock(lock) ||
1063 		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
1064 			break;
1065 
1066 		spin_lock(&lock->wait_lock);
1067 	}
1068 	spin_lock(&lock->wait_lock);
1069 acquired:
1070 	__set_current_state(TASK_RUNNING);
1071 	trace_android_vh_mutex_wait_finish(lock);
1072 
1073 	if (ww_ctx) {
1074 		/*
1075 		 * Wound-Wait; we stole the lock (!first_waiter), check the
1076 		 * waiters as anyone might want to wound us.
1077 		 */
1078 		if (!ww_ctx->is_wait_die &&
1079 		    !__mutex_waiter_is_first(lock, &waiter))
1080 			__ww_mutex_check_waiters(lock, ww_ctx);
1081 	}
1082 
1083 	__mutex_remove_waiter(lock, &waiter);
1084 
1085 	debug_mutex_free_waiter(&waiter);
1086 
1087 skip_wait:
1088 	/* got the lock - cleanup and rejoice! */
1089 	lock_acquired(&lock->dep_map, ip);
1090 
1091 	if (ww_ctx)
1092 		ww_mutex_lock_acquired(ww, ww_ctx);
1093 
1094 	spin_unlock(&lock->wait_lock);
1095 	preempt_enable();
1096 	return 0;
1097 
1098 err:
1099 	__set_current_state(TASK_RUNNING);
1100 	trace_android_vh_mutex_wait_finish(lock);
1101 	__mutex_remove_waiter(lock, &waiter);
1102 err_early_kill:
1103 	spin_unlock(&lock->wait_lock);
1104 	debug_mutex_free_waiter(&waiter);
1105 	mutex_release(&lock->dep_map, 1, ip);
1106 	preempt_enable();
1107 	return ret;
1108 }
1109 
1110 static int __sched
__mutex_lock(struct mutex * lock,long state,unsigned int subclass,struct lockdep_map * nest_lock,unsigned long ip)1111 __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1112 	     struct lockdep_map *nest_lock, unsigned long ip)
1113 {
1114 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1115 }
1116 
1117 static int __sched
__ww_mutex_lock(struct mutex * lock,long state,unsigned int subclass,struct lockdep_map * nest_lock,unsigned long ip,struct ww_acquire_ctx * ww_ctx)1118 __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1119 		struct lockdep_map *nest_lock, unsigned long ip,
1120 		struct ww_acquire_ctx *ww_ctx)
1121 {
1122 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1123 }
1124 
1125 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1126 void __sched
mutex_lock_nested(struct mutex * lock,unsigned int subclass)1127 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
1128 {
1129 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
1130 }
1131 
1132 EXPORT_SYMBOL_GPL(mutex_lock_nested);
1133 
1134 void __sched
_mutex_lock_nest_lock(struct mutex * lock,struct lockdep_map * nest)1135 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
1136 {
1137 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
1138 }
1139 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
1140 
1141 int __sched
mutex_lock_killable_nested(struct mutex * lock,unsigned int subclass)1142 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
1143 {
1144 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
1145 }
1146 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
1147 
1148 int __sched
mutex_lock_interruptible_nested(struct mutex * lock,unsigned int subclass)1149 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
1150 {
1151 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
1152 }
1153 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
1154 
1155 void __sched
mutex_lock_io_nested(struct mutex * lock,unsigned int subclass)1156 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
1157 {
1158 	int token;
1159 
1160 	might_sleep();
1161 
1162 	token = io_schedule_prepare();
1163 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
1164 			    subclass, NULL, _RET_IP_, NULL, 0);
1165 	io_schedule_finish(token);
1166 }
1167 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
1168 
1169 static inline int
ww_mutex_deadlock_injection(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)1170 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1171 {
1172 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
1173 	unsigned tmp;
1174 
1175 	if (ctx->deadlock_inject_countdown-- == 0) {
1176 		tmp = ctx->deadlock_inject_interval;
1177 		if (tmp > UINT_MAX/4)
1178 			tmp = UINT_MAX;
1179 		else
1180 			tmp = tmp*2 + tmp + tmp/2;
1181 
1182 		ctx->deadlock_inject_interval = tmp;
1183 		ctx->deadlock_inject_countdown = tmp;
1184 		ctx->contending_lock = lock;
1185 
1186 		ww_mutex_unlock(lock);
1187 
1188 		return -EDEADLK;
1189 	}
1190 #endif
1191 
1192 	return 0;
1193 }
1194 
1195 int __sched
ww_mutex_lock(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)1196 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1197 {
1198 	int ret;
1199 
1200 	might_sleep();
1201 	ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1202 			       0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1203 			       ctx);
1204 	if (!ret && ctx && ctx->acquired > 1)
1205 		return ww_mutex_deadlock_injection(lock, ctx);
1206 
1207 	return ret;
1208 }
1209 EXPORT_SYMBOL_GPL(ww_mutex_lock);
1210 
1211 int __sched
ww_mutex_lock_interruptible(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)1212 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1213 {
1214 	int ret;
1215 
1216 	might_sleep();
1217 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1218 			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1219 			      ctx);
1220 
1221 	if (!ret && ctx && ctx->acquired > 1)
1222 		return ww_mutex_deadlock_injection(lock, ctx);
1223 
1224 	return ret;
1225 }
1226 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1227 
1228 #endif
1229 
1230 /*
1231  * Release the lock, slowpath:
1232  */
__mutex_unlock_slowpath(struct mutex * lock,unsigned long ip)1233 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1234 {
1235 	struct task_struct *next = NULL;
1236 	DEFINE_WAKE_Q(wake_q);
1237 	unsigned long owner;
1238 
1239 	mutex_release(&lock->dep_map, 1, ip);
1240 
1241 	/*
1242 	 * Release the lock before (potentially) taking the spinlock such that
1243 	 * other contenders can get on with things ASAP.
1244 	 *
1245 	 * Except when HANDOFF, in that case we must not clear the owner field,
1246 	 * but instead set it to the top waiter.
1247 	 */
1248 	owner = atomic_long_read(&lock->owner);
1249 	for (;;) {
1250 		unsigned long old;
1251 
1252 #ifdef CONFIG_DEBUG_MUTEXES
1253 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1254 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1255 #endif
1256 
1257 		if (owner & MUTEX_FLAG_HANDOFF)
1258 			break;
1259 
1260 		old = atomic_long_cmpxchg_release(&lock->owner, owner,
1261 						  __owner_flags(owner));
1262 		if (old == owner) {
1263 			if (owner & MUTEX_FLAG_WAITERS)
1264 				break;
1265 
1266 			return;
1267 		}
1268 
1269 		owner = old;
1270 	}
1271 
1272 	spin_lock(&lock->wait_lock);
1273 	debug_mutex_unlock(lock);
1274 	if (!list_empty(&lock->wait_list)) {
1275 		/* get the first entry from the wait-list: */
1276 		struct mutex_waiter *waiter =
1277 			list_first_entry(&lock->wait_list,
1278 					 struct mutex_waiter, list);
1279 
1280 		next = waiter->task;
1281 
1282 		debug_mutex_wake_waiter(lock, waiter);
1283 		wake_q_add(&wake_q, next);
1284 	}
1285 
1286 	if (owner & MUTEX_FLAG_HANDOFF)
1287 		__mutex_handoff(lock, next);
1288 
1289 	spin_unlock(&lock->wait_lock);
1290 
1291 	wake_up_q(&wake_q);
1292 }
1293 
1294 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1295 /*
1296  * Here come the less common (and hence less performance-critical) APIs:
1297  * mutex_lock_interruptible() and mutex_trylock().
1298  */
1299 static noinline int __sched
1300 __mutex_lock_killable_slowpath(struct mutex *lock);
1301 
1302 static noinline int __sched
1303 __mutex_lock_interruptible_slowpath(struct mutex *lock);
1304 
1305 /**
1306  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1307  * @lock: The mutex to be acquired.
1308  *
1309  * Lock the mutex like mutex_lock().  If a signal is delivered while the
1310  * process is sleeping, this function will return without acquiring the
1311  * mutex.
1312  *
1313  * Context: Process context.
1314  * Return: 0 if the lock was successfully acquired or %-EINTR if a
1315  * signal arrived.
1316  */
mutex_lock_interruptible(struct mutex * lock)1317 int __sched mutex_lock_interruptible(struct mutex *lock)
1318 {
1319 	might_sleep();
1320 
1321 	if (__mutex_trylock_fast(lock))
1322 		return 0;
1323 
1324 	return __mutex_lock_interruptible_slowpath(lock);
1325 }
1326 
1327 EXPORT_SYMBOL(mutex_lock_interruptible);
1328 
1329 /**
1330  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1331  * @lock: The mutex to be acquired.
1332  *
1333  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
1334  * the current process is delivered while the process is sleeping, this
1335  * function will return without acquiring the mutex.
1336  *
1337  * Context: Process context.
1338  * Return: 0 if the lock was successfully acquired or %-EINTR if a
1339  * fatal signal arrived.
1340  */
mutex_lock_killable(struct mutex * lock)1341 int __sched mutex_lock_killable(struct mutex *lock)
1342 {
1343 	might_sleep();
1344 
1345 	if (__mutex_trylock_fast(lock))
1346 		return 0;
1347 
1348 	return __mutex_lock_killable_slowpath(lock);
1349 }
1350 EXPORT_SYMBOL(mutex_lock_killable);
1351 
1352 /**
1353  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1354  * @lock: The mutex to be acquired.
1355  *
1356  * Lock the mutex like mutex_lock().  While the task is waiting for this
1357  * mutex, it will be accounted as being in the IO wait state by the
1358  * scheduler.
1359  *
1360  * Context: Process context.
1361  */
mutex_lock_io(struct mutex * lock)1362 void __sched mutex_lock_io(struct mutex *lock)
1363 {
1364 	int token;
1365 
1366 	token = io_schedule_prepare();
1367 	mutex_lock(lock);
1368 	io_schedule_finish(token);
1369 }
1370 EXPORT_SYMBOL_GPL(mutex_lock_io);
1371 
1372 static noinline void __sched
__mutex_lock_slowpath(struct mutex * lock)1373 __mutex_lock_slowpath(struct mutex *lock)
1374 {
1375 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1376 }
1377 
1378 static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex * lock)1379 __mutex_lock_killable_slowpath(struct mutex *lock)
1380 {
1381 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1382 }
1383 
1384 static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex * lock)1385 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1386 {
1387 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1388 }
1389 
1390 static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)1391 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1392 {
1393 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1394 			       _RET_IP_, ctx);
1395 }
1396 
1397 static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)1398 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1399 					    struct ww_acquire_ctx *ctx)
1400 {
1401 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1402 			       _RET_IP_, ctx);
1403 }
1404 
1405 #endif
1406 
1407 /**
1408  * mutex_trylock - try to acquire the mutex, without waiting
1409  * @lock: the mutex to be acquired
1410  *
1411  * Try to acquire the mutex atomically. Returns 1 if the mutex
1412  * has been acquired successfully, and 0 on contention.
1413  *
1414  * NOTE: this function follows the spin_trylock() convention, so
1415  * it is negated from the down_trylock() return values! Be careful
1416  * about this when converting semaphore users to mutexes.
1417  *
1418  * This function must not be used in interrupt context. The
1419  * mutex must be released by the same task that acquired it.
1420  */
mutex_trylock(struct mutex * lock)1421 int __sched mutex_trylock(struct mutex *lock)
1422 {
1423 	bool locked;
1424 
1425 #ifdef CONFIG_DEBUG_MUTEXES
1426 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1427 #endif
1428 
1429 	locked = __mutex_trylock(lock);
1430 	if (locked)
1431 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1432 
1433 	return locked;
1434 }
1435 EXPORT_SYMBOL(mutex_trylock);
1436 
1437 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1438 int __sched
ww_mutex_lock(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)1439 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1440 {
1441 	might_sleep();
1442 
1443 	if (__mutex_trylock_fast(&lock->base)) {
1444 		if (ctx)
1445 			ww_mutex_set_context_fastpath(lock, ctx);
1446 		return 0;
1447 	}
1448 
1449 	return __ww_mutex_lock_slowpath(lock, ctx);
1450 }
1451 EXPORT_SYMBOL(ww_mutex_lock);
1452 
1453 int __sched
ww_mutex_lock_interruptible(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)1454 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1455 {
1456 	might_sleep();
1457 
1458 	if (__mutex_trylock_fast(&lock->base)) {
1459 		if (ctx)
1460 			ww_mutex_set_context_fastpath(lock, ctx);
1461 		return 0;
1462 	}
1463 
1464 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1465 }
1466 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1467 
1468 #endif
1469 
1470 /**
1471  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1472  * @cnt: the atomic which we are to dec
1473  * @lock: the mutex to return holding if we dec to 0
1474  *
1475  * return true and hold lock if we dec to 0, return false otherwise
1476  */
atomic_dec_and_mutex_lock(atomic_t * cnt,struct mutex * lock)1477 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1478 {
1479 	/* dec if we can't possibly hit 0 */
1480 	if (atomic_add_unless(cnt, -1, 1))
1481 		return 0;
1482 	/* we might hit 0, so take the lock */
1483 	mutex_lock(lock);
1484 	if (!atomic_dec_and_test(cnt)) {
1485 		/* when we actually did the dec, we didn't hit 0 */
1486 		mutex_unlock(lock);
1487 		return 0;
1488 	}
1489 	/* we hit 0, and we hold the lock */
1490 	return 1;
1491 }
1492 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
1493