// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

#include <trace/hooks/dtask.h>

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
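
/*
 * Hedged sketch (illustrative only): a raw value read from lock->owner packs
 * the task_struct pointer and the flag bits above into a single word, and
 * can be split exactly the way the helpers below do:
 *
 *	unsigned long val = atomic_long_read(&lock->owner);
 *	struct task_struct *owner = (struct task_struct *)(val & ~MUTEX_FLAGS);
 *	unsigned long flags = val & MUTEX_FLAGS;	// WAITERS/HANDOFF/PICKUP
 */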

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

__must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
	if (unlikely(__mutex_owner(lock) == current))
		return MUTEX_TRYLOCK_RECURSIVE;

	return mutex_trylock(lock);
}
EXPORT_SYMBOL(mutex_trylock_recursive);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) {
		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
		return true;
	}

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	bool already_on_list = false;
	debug_mutex_add_waiter(lock, waiter, current);

	trace_android_vh_alter_mutex_list_add(lock, waiter, list, &already_on_list);
	if (!already_on_list)
		list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task == NULL, this is
 * equivalent to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF,
 * preserves WAITERS. Provides RELEASE semantics like a regular unlock, the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
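
/*
 * Hedged usage sketch (illustrative only, not part of this file's logic):
 * a typical caller embeds a mutex next to the data it protects and brackets
 * every access with mutex_lock()/mutex_unlock(). The struct and function
 * names below are made up for the example.
 *
 *	struct foo_dev {
 *		struct mutex lock;
 *		int counter;
 *	};
 *
 *	static void foo_inc(struct foo_dev *foo)
 *	{
 *		mutex_lock(&foo->lock);		// may sleep; never from IRQ context
 *		foo->counter++;
 *		mutex_unlock(&foo->lock);	// must be the same task that locked
 *	}
 */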

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder's. Even though multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined, only
	 * by acquiring wait_lock there is a guarantee that
	 * they are not invalid when reading.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;
	int cnt = 0;
	bool time_out = false;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		trace_android_vh_mutex_opt_spin_start(lock, &time_out, &cnt);
		if (time_out) {
			ret = false;
			break;
		}
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To guard against lock holder preemption, skip spinning if the task
	 * is not running on a CPU or if its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();
	trace_android_vh_mutex_can_spin_on_owner(lock, &retval);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	trace_android_vh_mutex_opt_spin_finish(lock, true);
	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	trace_android_vh_mutex_opt_spin_finish(lock, false);
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock)) {
		trace_android_vh_record_mutex_lock_starttime(current, 0);
		return;
	}
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
	trace_android_vh_record_mutex_lock_starttime(current, 0);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
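
/*
 * Hedged usage sketch (illustrative only): the usual wait/wound pattern pairs
 * ww_mutex_lock()/ww_mutex_unlock() with an acquire context and backs off on
 * -EDEADLK by dropping what is held, blocking on the contended lock with
 * ww_mutex_lock_slow(), and retrying. The names ww_class, obj, a and b are
 * made up for the example; see Documentation/locking/ww-mutex-design.rst.
 * The caller unlocks both locks and calls ww_acquire_fini(ctx) when done.
 *
 *	static DEFINE_WW_CLASS(ww_class);
 *
 *	struct obj {
 *		struct ww_mutex lock;
 *	};
 *
 *	static int lock_both(struct obj *a, struct obj *b,
 *			     struct ww_acquire_ctx *ctx)
 *	{
 *		ww_acquire_init(ctx, &ww_class);
 *
 *		ww_mutex_lock(&a->lock, ctx);	// cannot deadlock: nothing held yet
 *		while (ww_mutex_lock(&b->lock, ctx) == -EDEADLK) {
 *			ww_mutex_unlock(&a->lock);		// back off: drop what we hold
 *			ww_mutex_lock_slow(&b->lock, ctx);	// sleep until b is ours
 *			swap(a, b);				// then re-take the other lock
 *		}
 *		ww_acquire_done(ctx);
 *		return 0;
 *	}
 */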

static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourself.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourself.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first. Such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourself immediately when possible (there are
 * older contexts already waiting) to avoid unnecessary waiting, and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound it so that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	trace_android_vh_mutex_wait_start(lock);
	set_current_state(state);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);
		if (first)
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);
	trace_android_vh_mutex_wait_finish(lock);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	trace_android_vh_record_mutex_lock_starttime(current, jiffies);
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	trace_android_vh_mutex_wait_finish(lock);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	trace_android_vh_mutex_unlock_slowpath(lock);
	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
	trace_android_vh_mutex_unlock_slowpath_end(lock, next);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
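
/*
 * Hedged usage sketch (illustrative only): a caller of the interruptible
 * variant must be ready to back out without the lock when a signal arrives.
 * The struct and field names are made up for the example.
 *
 *	static int foo_update(struct foo_dev *foo)
 *	{
 *		int ret = mutex_lock_interruptible(&foo->lock);
 *
 *		if (ret)
 *			return ret;	// -EINTR: interrupted, lock not held
 *		foo->counter++;
 *		mutex_unlock(&foo->lock);
 *		return 0;
 *	}
 */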

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
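
/*
 * Hedged usage sketch (illustrative only): mutex_lock_io() is mutex_lock()
 * bracketed by io_schedule_prepare()/io_schedule_finish(), so time spent
 * blocked on the mutex is charged as iowait. A caller that serializes I/O
 * submission (names made up) might use it like:
 *
 *	mutex_lock_io(&foo->io_lock);
 *	foo_submit_io(foo);
 *	mutex_unlock(&foo->io_lock);
 */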

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	locked = __mutex_trylock(lock);
	if (locked) {
		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
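
/*
 * Hedged usage sketch (illustrative only): note the spin_trylock()-style
 * return convention, nonzero on success and 0 on contention, which is the
 * opposite of down_trylock(). Names are made up for the example.
 *
 *	if (mutex_trylock(&foo->lock)) {
 *		foo_do_quick_work(foo);		// lock held
 *		mutex_unlock(&foo->lock);
 *	} else {
 *		foo_defer_work(foo);		// contended, lock not held
 *	}
 */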

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
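
/*
 * Hedged usage sketch (illustrative only): the typical caller is a put/release
 * path where dropping the last reference must tear the object down under a
 * mutex. The names foo_dev, foo_list_lock and the fields are made up.
 *
 *	static void foo_put(struct foo_dev *foo)
 *	{
 *		if (atomic_dec_and_mutex_lock(&foo->refcnt, &foo_list_lock)) {
 *			// refcount hit zero and foo_list_lock is now held
 *			list_del(&foo->node);
 *			mutex_unlock(&foo_list_lock);
 *			kfree(foo);
 *		}
 *	}
 */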