1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/locking/mutex.c
4 *
5 * Mutexes: blocking mutual exclusion locks
6 *
7 * Started by Ingo Molnar:
8 *
9 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 *
11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12 * David Howells for suggestions and improvements.
13 *
14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15 * from the -rt tree, where it was originally implemented for rtmutexes
16 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
17 * and Sven Dietrich.
18 *
19 * Also see Documentation/locking/mutex-design.rst.
20 */
21 #include <linux/mutex.h>
22 #include <linux/ww_mutex.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/wake_q.h>
26 #include <linux/sched/debug.h>
27 #include <linux/export.h>
28 #include <linux/spinlock.h>
29 #include <linux/interrupt.h>
30 #include <linux/debug_locks.h>
31 #include <linux/osq_lock.h>
32
33 #ifdef CONFIG_DEBUG_MUTEXES
34 # include "mutex-debug.h"
35 #else
36 # include "mutex.h"
37 #endif
38
39 void
40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
41 {
42 atomic_long_set(&lock->owner, 0);
43 spin_lock_init(&lock->wait_lock);
44 INIT_LIST_HEAD(&lock->wait_list);
45 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
46 osq_lock_init(&lock->osq);
47 #endif
48
49 debug_mutex_init(lock, name, key);
50 }
51 EXPORT_SYMBOL(__mutex_init);
52
53 /*
54 * @owner: contains: 'struct task_struct *' to the current lock owner,
55 * NULL means not owned. Since task_struct pointers are aligned to
56 * at least L1_CACHE_BYTES, we have low bits to store extra state.
57 *
58 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
59 * Bit1 indicates unlock needs to hand the lock to the top-waiter
60 * Bit2 indicates handoff has been done and we're waiting for pickup.
61 */
62 #define MUTEX_FLAG_WAITERS 0x01
63 #define MUTEX_FLAG_HANDOFF 0x02
64 #define MUTEX_FLAG_PICKUP 0x04
65
66 #define MUTEX_FLAGS 0x07
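/*
 * For illustration (not part of the algorithm itself): a contended mutex
 * owned by task T that has been asked to hand the lock off would carry
 *
 *	lock->owner == (unsigned long)T | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *
 * and the helpers below recover the two halves by masking with ~MUTEX_FLAGS
 * (the task pointer) and with MUTEX_FLAGS (the flag bits).
 */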
67
68 /*
69 * Internal helper function; C doesn't allow us to hide it :/
70 *
71 * DO NOT USE (outside of mutex code).
72 */
73 static inline struct task_struct *__mutex_owner(struct mutex *lock)
74 {
75 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
76 }
77
78 static inline struct task_struct *__owner_task(unsigned long owner)
79 {
80 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
81 }
82
83 bool mutex_is_locked(struct mutex *lock)
84 {
85 return __mutex_owner(lock) != NULL;
86 }
87 EXPORT_SYMBOL(mutex_is_locked);
88
89 __must_check enum mutex_trylock_recursive_enum
90 mutex_trylock_recursive(struct mutex *lock)
91 {
92 if (unlikely(__mutex_owner(lock) == current))
93 return MUTEX_TRYLOCK_RECURSIVE;
94
95 return mutex_trylock(lock);
96 }
97 EXPORT_SYMBOL(mutex_trylock_recursive);
98
99 static inline unsigned long __owner_flags(unsigned long owner)
100 {
101 return owner & MUTEX_FLAGS;
102 }
103
104 /*
105 * Trylock variant that returns the owning task on failure.
106 */
107 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
108 {
109 unsigned long owner, curr = (unsigned long)current;
110
111 owner = atomic_long_read(&lock->owner);
112 for (;;) { /* must loop, can race against a flag */
113 unsigned long old, flags = __owner_flags(owner);
114 unsigned long task = owner & ~MUTEX_FLAGS;
115
116 if (task) {
117 if (likely(task != curr))
118 break;
119
120 if (likely(!(flags & MUTEX_FLAG_PICKUP)))
121 break;
122
123 flags &= ~MUTEX_FLAG_PICKUP;
124 } else {
125 #ifdef CONFIG_DEBUG_MUTEXES
126 DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
127 #endif
128 }
129
130 /*
131 * We set the HANDOFF bit; we must make sure it doesn't live
132 * past the point where we acquire it. This would be possible
133 * if we (accidentally) set the bit on an unlocked mutex.
134 */
135 flags &= ~MUTEX_FLAG_HANDOFF;
136
137 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
138 if (old == owner)
139 return NULL;
140
141 owner = old;
142 }
143
144 return __owner_task(owner);
145 }
146
147 /*
148 * Actual trylock that will work on any unlocked state.
149 */
150 static inline bool __mutex_trylock(struct mutex *lock)
151 {
152 return !__mutex_trylock_or_owner(lock);
153 }
154
155 #ifndef CONFIG_DEBUG_LOCK_ALLOC
156 /*
157 * Lockdep annotations are contained to the slow paths for simplicity.
158 * There is nothing that would stop spreading the lockdep annotations outwards
159 * except more code.
160 */
161
162 /*
163 * Optimistic trylock that only works in the uncontended case. Make sure to
164 * follow with a __mutex_trylock() before failing.
165 */
166 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
167 {
168 unsigned long curr = (unsigned long)current;
169 unsigned long zero = 0UL;
170
171 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
172 return true;
173
174 return false;
175 }
176
177 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
178 {
179 unsigned long curr = (unsigned long)current;
180
181 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
182 return true;
183
184 return false;
185 }
186 #endif
187
188 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
189 {
190 atomic_long_or(flag, &lock->owner);
191 }
192
193 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
194 {
195 atomic_long_andnot(flag, &lock->owner);
196 }
197
198 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
199 {
200 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
201 }
202
203 /*
204 * Add @waiter to a given location in the lock wait_list and set the
205 * FLAG_WAITERS flag if it's the first waiter.
206 */
207 static void
208 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
209 struct list_head *list)
210 {
211 debug_mutex_add_waiter(lock, waiter, current);
212
213 list_add_tail(&waiter->list, list);
214 if (__mutex_waiter_is_first(lock, waiter))
215 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
216 }
217
218 static void
219 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
220 {
221 list_del(&waiter->list);
222 if (likely(list_empty(&lock->wait_list)))
223 __mutex_clear_flag(lock, MUTEX_FLAGS);
224
225 debug_mutex_remove_waiter(lock, waiter, current);
226 }
227
228 /*
229 * Give up ownership to a specific task; when @task == NULL, this is equivalent
230 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
231 * WAITERS. Provides RELEASE semantics like a regular unlock;
232 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
233 */
234 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
235 {
236 unsigned long owner = atomic_long_read(&lock->owner);
237
238 for (;;) {
239 unsigned long old, new;
240
241 #ifdef CONFIG_DEBUG_MUTEXES
242 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
243 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
244 #endif
245
246 new = (owner & MUTEX_FLAG_WAITERS);
247 new |= (unsigned long)task;
248 if (task)
249 new |= MUTEX_FLAG_PICKUP;
250
251 old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
252 if (old == owner)
253 break;
254
255 owner = old;
256 }
257 }
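/*
 * For illustration: with a top waiter W, a handoff rewrites the owner word
 *
 *	from:	current | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *	to:	W       | MUTEX_FLAG_WAITERS | MUTEX_FLAG_PICKUP
 *
 * so only W's __mutex_trylock_or_owner() may clear PICKUP and take the lock,
 * while every other task sees a non-NULL owner and keeps waiting.
 */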
258
259 #ifndef CONFIG_DEBUG_LOCK_ALLOC
260 /*
261 * We split the mutex lock/unlock logic into separate fastpath and
262 * slowpath functions, to reduce the register pressure on the fastpath.
263 * We also put the fastpath first in the kernel image, to make sure the
264 * branch is predicted by the CPU as default-untaken.
265 */
266 static void __sched __mutex_lock_slowpath(struct mutex *lock);
267
268 /**
269 * mutex_lock - acquire the mutex
270 * @lock: the mutex to be acquired
271 *
272 * Lock the mutex exclusively for this task. If the mutex is not
273 * available right now, it will sleep until it can get it.
274 *
275 * The mutex must later on be released by the same task that
276 * acquired it. Recursive locking is not allowed. The task
277 * may not exit without first unlocking the mutex. Also, kernel
278 * memory where the mutex resides must not be freed with
279 * the mutex still locked. The mutex must first be initialized
280 * (or statically defined) before it can be locked. memset()-ing
281 * the mutex to 0 is not allowed.
282 *
283 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
284 * checks that will enforce the restrictions and will also do
285 * deadlock debugging)
286 *
287 * This function is similar to (but not equivalent to) down().
288 */
289 void __sched mutex_lock(struct mutex *lock)
290 {
291 might_sleep();
292
293 if (!__mutex_trylock_fast(lock))
294 __mutex_lock_slowpath(lock);
295 }
296 EXPORT_SYMBOL(mutex_lock);
297 #endif
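/*
 * A minimal usage sketch (hypothetical example, not an in-tree user of this
 * API): mutex_lock()/mutex_unlock() serialising updates to shared data from
 * process context. struct my_cache and my_cache_record_hit() are made up.
 */
struct my_cache {
	struct mutex lock;
	unsigned long hits;
};

static void my_cache_record_hit(struct my_cache *c)
{
	mutex_lock(&c->lock);		/* may sleep until the mutex is free */
	c->hits++;
	mutex_unlock(&c->lock);		/* released by the same task that locked it */
}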
298
299 /*
300 * Wait-Die:
301 * The newer transactions are killed when:
302 * It (the new transaction) makes a request for a lock being held
303 * by an older transaction.
304 *
305 * Wound-Wait:
306 * The newer transactions are wounded when:
307 * An older transaction makes a request for a lock being held by
308 * the newer transaction.
309 */
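/*
 * For illustration, with an older context O (smaller stamp) and a younger
 * context N (larger stamp) contending on the same ww_class:
 *
 *  Wait-Die:   N requests a lock held by O -> N backs off (-EDEADLK);
 *              O requests a lock held by N -> O waits.
 *  Wound-Wait: O requests a lock held by N -> N is wounded and backs off
 *              the next time it tries to take another lock;
 *              N requests a lock held by O -> N waits.
 */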
310
311 /*
312 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
313 * it.
314 */
315 static __always_inline void
316 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
317 {
318 #ifdef CONFIG_DEBUG_MUTEXES
319 /*
320 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
321 * but released with a normal mutex_unlock in this call.
322 *
323 * This should never happen, always use ww_mutex_unlock.
324 */
325 DEBUG_LOCKS_WARN_ON(ww->ctx);
326
327 /*
328 * Not quite done after calling ww_acquire_done() ?
329 */
330 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
331
332 if (ww_ctx->contending_lock) {
333 /*
334 * After -EDEADLK you tried to
335 * acquire a different ww_mutex? Bad!
336 */
337 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
338
339 /*
340 * You called ww_mutex_lock after receiving -EDEADLK,
341 * but 'forgot' to unlock everything else first?
342 */
343 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
344 ww_ctx->contending_lock = NULL;
345 }
346
347 /*
348 * Naughty, using a different class will lead to undefined behavior!
349 */
350 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
351 #endif
352 ww_ctx->acquired++;
353 ww->ctx = ww_ctx;
354 }
355
356 /*
357 * Determine if context @a is 'after' context @b. IOW, @a is a younger
358 * transaction than @b and depending on algorithm either needs to wait for
359 * @b or die.
360 */
361 static inline bool __sched
362 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
363 {
364
365 return (signed long)(a->stamp - b->stamp) > 0;
366 }
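/*
 * For illustration: the unsigned subtraction above, reinterpreted as signed,
 * keeps the ordering correct across stamp wrap-around. E.g. with 8-bit
 * stamps, a->stamp == 2 and b->stamp == 255 gives (s8)(2 - 255) == 3 > 0,
 * correctly marking @a as the younger (later) context.
 */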
367
368 /*
369 * Wait-Die; wake a younger waiter context (when locks held) such that it can
370 * die.
371 *
372 * Among waiters with context, only the first one can have other locks acquired
373 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
374 * __ww_mutex_check_kill() wake any but the earliest context.
375 */
376 static bool __sched
377 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
378 struct ww_acquire_ctx *ww_ctx)
379 {
380 if (!ww_ctx->is_wait_die)
381 return false;
382
383 if (waiter->ww_ctx->acquired > 0 &&
384 __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
385 debug_mutex_wake_waiter(lock, waiter);
386 wake_up_process(waiter->task);
387 }
388
389 return true;
390 }
391
392 /*
393 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
394 *
395 * Wound the lock holder if there are waiters with older transactions than
396 * the lock holder's. Even though multiple waiters may wound the lock holder,
397 * it's sufficient that only one does.
398 */
399 static bool __ww_mutex_wound(struct mutex *lock,
400 struct ww_acquire_ctx *ww_ctx,
401 struct ww_acquire_ctx *hold_ctx)
402 {
403 struct task_struct *owner = __mutex_owner(lock);
404
405 lockdep_assert_held(&lock->wait_lock);
406
407 /*
408 * Possible through __ww_mutex_add_waiter() when we race with
409 * ww_mutex_set_context_fastpath(). In that case we'll get here again
410 * through __ww_mutex_check_waiters().
411 */
412 if (!hold_ctx)
413 return false;
414
415 /*
416 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
417 * it cannot go away because we'll have FLAG_WAITERS set and hold
418 * wait_lock.
419 */
420 if (!owner)
421 return false;
422
423 if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
424 hold_ctx->wounded = 1;
425
426 /*
427 * wake_up_process() paired with set_current_state()
428 * inserts sufficient barriers to make sure @owner either sees
429 * it's wounded in __ww_mutex_check_kill() or has a
430 * wakeup pending to re-read the wounded state.
431 */
432 if (owner != current)
433 wake_up_process(owner);
434
435 return true;
436 }
437
438 return false;
439 }
440
441 /*
442 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
443 * behind us on the wait-list, check if they need to die, or wound us.
444 *
445 * See __ww_mutex_add_waiter() for the list-order construction; basically the
446 * list is ordered by stamp, smallest (oldest) first.
447 *
448 * This relies on never mixing wait-die/wound-wait on the same wait-list;
449 * which is currently ensured by that being a ww_class property.
450 *
451 * The current task must not be on the wait list.
452 */
453 static void __sched
454 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
455 {
456 struct mutex_waiter *cur;
457
458 lockdep_assert_held(&lock->wait_lock);
459
460 list_for_each_entry(cur, &lock->wait_list, list) {
461 if (!cur->ww_ctx)
462 continue;
463
464 if (__ww_mutex_die(lock, cur, ww_ctx) ||
465 __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
466 break;
467 }
468 }
469
470 /*
471 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
472 * and wake up any waiters so they can recheck.
473 */
474 static __always_inline void
475 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
476 {
477 ww_mutex_lock_acquired(lock, ctx);
478
479 /*
480 * The lock->ctx update should be visible on all cores before
481 * the WAITERS check is done, otherwise contended waiters might be
482 * missed. A contended waiter will either see ww_ctx == NULL
483 * and keep spinning, or it will acquire wait_lock, add itself
484 * to the waiter list and sleep.
485 */
486 smp_mb(); /* See comments above and below. */
487
488 /*
489 * [W] ww->ctx = ctx [W] MUTEX_FLAG_WAITERS
490 * MB MB
491 * [R] MUTEX_FLAG_WAITERS [R] ww->ctx
492 *
493 * The memory barrier above pairs with the memory barrier in
494 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
495 * and/or !empty list.
496 */
497 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
498 return;
499
500 /*
501 * Uh oh, we raced in fastpath, check if any of the waiters need to
502 * die or wound us.
503 */
504 spin_lock(&lock->base.wait_lock);
505 __ww_mutex_check_waiters(&lock->base, ctx);
506 spin_unlock(&lock->base.wait_lock);
507 }
508
509 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
510
511 static inline
512 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
513 struct mutex_waiter *waiter)
514 {
515 struct ww_mutex *ww;
516
517 ww = container_of(lock, struct ww_mutex, base);
518
519 /*
520 * If ww->ctx is set, its contents are undefined; only
521 * by acquiring wait_lock is there a guarantee that
522 * they are valid when read.
523 *
524 * As such, when deadlock detection needs to be
525 * performed the optimistic spinning cannot be done.
526 *
527 * Check this in every inner iteration because we may
528 * be racing against another thread's ww_mutex_lock.
529 */
530 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
531 return false;
532
533 /*
534 * If we aren't on the wait list yet, cancel the spin
535 * if there are waiters. We want to avoid stealing the
536 * lock from a waiter with an earlier stamp, since the
537 * other thread may already own a lock that we also
538 * need.
539 */
540 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
541 return false;
542
543 /*
544 * Similarly, stop spinning if we are no longer the
545 * first waiter.
546 */
547 if (waiter && !__mutex_waiter_is_first(lock, waiter))
548 return false;
549
550 return true;
551 }
552
553 /*
554 * Look out! "owner" is an entirely speculative pointer access and not
555 * reliable.
556 *
557 * "noinline" so that this function shows up on perf profiles.
558 */
559 static noinline
560 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
561 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
562 {
563 bool ret = true;
564
565 rcu_read_lock();
566 while (__mutex_owner(lock) == owner) {
567 /*
568 * Ensure we emit the owner->on_cpu dereference _after_
569 * checking lock->owner still matches owner. If that fails,
570 * owner might point to freed memory. If it still matches,
571 * the rcu_read_lock() ensures the memory stays valid.
572 */
573 barrier();
574
575 /*
576 * Use vcpu_is_preempted() to detect lock holder preemption.
577 */
578 if (!owner->on_cpu || need_resched() ||
579 vcpu_is_preempted(task_cpu(owner))) {
580 ret = false;
581 break;
582 }
583
584 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
585 ret = false;
586 break;
587 }
588
589 cpu_relax();
590 }
591 rcu_read_unlock();
592
593 return ret;
594 }
595
596 /*
597 * Initial check for entering the mutex spinning loop
598 */
599 static inline int mutex_can_spin_on_owner(struct mutex *lock)
600 {
601 struct task_struct *owner;
602 int retval = 1;
603
604 if (need_resched())
605 return 0;
606
607 rcu_read_lock();
608 owner = __mutex_owner(lock);
609
610 /*
611 * To avoid the lock holder preemption issue, skip spinning if the owner
612 * task is not running on a CPU or its CPU has been preempted.
613 */
614 if (owner)
615 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
616 rcu_read_unlock();
617
618 /*
619 * If lock->owner is not set, the mutex has been released. Return true
620 * such that we'll trylock in the spin path, which is a faster option
621 * than the blocking slow path.
622 */
623 return retval;
624 }
625
626 /*
627 * Optimistic spinning.
628 *
629 * We try to spin for acquisition when we find that the lock owner
630 * is currently running on a (different) CPU and while we don't
631 * need to reschedule. The rationale is that if the lock owner is
632 * running, it is likely to release the lock soon.
633 *
634 * The mutex spinners are queued up using MCS lock so that only one
635 * spinner can compete for the mutex. However, if mutex spinning isn't
636 * going to happen, there is no point in going through the lock/unlock
637 * overhead.
638 *
639 * Returns true when the lock was taken, otherwise false, indicating
640 * that we need to jump to the slowpath and sleep.
641 *
642 * The waiter flag is set to true if the spinner is a waiter in the wait
643 * queue. The waiter-spinner will spin on the lock directly and concurrently
644 * with the spinner at the head of the OSQ, if present, until the owner is
645 * changed to itself.
646 */
647 static __always_inline bool
648 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
649 struct mutex_waiter *waiter)
650 {
651 if (!waiter) {
652 /*
653 * The purpose of the mutex_can_spin_on_owner() function is
654 * to eliminate the overhead of osq_lock() and osq_unlock()
655 * in case spinning isn't possible. As a waiter-spinner
656 * is not going to take OSQ lock anyway, there is no need
657 * to call mutex_can_spin_on_owner().
658 */
659 if (!mutex_can_spin_on_owner(lock))
660 goto fail;
661
662 /*
663 * In order to avoid a stampede of mutex spinners trying to
664 * acquire the mutex all at once, the spinners need to take a
665 * MCS (queued) lock first before spinning on the owner field.
666 */
667 if (!osq_lock(&lock->osq))
668 goto fail;
669 }
670
671 for (;;) {
672 struct task_struct *owner;
673
674 /* Try to acquire the mutex... */
675 owner = __mutex_trylock_or_owner(lock);
676 if (!owner)
677 break;
678
679 /*
680 * There's an owner, wait for it to either
681 * release the lock or go to sleep.
682 */
683 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
684 goto fail_unlock;
685
686 /*
687 * The cpu_relax() call is a compiler barrier which forces
688 * everything in this loop to be re-loaded. We don't need
689 * memory barriers as we'll eventually observe the right
690 * values at the cost of a few extra spins.
691 */
692 cpu_relax();
693 }
694
695 if (!waiter)
696 osq_unlock(&lock->osq);
697
698 return true;
699
700
701 fail_unlock:
702 if (!waiter)
703 osq_unlock(&lock->osq);
704
705 fail:
706 /*
707 * If we fell out of the spin path because of need_resched(),
708 * reschedule now, before we try-lock the mutex. This avoids getting
709 * scheduled out right after we obtained the mutex.
710 */
711 if (need_resched()) {
712 /*
713 * We _should_ have TASK_RUNNING here, but just in case
714 * we do not, make it so, otherwise we might get stuck.
715 */
716 __set_current_state(TASK_RUNNING);
717 schedule_preempt_disabled();
718 }
719
720 return false;
721 }
722 #else
723 static __always_inline bool
724 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
725 struct mutex_waiter *waiter)
726 {
727 return false;
728 }
729 #endif
730
731 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
732
733 /**
734 * mutex_unlock - release the mutex
735 * @lock: the mutex to be released
736 *
737 * Unlock a mutex that has been locked by this task previously.
738 *
739 * This function must not be used in interrupt context. Unlocking
740 * of a mutex that is not locked is not allowed.
741 *
742 * This function is similar to (but not equivalent to) up().
743 */
744 void __sched mutex_unlock(struct mutex *lock)
745 {
746 #ifndef CONFIG_DEBUG_LOCK_ALLOC
747 if (__mutex_unlock_fast(lock))
748 return;
749 #endif
750 __mutex_unlock_slowpath(lock, _RET_IP_);
751 }
752 EXPORT_SYMBOL(mutex_unlock);
753
754 /**
755 * ww_mutex_unlock - release the w/w mutex
756 * @lock: the mutex to be released
757 *
758 * Unlock a mutex that has been locked by this task previously with any of the
759 * ww_mutex_lock* functions (with or without an acquire context). It is
760 * forbidden to release the locks after releasing the acquire context.
761 *
762 * This function must not be used in interrupt context. Unlocking
763 * of an unlocked mutex is not allowed.
764 */
765 void __sched ww_mutex_unlock(struct ww_mutex *lock)
766 {
767 /*
768 * The unlocking fastpath is the 0->1 transition from 'locked'
769 * into 'unlocked' state:
770 */
771 if (lock->ctx) {
772 #ifdef CONFIG_DEBUG_MUTEXES
773 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
774 #endif
775 if (lock->ctx->acquired > 0)
776 lock->ctx->acquired--;
777 lock->ctx = NULL;
778 }
779
780 mutex_unlock(&lock->base);
781 }
782 EXPORT_SYMBOL(ww_mutex_unlock);
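/*
 * A usage sketch (hypothetical example, not an in-tree user): the classic
 * ww_mutex pattern for taking two locks in one acquire context, backing off
 * and retrying on -EDEADLK. my_class, struct my_obj and my_lock_pair() are
 * made-up names; the ww_mutex_* and ww_acquire_* calls are the real API.
 */
static DEFINE_WW_CLASS(my_class);

struct my_obj {
	struct ww_mutex lock;
};

static void my_lock_pair(struct my_obj *a, struct my_obj *b,
			 struct ww_acquire_ctx *ctx)
{
	struct my_obj *first = a, *second = b;
	int err;

	ww_acquire_init(ctx, &my_class);

	err = ww_mutex_lock(&first->lock, ctx);	/* fresh ctx: cannot return -EDEADLK */
	WARN_ON(err);

	while ((err = ww_mutex_lock(&second->lock, ctx)) == -EDEADLK) {
		/* back off, sleep until the contended lock is ours, then retry */
		ww_mutex_unlock(&first->lock);
		ww_mutex_lock_slow(&second->lock, ctx);
		swap(first, second);
	}
	ww_acquire_done(ctx);

	/* ... use both objects, then ww_mutex_unlock() each and ww_acquire_fini(ctx) */
}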
783
784
785 static __always_inline int __sched
786 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
787 {
788 if (ww_ctx->acquired > 0) {
789 #ifdef CONFIG_DEBUG_MUTEXES
790 struct ww_mutex *ww;
791
792 ww = container_of(lock, struct ww_mutex, base);
793 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
794 ww_ctx->contending_lock = ww;
795 #endif
796 return -EDEADLK;
797 }
798
799 return 0;
800 }
801
802
803 /*
804 * Check the wound condition for the current lock acquire.
805 *
806 * Wound-Wait: If we're wounded, kill ourselves.
807 *
808 * Wait-Die: If we're trying to acquire a lock already held by an older
809 * context, kill ourselves.
810 *
811 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
812 * look at waiters before us in the wait-list.
813 */
814 static inline int __sched
815 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
816 struct ww_acquire_ctx *ctx)
817 {
818 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
819 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
820 struct mutex_waiter *cur;
821
822 if (ctx->acquired == 0)
823 return 0;
824
825 if (!ctx->is_wait_die) {
826 if (ctx->wounded)
827 return __ww_mutex_kill(lock, ctx);
828
829 return 0;
830 }
831
832 if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
833 return __ww_mutex_kill(lock, ctx);
834
835 /*
836 * If there is a waiter in front of us that has a context, then its
837 * stamp is earlier than ours and we must kill ourselves.
838 */
839 cur = waiter;
840 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
841 if (!cur->ww_ctx)
842 continue;
843
844 return __ww_mutex_kill(lock, ctx);
845 }
846
847 return 0;
848 }
849
850 /*
851 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
852 * smallest first, so that older contexts are preferred to acquire the lock
853 * over younger contexts.
854 *
855 * Waiters without context are interspersed in FIFO order.
856 *
857 * Furthermore, for Wait-Die, kill ourselves immediately when possible (there
858 * are older contexts already waiting) to avoid unnecessary waiting; for
859 * Wound-Wait, ensure we wound the owning context when it is younger.
860 */
861 static inline int __sched
862 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
863 struct mutex *lock,
864 struct ww_acquire_ctx *ww_ctx)
865 {
866 struct mutex_waiter *cur;
867 struct list_head *pos;
868 bool is_wait_die;
869
870 if (!ww_ctx) {
871 __mutex_add_waiter(lock, waiter, &lock->wait_list);
872 return 0;
873 }
874
875 is_wait_die = ww_ctx->is_wait_die;
876
877 /*
878 * Add the waiter before the first waiter with a higher stamp.
879 * Waiters without a context are skipped to avoid starving
880 * them. Wait-Die waiters may die here. Wound-Wait waiters
881 * never die here, but they are sorted in stamp order and
882 * may wound the lock holder.
883 */
884 pos = &lock->wait_list;
885 list_for_each_entry_reverse(cur, &lock->wait_list, list) {
886 if (!cur->ww_ctx)
887 continue;
888
889 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
890 /*
891 * Wait-Die: if we find an older context waiting, there
892 * is no point in queueing behind it, as we'd have to
893 * die the moment it would acquire the lock.
894 */
895 if (is_wait_die) {
896 int ret = __ww_mutex_kill(lock, ww_ctx);
897
898 if (ret)
899 return ret;
900 }
901
902 break;
903 }
904
905 pos = &cur->list;
906
907 /* Wait-Die: ensure younger waiters die. */
908 __ww_mutex_die(lock, cur, ww_ctx);
909 }
910
911 __mutex_add_waiter(lock, waiter, pos);
912
913 /*
914 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
915 * wound it so that we might proceed.
916 */
917 if (!is_wait_die) {
918 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
919
920 /*
921 * See ww_mutex_set_context_fastpath(). Orders setting
922 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
923 * such that either we or the fastpath will wound @ww->ctx.
924 */
925 smp_mb();
926 __ww_mutex_wound(lock, ww_ctx, ww->ctx);
927 }
928
929 return 0;
930 }
931
932 /*
933 * Lock a mutex (possibly interruptible), slowpath:
934 */
935 static __always_inline int __sched
936 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
937 struct lockdep_map *nest_lock, unsigned long ip,
938 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
939 {
940 struct mutex_waiter waiter;
941 struct ww_mutex *ww;
942 int ret;
943
944 if (!use_ww_ctx)
945 ww_ctx = NULL;
946
947 might_sleep();
948
949 #ifdef CONFIG_DEBUG_MUTEXES
950 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
951 #endif
952
953 ww = container_of(lock, struct ww_mutex, base);
954 if (ww_ctx) {
955 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
956 return -EALREADY;
957
958 /*
959 * Reset the wounded flag after a kill. No other process can
960 * race and wound us here since they can't have a valid owner
961 * pointer if we don't have any locks held.
962 */
963 if (ww_ctx->acquired == 0)
964 ww_ctx->wounded = 0;
965 }
966
967 preempt_disable();
968 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
969
970 if (__mutex_trylock(lock) ||
971 mutex_optimistic_spin(lock, ww_ctx, NULL)) {
972 /* got the lock, yay! */
973 lock_acquired(&lock->dep_map, ip);
974 if (ww_ctx)
975 ww_mutex_set_context_fastpath(ww, ww_ctx);
976 preempt_enable();
977 return 0;
978 }
979
980 spin_lock(&lock->wait_lock);
981 /*
982 * After waiting to acquire the wait_lock, try again.
983 */
984 if (__mutex_trylock(lock)) {
985 if (ww_ctx)
986 __ww_mutex_check_waiters(lock, ww_ctx);
987
988 goto skip_wait;
989 }
990
991 debug_mutex_lock_common(lock, &waiter);
992
993 lock_contended(&lock->dep_map, ip);
994
995 if (!use_ww_ctx) {
996 /* add waiting tasks to the end of the waitqueue (FIFO): */
997 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
998
999
1000 #ifdef CONFIG_DEBUG_MUTEXES
1001 waiter.ww_ctx = MUTEX_POISON_WW_CTX;
1002 #endif
1003 } else {
1004 /*
1005 * Add in stamp order, waking up waiters that must kill
1006 * themselves.
1007 */
1008 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
1009 if (ret)
1010 goto err_early_kill;
1011
1012 waiter.ww_ctx = ww_ctx;
1013 }
1014
1015 waiter.task = current;
1016
1017 set_current_state(state);
1018 for (;;) {
1019 bool first;
1020
1021 /*
1022 * Once we hold wait_lock, we're serialized against
1023 * mutex_unlock() handing the lock off to us; do a trylock
1024 * before testing the error conditions to make sure we pick up
1025 * the handoff.
1026 */
1027 if (__mutex_trylock(lock))
1028 goto acquired;
1029
1030 /*
1031 * Check for signals and kill conditions while holding
1032 * wait_lock. This ensures the lock cancellation is ordered
1033 * against mutex_unlock() and wake-ups do not go missing.
1034 */
1035 if (signal_pending_state(state, current)) {
1036 ret = -EINTR;
1037 goto err;
1038 }
1039
1040 if (ww_ctx) {
1041 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
1042 if (ret)
1043 goto err;
1044 }
1045
1046 spin_unlock(&lock->wait_lock);
1047 schedule_preempt_disabled();
1048
1049 first = __mutex_waiter_is_first(lock, &waiter);
1050 if (first)
1051 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
1052
1053 set_current_state(state);
1054 /*
1055 * Here we order against unlock; we must either see it change
1056 * state back to RUNNING and fall through the next schedule(),
1057 * or we must see its unlock and acquire.
1058 */
1059 if (__mutex_trylock(lock) ||
1060 (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
1061 break;
1062
1063 spin_lock(&lock->wait_lock);
1064 }
1065 spin_lock(&lock->wait_lock);
1066 acquired:
1067 __set_current_state(TASK_RUNNING);
1068
1069 if (ww_ctx) {
1070 /*
1071 * Wound-Wait; we stole the lock (!first_waiter), check the
1072 * waiters as anyone might want to wound us.
1073 */
1074 if (!ww_ctx->is_wait_die &&
1075 !__mutex_waiter_is_first(lock, &waiter))
1076 __ww_mutex_check_waiters(lock, ww_ctx);
1077 }
1078
1079 __mutex_remove_waiter(lock, &waiter);
1080
1081 debug_mutex_free_waiter(&waiter);
1082
1083 skip_wait:
1084 /* got the lock - cleanup and rejoice! */
1085 lock_acquired(&lock->dep_map, ip);
1086
1087 if (ww_ctx)
1088 ww_mutex_lock_acquired(ww, ww_ctx);
1089
1090 spin_unlock(&lock->wait_lock);
1091 preempt_enable();
1092 return 0;
1093
1094 err:
1095 __set_current_state(TASK_RUNNING);
1096 __mutex_remove_waiter(lock, &waiter);
1097 err_early_kill:
1098 spin_unlock(&lock->wait_lock);
1099 debug_mutex_free_waiter(&waiter);
1100 mutex_release(&lock->dep_map, ip);
1101 preempt_enable();
1102 return ret;
1103 }
1104
1105 static int __sched
1106 __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1107 struct lockdep_map *nest_lock, unsigned long ip)
1108 {
1109 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1110 }
1111
1112 static int __sched
1113 __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1114 struct lockdep_map *nest_lock, unsigned long ip,
1115 struct ww_acquire_ctx *ww_ctx)
1116 {
1117 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1118 }
1119
1120 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1121 void __sched
1122 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
1123 {
1124 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
1125 }
1126
1127 EXPORT_SYMBOL_GPL(mutex_lock_nested);
1128
1129 void __sched
1130 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
1131 {
1132 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
1133 }
1134 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
1135
1136 int __sched
1137 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
1138 {
1139 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
1140 }
1141 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
1142
1143 int __sched
1144 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
1145 {
1146 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
1147 }
1148 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
1149
1150 void __sched
1151 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
1152 {
1153 int token;
1154
1155 might_sleep();
1156
1157 token = io_schedule_prepare();
1158 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
1159 subclass, NULL, _RET_IP_, NULL, 0);
1160 io_schedule_finish(token);
1161 }
1162 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
1163
1164 static inline int
1165 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1166 {
1167 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
1168 unsigned tmp;
1169
1170 if (ctx->deadlock_inject_countdown-- == 0) {
1171 tmp = ctx->deadlock_inject_interval;
1172 if (tmp > UINT_MAX/4)
1173 tmp = UINT_MAX;
1174 else
1175 tmp = tmp*2 + tmp + tmp/2;
1176
1177 ctx->deadlock_inject_interval = tmp;
1178 ctx->deadlock_inject_countdown = tmp;
1179 ctx->contending_lock = lock;
1180
1181 ww_mutex_unlock(lock);
1182
1183 return -EDEADLK;
1184 }
1185 #endif
1186
1187 return 0;
1188 }
1189
1190 int __sched
1191 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1192 {
1193 int ret;
1194
1195 might_sleep();
1196 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1197 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1198 ctx);
1199 if (!ret && ctx && ctx->acquired > 1)
1200 return ww_mutex_deadlock_injection(lock, ctx);
1201
1202 return ret;
1203 }
1204 EXPORT_SYMBOL_GPL(ww_mutex_lock);
1205
1206 int __sched
1207 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1208 {
1209 int ret;
1210
1211 might_sleep();
1212 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1213 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1214 ctx);
1215
1216 if (!ret && ctx && ctx->acquired > 1)
1217 return ww_mutex_deadlock_injection(lock, ctx);
1218
1219 return ret;
1220 }
1221 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1222
1223 #endif
1224
1225 /*
1226 * Release the lock, slowpath:
1227 */
1228 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1229 {
1230 struct task_struct *next = NULL;
1231 DEFINE_WAKE_Q(wake_q);
1232 unsigned long owner;
1233
1234 mutex_release(&lock->dep_map, ip);
1235
1236 /*
1237 * Release the lock before (potentially) taking the spinlock such that
1238 * other contenders can get on with things ASAP.
1239 *
1240 * Except when HANDOFF, in that case we must not clear the owner field,
1241 * but instead set it to the top waiter.
1242 */
1243 owner = atomic_long_read(&lock->owner);
1244 for (;;) {
1245 unsigned long old;
1246
1247 #ifdef CONFIG_DEBUG_MUTEXES
1248 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1249 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1250 #endif
1251
1252 if (owner & MUTEX_FLAG_HANDOFF)
1253 break;
1254
1255 old = atomic_long_cmpxchg_release(&lock->owner, owner,
1256 __owner_flags(owner));
1257 if (old == owner) {
1258 if (owner & MUTEX_FLAG_WAITERS)
1259 break;
1260
1261 return;
1262 }
1263
1264 owner = old;
1265 }
1266
1267 spin_lock(&lock->wait_lock);
1268 debug_mutex_unlock(lock);
1269 if (!list_empty(&lock->wait_list)) {
1270 /* get the first entry from the wait-list: */
1271 struct mutex_waiter *waiter =
1272 list_first_entry(&lock->wait_list,
1273 struct mutex_waiter, list);
1274
1275 next = waiter->task;
1276
1277 debug_mutex_wake_waiter(lock, waiter);
1278 wake_q_add(&wake_q, next);
1279 }
1280
1281 if (owner & MUTEX_FLAG_HANDOFF)
1282 __mutex_handoff(lock, next);
1283
1284 spin_unlock(&lock->wait_lock);
1285
1286 wake_up_q(&wake_q);
1287 }
1288
1289 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1290 /*
1291 * Here come the less common (and hence less performance-critical) APIs:
1292 * mutex_lock_interruptible() and mutex_trylock().
1293 */
1294 static noinline int __sched
1295 __mutex_lock_killable_slowpath(struct mutex *lock);
1296
1297 static noinline int __sched
1298 __mutex_lock_interruptible_slowpath(struct mutex *lock);
1299
1300 /**
1301 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1302 * @lock: The mutex to be acquired.
1303 *
1304 * Lock the mutex like mutex_lock(). If a signal is delivered while the
1305 * process is sleeping, this function will return without acquiring the
1306 * mutex.
1307 *
1308 * Context: Process context.
1309 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1310 * signal arrived.
1311 */
1312 int __sched mutex_lock_interruptible(struct mutex *lock)
1313 {
1314 might_sleep();
1315
1316 if (__mutex_trylock_fast(lock))
1317 return 0;
1318
1319 return __mutex_lock_interruptible_slowpath(lock);
1320 }
1321
1322 EXPORT_SYMBOL(mutex_lock_interruptible);
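/*
 * A usage sketch (hypothetical example): propagating the error from
 * mutex_lock_interruptible() so that a signal can abort the wait.
 * struct my_dev and my_dev_update() are made-up names.
 */
struct my_dev {
	struct mutex lock;
	int state;
};

static int my_dev_update(struct my_dev *dev, int new_state)
{
	int ret;

	ret = mutex_lock_interruptible(&dev->lock);
	if (ret)
		return ret;	/* -EINTR: a signal arrived while we slept */

	dev->state = new_state;
	mutex_unlock(&dev->lock);
	return 0;
}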
1323
1324 /**
1325 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1326 * @lock: The mutex to be acquired.
1327 *
1328 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
1329 * the current process is delivered while the process is sleeping, this
1330 * function will return without acquiring the mutex.
1331 *
1332 * Context: Process context.
1333 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1334 * fatal signal arrived.
1335 */
1336 int __sched mutex_lock_killable(struct mutex *lock)
1337 {
1338 might_sleep();
1339
1340 if (__mutex_trylock_fast(lock))
1341 return 0;
1342
1343 return __mutex_lock_killable_slowpath(lock);
1344 }
1345 EXPORT_SYMBOL(mutex_lock_killable);
1346
1347 /**
1348 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1349 * @lock: The mutex to be acquired.
1350 *
1351 * Lock the mutex like mutex_lock(). While the task is waiting for this
1352 * mutex, it will be accounted as being in the IO wait state by the
1353 * scheduler.
1354 *
1355 * Context: Process context.
1356 */
1357 void __sched mutex_lock_io(struct mutex *lock)
1358 {
1359 int token;
1360
1361 token = io_schedule_prepare();
1362 mutex_lock(lock);
1363 io_schedule_finish(token);
1364 }
1365 EXPORT_SYMBOL_GPL(mutex_lock_io);
1366
1367 static noinline void __sched
1368 __mutex_lock_slowpath(struct mutex *lock)
1369 {
1370 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1371 }
1372
1373 static noinline int __sched
1374 __mutex_lock_killable_slowpath(struct mutex *lock)
1375 {
1376 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1377 }
1378
1379 static noinline int __sched
1380 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1381 {
1382 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1383 }
1384
1385 static noinline int __sched
1386 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1387 {
1388 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1389 _RET_IP_, ctx);
1390 }
1391
1392 static noinline int __sched
1393 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1394 struct ww_acquire_ctx *ctx)
1395 {
1396 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1397 _RET_IP_, ctx);
1398 }
1399
1400 #endif
1401
1402 /**
1403 * mutex_trylock - try to acquire the mutex, without waiting
1404 * @lock: the mutex to be acquired
1405 *
1406 * Try to acquire the mutex atomically. Returns 1 if the mutex
1407 * has been acquired successfully, and 0 on contention.
1408 *
1409 * NOTE: this function follows the spin_trylock() convention, so
1410 * it is negated from the down_trylock() return values! Be careful
1411 * about this when converting semaphore users to mutexes.
1412 *
1413 * This function must not be used in interrupt context. The
1414 * mutex must be released by the same task that acquired it.
1415 */
1416 int __sched mutex_trylock(struct mutex *lock)
1417 {
1418 bool locked;
1419
1420 #ifdef CONFIG_DEBUG_MUTEXES
1421 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1422 #endif
1423
1424 locked = __mutex_trylock(lock);
1425 if (locked)
1426 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1427
1428 return locked;
1429 }
1430 EXPORT_SYMBOL(mutex_trylock);
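/*
 * A usage sketch (hypothetical example): opportunistic work that must not
 * wait for the mutex. Note the spin_trylock()-style return value: non-zero
 * means the lock was taken and must be released by this task.
 */
static bool my_try_do_work(struct mutex *lock)
{
	if (!mutex_trylock(lock))
		return false;	/* contended: let the current holder do it */

	/* ... do the work protected by @lock ... */
	mutex_unlock(lock);
	return true;
}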
1431
1432 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1433 int __sched
1434 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1435 {
1436 might_sleep();
1437
1438 if (__mutex_trylock_fast(&lock->base)) {
1439 if (ctx)
1440 ww_mutex_set_context_fastpath(lock, ctx);
1441 return 0;
1442 }
1443
1444 return __ww_mutex_lock_slowpath(lock, ctx);
1445 }
1446 EXPORT_SYMBOL(ww_mutex_lock);
1447
1448 int __sched
1449 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1450 {
1451 might_sleep();
1452
1453 if (__mutex_trylock_fast(&lock->base)) {
1454 if (ctx)
1455 ww_mutex_set_context_fastpath(lock, ctx);
1456 return 0;
1457 }
1458
1459 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1460 }
1461 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1462
1463 #endif
1464
1465 /**
1466 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1467 * @cnt: the atomic which we are to dec
1468 * @lock: the mutex to return holding if we dec to 0
1469 *
1470 * return true and hold lock if we dec to 0, return false otherwise
1471 */
1472 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1473 {
1474 /* dec if we can't possibly hit 0 */
1475 if (atomic_add_unless(cnt, -1, 1))
1476 return 0;
1477 /* we might hit 0, so take the lock */
1478 mutex_lock(lock);
1479 if (!atomic_dec_and_test(cnt)) {
1480 /* when we actually did the dec, we didn't hit 0 */
1481 mutex_unlock(lock);
1482 return 0;
1483 }
1484 /* we hit 0, and we hold the lock */
1485 return 1;
1486 }
1487 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
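/*
 * A usage sketch (hypothetical example): the typical caller drops a
 * reference and, only when it hits zero, takes the mutex to unlink the
 * object. struct my_node and my_node_put() are made-up names.
 */
struct my_node {
	atomic_t refs;
	struct list_head link;
	struct mutex *list_lock;
};

static void my_node_put(struct my_node *n)
{
	if (!atomic_dec_and_mutex_lock(&n->refs, n->list_lock))
		return;			/* other references remain */

	/* refcount hit zero and we hold the list mutex */
	list_del(&n->link);
	mutex_unlock(n->list_lock);
	/* ... free @n ... */
}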
1488