/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
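
/*
 * Illustrative usage sketch (not part of the kernel source): a mutex is
 * normally defined statically with DEFINE_MUTEX() or initialized at run
 * time with mutex_init(), which expands to __mutex_init() with a per-site
 * lockdep class key. The structure and function names below are made up
 * for the example.
 *
 *	static DEFINE_MUTEX(example_lock);
 *
 *	struct example_dev {
 *		struct mutex lock;
 *		int value;
 *	};
 *
 *	static void example_dev_init(struct example_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *		dev->value = 0;
 *	}
 */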

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
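
/*
 * Illustrative usage sketch (not part of the kernel source): the common
 * pattern is to take the mutex around accesses to shared state from
 * process context and to release it from the same task that took it.
 * The names below are hypothetical.
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static int example_counter;
 *
 *	static void example_update(void)
 *	{
 *		mutex_lock(&example_lock);
 *		example_counter++;
 *		mutex_unlock(&example_lock);
 *	}
 */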

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set the ctx and wake up any waiters so they can
 * recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}


#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire a MCS lock
 * first before spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might
	 * point to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changes, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex has either been released, or
	 * the new owner has not yet had a chance to set the owner field.
	 */
	return retval;
}

/*
 * Atomically try to take the lock when it is available
 */
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
	return !mutex_is_locked(lock) &&
		(atomic_cmpxchg(&lock->count, 1, 0) == 1);
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * Since this needs the lock owner, and this mutex implementation
 * doesn't track the owner atomically in the lock field, we need to
 * track it non-atomically.
 *
 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 * to serialize everything.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set, its contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are not invalid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (mutex_try_to_acquire(lock)) {
			lock_acquired(&lock->dep_map, ip);

			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * When there's no owner, we might have been preempted between
		 * the owner acquiring the lock and setting the owner field.
		 * If we're an RT task, that can live-lock because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched())
		schedule_preempt_disabled();

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
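
/*
 * Illustrative usage sketch (not part of the kernel source): w/w mutexes
 * are taken under a struct ww_acquire_ctx so that -EDEADLK can be used to
 * back off and retry in a safe order. The very first lock in a context
 * cannot fail with -EDEADLK because nothing is held yet (the stamp check
 * in __mutex_lock_check_stamp() only runs once ctx->acquired > 0), which
 * keeps the two-lock sketch below simple. All names are hypothetical.
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &example_ww_class);
 *
 *		ww_mutex_lock(a, &ctx);
 *		while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(a);
 *			ww_mutex_lock_slow(b, &ctx);
 *			swap(a, b);
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		... touch the state protected by both locks ...
 *
 *		ww_mutex_unlock(a);
 *		ww_mutex_unlock(b);
 *		ww_acquire_fini(&ctx);
 *	}
 */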

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
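
/*
 * Illustrative usage sketch (not part of the kernel source): the _nested
 * variants only affect the lockdep annotation, for the cases where two
 * locks of the same lock class are legitimately held at the same time,
 * e.g. a parent/child relationship. SINGLE_DEPTH_NESTING comes from
 * <linux/lockdep.h>; the other names are hypothetical.
 *
 *	static void example_lock_both(struct example_node *parent,
 *				      struct example_node *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *
 *		... work on both nodes ...
 *
 *		mutex_unlock(&child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 */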

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
	unsigned long flags;

	/*
	 * As a performance measure, release the lock before doing other
	 * wakeup related duties to follow. This allows other tasks to acquire
	 * the lock sooner, while still handling cleanups in past unlock calls.
	 * This can be done as we do not enforce strict equivalence between the
	 * mutex counter and wait_list.
	 *
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_unlock_common_slowpath(lock, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
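
/*
 * Illustrative usage sketch (not part of the kernel source): callers of the
 * interruptible (and killable) variants must handle the error return when a
 * signal interrupts the wait; a driver typically restarts the syscall. The
 * names below are hypothetical.
 *
 *	static int example_set_value(struct example_dev *dev, int val)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		dev->value = val;
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */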

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	/* No need to trylock if the mutex is locked. */
	if (mutex_is_locked(lock))
		return 0;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
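
/*
 * Illustrative usage sketch (not part of the kernel source): because the
 * return convention is inverted with respect to down_trylock(), a non-zero
 * return value means the mutex is now held and must be unlocked. The names
 * below are hypothetical.
 *
 *	static void example_try_update(struct example_dev *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return;
 *		dev->value++;
 *		mutex_unlock(&dev->lock);
 *	}
 */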

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
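
/*
 * Illustrative usage sketch (not part of the kernel source): the typical
 * caller of atomic_dec_and_mutex_lock() is dropping a reference and only
 * needs the mutex for the final teardown. The names below are hypothetical.
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_list_lock))
 *			return;
 *		list_del(&obj->node);
 *		mutex_unlock(&example_list_lock);
 *		kfree(obj);
 *	}
 */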