// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  struct lockdep_map *nest_lock,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	else
		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
	return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
	__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
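
/*
 * Example (illustrative sketch only, not part of this file): the usual
 * pattern for an interruptible acquisition, assuming a hypothetical
 * rt_mutex called 'my_lock':
 *
 *	if (rt_mutex_lock_interruptible(&my_lock))
 *		return -EINTR;		// interrupted by a signal
 *	...critical section...
 *	rt_mutex_unlock(&my_lock);
 */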

/**
 * rt_mutex_lock_killable - lock a rt_mutex killable
 *
 * @lock:		the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a fatal signal
 */
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret) {
		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
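
/*
 * Example (illustrative sketch): opportunistic locking with a fallback,
 * assuming a hypothetical rt_mutex called 'my_lock':
 *
 *	if (rt_mutex_trylock(&my_lock)) {
 *		...critical section...
 *		rt_mutex_unlock(&my_lock);
 *	} else {
 *		...contended: defer the work or take a blocking path...
 *	}
 */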

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	trace_android_vh_record_rtmutex_lock_starttime(current, 0);
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
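
/*
 * Example (illustrative sketch): the canonical blocking pattern, assuming
 * a hypothetical rt_mutex called 'my_lock':
 *
 *	rt_mutex_lock(&my_lock);	// may sleep; a blocked caller
 *					// priority-boosts the owner
 *	...critical section...
 *	rt_mutex_unlock(&my_lock);
 */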

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant which, since futex variants
 * do not use the fast-path, can be simple and will not need to retry.
 *
 * @lock:	The rt_mutex to be unlocked
 * @wqh:	The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				     struct rt_wake_q_head *wqh)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup.  preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wqh, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wqh);
}

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	The rt_mutex to be initialized
 * @name:	The lock name used for debugging
 * @key:	The lock class key used for debugging
 *
 * Initialize the rt_mutex to unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
			     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	__rt_mutex_base_init(&lock->rtmutex);
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
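
/*
 * Example (illustrative sketch): callers normally go through the
 * rt_mutex_init() wrapper macro rather than calling __rt_mutex_init()
 * directly, so that each init site gets its own lock class key.
 * 'my_lock' is a hypothetical lock:
 *
 *	static struct rt_mutex my_lock;
 *	rt_mutex_init(&my_lock);
 */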

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:	the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
					struct task_struct *proxy_owner)
{
	static struct lock_class_key pi_futex_key;

	__rt_mutex_base_init(lock);
	/*
	 * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
	 * and rtmutex based. That causes a lockdep false positive, because
	 * some of the futex functions invoke spin_unlock(&hb->lock) with
	 * the wait_lock of the rtmutex associated to the pi_futex held.
	 * spin_unlock() in turn takes wait_lock of the rtmutex on which
	 * the spinlock is based, which makes lockdep notice a lock
	 * recursion. Give the futex/rtmutex wait_lock a separate key.
	 */
	lockdep_set_class(&lock->wait_lock, &pi_futex_key);
	rt_mutex_set_owner(lock, proxy_owner);
}
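
/*
 * Example (illustrative, simplified from the PI-futex attach path;
 * 'pi_state' and 'owner' are placeholders here): the rtmutex embedded in
 * a freshly allocated, not yet visible pi_state is initialized with the
 * lock owner as proxy:
 *
 *	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, owner);
 */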

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_clear_owner(lock);
}

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock, true);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner, we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock, false);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
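
/*
 * Example (illustrative sketch of how the proxy-lock calls above chain
 * together, simplified from the PI-futex code; 'pi_mutex', 'waiter',
 * 'task' and 'to' are placeholders):
 *
 *	ret = rt_mutex_start_proxy_lock(pi_mutex, &waiter, task);
 *	if (ret == 1)
 *		ret = 0;		// acquired for task, wake it up
 *	else if (!ret) {
 *		// blocked: wait, then clean up if the wait failed
 *		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &waiter);
 *		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &waiter))
 *			ret = 0;	// became owner after all
 *	}
 */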

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex_base *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
	rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif

#ifdef CONFIG_PREEMPT_RT
/* Mutexes */
void __mutex_rt_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
					       unsigned int state,
					       unsigned int subclass,
					       struct lockdep_map *nest_lock,
					       unsigned long ip)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, ip);
	else
		lock_acquired(&lock->dep_map, ip);
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
				   struct lockdep_map *nest_lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
					    unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
				       unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

#else /* CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_lock(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token = io_schedule_prepare();

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);

void __sched mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);
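
/*
 * Example (illustrative): on PREEMPT_RT the regular mutex API above is
 * transparently backed by an rtmutex, so callers are unchanged. With a
 * hypothetical mutex 'm':
 *
 *	static DEFINE_MUTEX(m);
 *
 *	mutex_lock(&m);		// may sleep; PI-boosts the owner on RT
 *	...critical section...
 *	mutex_unlock(&m);
 */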

#endif /* CONFIG_PREEMPT_RT */