// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <trace/hooks/dtask.h>	/* Android vendor hooks (trace_android_vh_*) */

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	else
		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
	return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
	__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
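
/*
 * Illustrative usage sketch (not part of the documented API; the lock and
 * function names below are hypothetical). A failed interruptible acquisition
 * returns -EINTR and leaves the lock untaken:
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *
 *	static int example_do_work(void)
 *	{
 *		int ret = rt_mutex_lock_interruptible(&example_lock);
 *
 *		if (ret)
 *			return ret;
 *		example_do_protected_things();
 *		rt_mutex_unlock(&example_lock);
 *		return 0;
 *	}
 */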

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret) {
		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
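
/*
 * Usage sketch (illustrative only, reusing the hypothetical example_lock
 * from the sketch above): opportunistic acquisition from task context that
 * must not sleep, deferring the work on contention instead of blocking:
 *
 *	if (rt_mutex_trylock(&example_lock)) {
 *		example_do_protected_things();
 *		rt_mutex_unlock(&example_lock);
 *	} else {
 *		example_defer_work();
 *	}
 */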

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	trace_android_vh_record_rtmutex_lock_starttime(current, 0);
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
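
/*
 * The common pattern (sketch, hypothetical names): every successful
 * rt_mutex_lock(), rt_mutex_lock_interruptible() or rt_mutex_trylock() must
 * be paired with an rt_mutex_unlock() issued by the owning task:
 *
 *	rt_mutex_lock(&example_lock);
 *	example_do_protected_things();
 *	rt_mutex_unlock(&example_lock);
 */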

/*
 * Futex variants, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return __rt_mutex_slowtrylock(lock);
}

/**
 * __rt_mutex_futex_unlock - Futex variant of rt_mutex unlock which, since
 * futex variants do not use the fast-path, can be simple and will not need
 * to retry.
 *
 * @lock: The rt_mutex to be unlocked
 * @wqh: The wake queue head from which to get the next lock waiter
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				     struct rt_wake_q_head *wqh)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup. preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wqh, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wqh);
}
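
/*
 * rt_mutex_futex_unlock() above also illustrates the expected calling
 * pattern for __rt_mutex_futex_unlock(): take lock->wait_lock, let
 * __rt_mutex_futex_unlock() mark the next waiter for wakeup, drop wait_lock
 * and then invoke rt_mutex_postunlock(), which performs the wakeup and
 * re-enables preemption.
 */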

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock: The rt_mutex to be initialized
 * @name: The lock name used for debugging
 * @key: The lock class key used for debugging
 *
 * Initialize the rt_mutex to the unlocked state.
 *
 * Initializing a locked rt_mutex is not allowed.
 */
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
			     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	__rt_mutex_base_init(&lock->rtmutex);
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
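
/*
 * Note: most callers do not use __rt_mutex_init() directly but go through
 * the rt_mutex_init() wrapper macro in <linux/rtmutex.h>, which supplies a
 * lock name and a static per-site lock_class_key for lockdep.
 */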

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 * proxy owner
 *
 * @lock: the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
					struct task_struct *proxy_owner)
{
	static struct lock_class_key pi_futex_key;

	__rt_mutex_base_init(lock);
	/*
	 * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
	 * and rtmutex based. That causes a lockdep false positive, because
	 * some of the futex functions invoke spin_unlock(&hb->lock) with
	 * the wait_lock of the rtmutex associated to the pi_futex held.
	 * spin_unlock() in turn takes wait_lock of the rtmutex on which
	 * the spinlock is based, which makes lockdep notice a lock
	 * recursion. Give the futex/rtmutex wait_lock a separate key.
	 */
	lockdep_set_class(&lock->wait_lock, &pi_futex_key);
	rt_mutex_set_owner(lock, proxy_owner);
}
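
/*
 * The proxy-locked rtmutex set up here is torn down again via
 * rt_mutex_proxy_unlock() below once the pi_state that contains it is about
 * to be freed; see the comment on that function.
 */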

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be locked
 *
 * No locking. The caller has to do the serializing itself.
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_clear_owner(lock);
}

/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock: the rt_mutex to take
 * @waiter: the pre-initialized rt_mutex_waiter
 * @task: the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock: the rt_mutex to take
 * @waiter: the pre-initialized rt_mutex_waiter
 * @task: the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock: the rt_mutex we were woken on
 * @to: the timeout, null if none. hrtimer should already have
 *	been started.
 * @waiter: the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock, true);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock: the rt_mutex we were woken on
 * @waiter: the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - we did the cleanup and are done
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          the caller should disregard its return value
 *
 * Special API call for PI-futex support
 */
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner, we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock, false);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
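
/*
 * Taken together, the proxy-lock helpers are used by the PI-futex code in
 * roughly this shape (sketch only, simplified from the futex call sites; in
 * the requeue case, where @task is not current, a return of 1 from the start
 * step additionally requires waking @task as documented above):
 *
 *	raw_spin_lock_irq(&lock->wait_lock);
 *	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
 *	raw_spin_unlock_irq(&lock->wait_lock);
 *
 *	if (!ret)
 *		ret = rt_mutex_wait_proxy_lock(lock, to, waiter);
 *	else if (ret == 1)
 *		ret = 0;	(lock was already acquired for @task)
 *
 *	if (ret && !rt_mutex_cleanup_proxy_lock(lock, waiter))
 *		ret = 0;	(we became the owner after all)
 */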

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex_base *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}
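
/*
 * This runs after the scheduler has already updated @task's scheduling
 * parameters: if the task is blocked on an rtmutex and its waiter entry no
 * longer matches its current priority/deadline, the boosting chain starting
 * at that lock is re-walked via rt_mutex_adjust_prio_chain().
 */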

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
	rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif

#ifdef CONFIG_PREEMPT_RT
/* Mutexes */
void __mutex_rt_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
					       unsigned int state,
					       unsigned int subclass,
					       struct lockdep_map *nest_lock,
					       unsigned long ip)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, ip);
	else
		lock_acquired(&lock->dep_map, ip);
	return ret;
}
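
/*
 * On PREEMPT_RT a struct mutex embeds a struct rt_mutex_base (plus the
 * lockdep map), so the regular mutex_*() API below is implemented directly
 * on top of the rtmutex core and thus gains priority inheritance.
 */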

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
				   struct lockdep_map *nest_lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
					    unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
				       unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
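
/*
 * mutex_lock_io_nested() above (and mutex_lock_io() in the
 * !CONFIG_DEBUG_LOCK_ALLOC build below) brackets the sleeping acquisition
 * with io_schedule_prepare()/io_schedule_finish() so that time spent
 * blocked on the mutex is accounted as I/O wait.
 */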

#else /* CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_lock(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token = io_schedule_prepare();

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
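
/*
 * As with rt_mutex_trylock() above, this must not be called from hard or
 * soft interrupt context; the debug build enforces that with the
 * WARN_ON_ONCE(!in_task()) check.
 */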

void __sched mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);

#endif /* CONFIG_PREEMPT_RT */