1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
4 *
5 * started by Ingo Molnar and Thomas Gleixner.
6 *
7 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
9 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
10 * Copyright (C) 2006 Esben Nielsen
11 * Adaptive Spinlocks:
12 * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
13 * and Peter Morreale,
14 * Adaptive Spinlocks simplification:
15 * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
16 *
17 * See Documentation/locking/rt-mutex-design.rst for details.
18 */
19 #include <linux/sched.h>
20 #include <linux/sched/debug.h>
21 #include <linux/sched/deadline.h>
22 #include <linux/sched/signal.h>
23 #include <linux/sched/rt.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/ww_mutex.h>
26 #include <trace/hooks/dtask.h>
27
28 #include "rtmutex_common.h"
29
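/*
 * When built with WW_RT (the ww_mutex variant), the real ww_mutex helpers
 * are pulled in below; otherwise these stubs compile the ww_mutex handling
 * away and the constant build_ww_mutex() lets the compiler drop the
 * ww_ctx branches.
 */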
30 #ifndef WW_RT
31 # define build_ww_mutex() (false)
32 # define ww_container_of(rtm) NULL
33
34 static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
35 struct rt_mutex *lock,
36 struct ww_acquire_ctx *ww_ctx)
37 {
38 return 0;
39 }
40
41 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
42 struct ww_acquire_ctx *ww_ctx)
43 {
44 }
45
46 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
47 struct ww_acquire_ctx *ww_ctx)
48 {
49 }
50
51 static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
52 struct rt_mutex_waiter *waiter,
53 struct ww_acquire_ctx *ww_ctx)
54 {
55 return 0;
56 }
57
58 #else
59 # define build_ww_mutex() (true)
60 # define ww_container_of(rtm) container_of(rtm, struct ww_mutex, base)
61 # include "ww_mutex.h"
62 #endif
63
64 /*
65 * lock->owner state tracking:
66 *
67 * lock->owner holds the task_struct pointer of the owner. Bit 0
68 * is used to keep track of the "lock has waiters" state.
69 *
70 * owner bit0
71 * NULL 0 lock is free (fast acquire possible)
72 * NULL 1 lock is free and has waiters and the top waiter
73 * is going to take the lock*
74 * taskpointer 0 lock is held (fast release possible)
75 * taskpointer 1 lock is held and has waiters**
76 *
77 * The fast atomic compare exchange based acquire and release is only
78 * possible when bit 0 of lock->owner is 0.
79 *
80 * (*) It also can be a transitional state when grabbing the lock
81 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
82 * we need to set bit 0 before looking at the lock, and the owner may be
83 * NULL in this small time, hence this can be a transitional state.
84 *
85 * (**) There is a small time when bit 0 is set but there are no
86 * waiters. This can happen when grabbing the lock in the slow path.
87 * To prevent a cmpxchg of the owner releasing the lock, we need to
88 * set this bit before looking at the lock.
89 */
90
91 static __always_inline struct task_struct *
92 rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
93 {
94 unsigned long val = (unsigned long)owner;
95
96 if (rt_mutex_has_waiters(lock))
97 val |= RT_MUTEX_HAS_WAITERS;
98
99 return (struct task_struct *)val;
100 }
101
102 static __always_inline void
103 rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
104 {
105 /*
106 * lock->wait_lock is held but explicit acquire semantics are needed
107 * for a new lock owner so WRITE_ONCE is insufficient.
108 */
109 xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
110 }
111
112 static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
113 {
114 /* lock->wait_lock is held so the unlock provides release semantics. */
115 WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
116 }
117
118 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
119 {
120 lock->owner = (struct task_struct *)
121 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
122 }
123
124 static __always_inline void
125 fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
126 {
127 unsigned long owner, *p = (unsigned long *) &lock->owner;
128
129 if (rt_mutex_has_waiters(lock))
130 return;
131
132 /*
133 * The rbtree has no waiters enqueued, now make sure that the
134 * lock->owner still has the waiters bit set, otherwise the
135 * following can happen:
136 *
137 * CPU 0 CPU 1 CPU2
138 * l->owner=T1
139 * rt_mutex_lock(l)
140 * lock(l->lock)
141 * l->owner = T1 | HAS_WAITERS;
142 * enqueue(T2)
143 * boost()
144 * unlock(l->lock)
145 * block()
146 *
147 * rt_mutex_lock(l)
148 * lock(l->lock)
149 * l->owner = T1 | HAS_WAITERS;
150 * enqueue(T3)
151 * boost()
152 * unlock(l->lock)
153 * block()
154 * signal(->T2) signal(->T3)
155 * lock(l->lock)
156 * dequeue(T2)
157 * deboost()
158 * unlock(l->lock)
159 * lock(l->lock)
160 * dequeue(T3)
161 * ==> wait list is empty
162 * deboost()
163 * unlock(l->lock)
164 * lock(l->lock)
165 * fixup_rt_mutex_waiters()
166 * if (wait_list_empty(l)) {
167 * l->owner = owner
168 * owner = l->owner & ~HAS_WAITERS;
169 * ==> l->owner = T1
170 * }
171 * lock(l->lock)
172 * rt_mutex_unlock(l) fixup_rt_mutex_waiters()
173 * if (wait_list_empty(l)) {
174 * owner = l->owner & ~HAS_WAITERS;
175 * cmpxchg(l->owner, T1, NULL)
176 * ===> Success (l->owner = NULL)
177 *
178 * l->owner = owner
179 * ==> l->owner = T1
180 * }
181 *
182 * With the check for the waiter bit in place T3 on CPU2 will not
183 * overwrite. All tasks fiddling with the waiters bit are
184 * serialized by l->lock, so nothing else can modify the waiters
185 * bit. If the bit is set then nothing can change l->owner either
186 * so the simple RMW is safe. The cmpxchg() will simply fail if it
187 * happens in the middle of the RMW because the waiters bit is
188 * still set.
189 */
190 owner = READ_ONCE(*p);
191 if (owner & RT_MUTEX_HAS_WAITERS) {
192 /*
193 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
194 * why xchg_acquire() is used for updating owner for
195 * locking and WRITE_ONCE() for unlocking.
196 *
197 * WRITE_ONCE() would work for the acquire case too, but
198 * in case that the lock acquisition failed it might
199 * force other lockers into the slow path unnecessarily.
200 */
201 if (acquire_lock)
202 xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
203 else
204 WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
205 }
206 }
207
208 /*
209 * We can speed up the acquire/release, if there's no debugging state to be
210 * set up.
211 */
212 #ifndef CONFIG_DEBUG_RT_MUTEXES
213 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
214 struct task_struct *old,
215 struct task_struct *new)
216 {
217 return try_cmpxchg_acquire(&lock->owner, &old, new);
218 }
219
220 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
221 struct task_struct *old,
222 struct task_struct *new)
223 {
224 return try_cmpxchg_release(&lock->owner, &old, new);
225 }
226
227 /*
228 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
229 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
230 * relaxed semantics suffice.
231 */
232 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
233 {
234 unsigned long owner, *p = (unsigned long *) &lock->owner;
235
236 do {
237 owner = *p;
238 } while (cmpxchg_relaxed(p, owner,
239 owner | RT_MUTEX_HAS_WAITERS) != owner);
240
241 /*
242 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
243 * operations in the event of contention. Ensure the successful
244 * cmpxchg is visible.
245 */
246 smp_mb__after_atomic();
247 }
248
249 /*
250 * Safe fastpath aware unlock:
251 * 1) Clear the waiters bit
252 * 2) Drop lock->wait_lock
253 * 3) Try to unlock the lock with cmpxchg
254 */
255 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
256 unsigned long flags)
257 __releases(lock->wait_lock)
258 {
259 struct task_struct *owner = rt_mutex_owner(lock);
260
261 clear_rt_mutex_waiters(lock);
262 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
263 /*
264 * If a new waiter comes in between the unlock and the cmpxchg
265 * we have two situations:
266 *
267 * unlock(wait_lock);
268 * lock(wait_lock);
269 * cmpxchg(p, owner, 0) == owner
270 * mark_rt_mutex_waiters(lock);
271 * acquire(lock);
272 * or:
273 *
274 * unlock(wait_lock);
275 * lock(wait_lock);
276 * mark_rt_mutex_waiters(lock);
277 *
278 * cmpxchg(p, owner, 0) != owner
279 * enqueue_waiter();
280 * unlock(wait_lock);
281 * lock(wait_lock);
282 * wake waiter();
283 * unlock(wait_lock);
284 * lock(wait_lock);
285 * acquire(lock);
286 */
287 return rt_mutex_cmpxchg_release(lock, owner, NULL);
288 }
289
290 #else
291 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
292 struct task_struct *old,
293 struct task_struct *new)
294 {
295 return false;
296
297 }
298
299 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
300 struct task_struct *old,
301 struct task_struct *new)
302 {
303 return false;
304 }
305
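/*
 * Debug-only variant: the fast path is disabled, so lock->owner is only
 * ever modified with lock->wait_lock held and a plain store suffices.
 */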
306 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
307 {
308 lock->owner = (struct task_struct *)
309 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
310 }
311
312 /*
313 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
314 */
315 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
316 unsigned long flags)
317 __releases(lock->wait_lock)
318 {
319 lock->owner = NULL;
320 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
321 return true;
322 }
323 #endif
324
325 static __always_inline int __waiter_prio(struct task_struct *task)
326 {
327 int prio = task->prio;
328 int waiter_prio = 0;
329
330 trace_android_vh_rtmutex_waiter_prio(task, &waiter_prio);
331 if (waiter_prio > 0)
332 return waiter_prio;
333
334 if (!rt_prio(prio))
335 return DEFAULT_PRIO;
336
337 return prio;
338 }
339
340 static __always_inline void
341 waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
342 {
343 waiter->prio = __waiter_prio(task);
344 waiter->deadline = task->dl.deadline;
345 }
346
347 /*
348 * Only use with rt_mutex_waiter_{less,equal}()
349 */
350 #define task_to_waiter(p) \
351 &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
352
353 static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
354 struct rt_mutex_waiter *right)
355 {
356 if (left->prio < right->prio)
357 return 1;
358
359 /*
360 * If both waiters have dl_prio(), we check the deadlines of the
361 * associated tasks.
362 * If left waiter has a dl_prio(), and we didn't return 1 above,
363 * then right waiter has a dl_prio() too.
364 */
365 if (dl_prio(left->prio))
366 return dl_time_before(left->deadline, right->deadline);
367
368 return 0;
369 }
370
371 static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
372 struct rt_mutex_waiter *right)
373 {
374 if (left->prio != right->prio)
375 return 0;
376
377 /*
378 * If both waiters have dl_prio(), we check the deadlines of the
379 * associated tasks.
380 * If left waiter has a dl_prio(), and we didn't return 0 above,
381 * then right waiter has a dl_prio() too.
382 */
383 if (dl_prio(left->prio))
384 return left->deadline == right->deadline;
385
386 return 1;
387 }
388
389 static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
390 struct rt_mutex_waiter *top_waiter)
391 {
392 bool ret = false;
393
394 if (rt_mutex_waiter_less(waiter, top_waiter))
395 return true;
396
397 trace_android_vh_rt_mutex_steal(waiter->prio, top_waiter->prio, &ret);
398 if (ret)
399 return true;
400
401 #ifdef RT_MUTEX_BUILD_SPINLOCKS
402 /*
403 * Note that RT tasks are excluded from same priority (lateral)
404 * steals to prevent the introduction of an unbounded latency.
405 */
406 if (rt_prio(waiter->prio) || dl_prio(waiter->prio))
407 return false;
408
409 return rt_mutex_waiter_equal(waiter, top_waiter);
410 #else
411 return false;
412 #endif
413 }
414
415 #define __node_2_waiter(node) \
416 rb_entry((node), struct rt_mutex_waiter, tree_entry)
417
418 static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
419 {
420 struct rt_mutex_waiter *aw = __node_2_waiter(a);
421 struct rt_mutex_waiter *bw = __node_2_waiter(b);
422
423 if (rt_mutex_waiter_less(aw, bw))
424 return 1;
425
426 if (!build_ww_mutex())
427 return 0;
428
429 if (rt_mutex_waiter_less(bw, aw))
430 return 0;
431
432 /* NOTE: relies on waiter->ww_ctx being set before insertion */
433 if (aw->ww_ctx) {
434 if (!bw->ww_ctx)
435 return 1;
436
437 return (signed long)(aw->ww_ctx->stamp -
438 bw->ww_ctx->stamp) < 0;
439 }
440
441 return 0;
442 }
443
444 static __always_inline void
445 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
446 {
447 rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
448 }
449
450 static __always_inline void
451 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
452 {
453 if (RB_EMPTY_NODE(&waiter->tree_entry))
454 return;
455
456 rb_erase_cached(&waiter->tree_entry, &lock->waiters);
457 RB_CLEAR_NODE(&waiter->tree_entry);
458 }
459
460 #define __node_2_pi_waiter(node) \
461 rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)
462
463 static __always_inline bool
464 __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
465 {
466 return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
467 }
468
469 static __always_inline void
470 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
471 {
472 rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
473 }
474
475 static __always_inline void
476 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
477 {
478 if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
479 return;
480
481 rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
482 RB_CLEAR_NODE(&waiter->pi_tree_entry);
483 }
484
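/*
 * Re-evaluate @p's effective priority from its highest priority PI waiter
 * (if any) and hand the result to the scheduler via rt_mutex_setprio().
 */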
485 static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
486 {
487 struct task_struct *pi_task = NULL;
488
489 lockdep_assert_held(&p->pi_lock);
490
491 if (task_has_pi_waiters(p))
492 pi_task = task_top_pi_waiter(p)->task;
493
494 rt_mutex_setprio(p, pi_task);
495 }
496
497 /* RT mutex specific wake_q wrappers */
498 static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
499 struct rt_mutex_waiter *w)
500 {
501 if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) {
502 if (IS_ENABLED(CONFIG_PROVE_LOCKING))
503 WARN_ON_ONCE(wqh->rtlock_task);
504 get_task_struct(w->task);
505 wqh->rtlock_task = w->task;
506 } else {
507 wake_q_add(&wqh->head, w->task);
508 }
509 }
510
511 static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
512 {
513 if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
514 wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
515 put_task_struct(wqh->rtlock_task);
516 wqh->rtlock_task = NULL;
517 }
518
519 if (!wake_q_empty(&wqh->head))
520 wake_up_q(&wqh->head);
521
522 /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
523 preempt_enable();
524 }
525
526 /*
527 * Deadlock detection is conditional:
528 *
529 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
530 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
531 *
532 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
533 * conducted independent of the detect argument.
534 *
535 * If the waiter argument is NULL this indicates the deboost path and
536 * deadlock detection is disabled independent of the detect argument
537 * and the config settings.
538 */
539 static __always_inline bool
540 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
541 enum rtmutex_chainwalk chwalk)
542 {
543 if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
544 return waiter != NULL;
545 return chwalk == RT_MUTEX_FULL_CHAINWALK;
546 }
547
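/* Return the lock @p is currently blocked on, or NULL if it is not blocked. */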
548 static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
549 {
550 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
551 }
552
553 /*
554 * Adjust the priority chain. Also used for deadlock detection.
555 * Decreases task's usage by one - may thus free the task.
556 *
557 * @task: the task owning the mutex (owner) for which a chain walk is
558 * probably needed
559 * @chwalk: do we have to carry out deadlock detection?
560 * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
561 * things for a task that has just got its priority adjusted, and
562 * is waiting on a mutex)
563 * @next_lock: the mutex on which the owner of @orig_lock was blocked before
564 * we dropped its pi_lock. Is never dereferenced, only used for
565 * comparison to detect lock chain changes.
566 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
567 * its priority to the mutex owner (can be NULL in the case
568 * depicted above or if the top waiter has gone away and we are
569 * actually deboosting the owner)
570 * @top_task: the current top waiter
571 *
572 * Returns 0 or -EDEADLK.
573 *
574 * Chain walk basics and protection scope
575 *
576 * [R] refcount on task
577 * [P] task->pi_lock held
578 * [L] rtmutex->wait_lock held
579 *
580 * Step Description Protected by
581 * function arguments:
582 * @task [R]
583 * @orig_lock if != NULL @top_task is blocked on it
584 * @next_lock Unprotected. Cannot be
585 * dereferenced. Only used for
586 * comparison.
587 * @orig_waiter if != NULL @top_task is blocked on it
588 * @top_task current, or in case of proxy
589 * locking protected by calling
590 * code
591 * again:
592 * loop_sanity_check();
593 * retry:
594 * [1] lock(task->pi_lock); [R] acquire [P]
595 * [2] waiter = task->pi_blocked_on; [P]
596 * [3] check_exit_conditions_1(); [P]
597 * [4] lock = waiter->lock; [P]
598 * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L]
599 * unlock(task->pi_lock); release [P]
600 * goto retry;
601 * }
602 * [6] check_exit_conditions_2(); [P] + [L]
603 * [7] requeue_lock_waiter(lock, waiter); [P] + [L]
604 * [8] unlock(task->pi_lock); release [P]
605 * put_task_struct(task); release [R]
606 * [9] check_exit_conditions_3(); [L]
607 * [10] task = owner(lock); [L]
608 * get_task_struct(task); [L] acquire [R]
609 * lock(task->pi_lock); [L] acquire [P]
610 * [11] requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
611 * [12] check_exit_conditions_4(); [P] + [L]
612 * [13] unlock(task->pi_lock); release [P]
613 * unlock(lock->wait_lock); release [L]
614 * goto again;
615 */
616 static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
617 enum rtmutex_chainwalk chwalk,
618 struct rt_mutex_base *orig_lock,
619 struct rt_mutex_base *next_lock,
620 struct rt_mutex_waiter *orig_waiter,
621 struct task_struct *top_task)
622 {
623 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
624 struct rt_mutex_waiter *prerequeue_top_waiter;
625 int ret = 0, depth = 0;
626 struct rt_mutex_base *lock;
627 bool detect_deadlock;
628 bool requeue = true;
629
630 detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
631
632 /*
633 * The (de)boosting is a step by step approach with a lot of
634 * pitfalls. We want this to be preemptible and we want to hold a
635 * maximum of two locks per step. So we have to check
636 * carefully whether things change under us.
637 */
638 again:
639 /*
640 * We limit the lock chain length for each invocation.
641 */
642 if (++depth > max_lock_depth) {
643 static int prev_max;
644
645 /*
646 * Print this only once. If the admin changes the limit,
647 * print a new message when reaching the limit again.
648 */
649 if (prev_max != max_lock_depth) {
650 prev_max = max_lock_depth;
651 printk(KERN_WARNING "Maximum lock depth %d reached "
652 "task: %s (%d)\n", max_lock_depth,
653 top_task->comm, task_pid_nr(top_task));
654 }
655 put_task_struct(task);
656
657 return -EDEADLK;
658 }
659
660 /*
661 * We are fully preemptible here and only hold the refcount on
662 * @task. So everything can have changed under us since the
663 * caller or our own code below (goto retry/again) dropped all
664 * locks.
665 */
666 retry:
667 /*
668 * [1] Task cannot go away as we did a get_task() before !
669 */
670 raw_spin_lock_irq(&task->pi_lock);
671
672 /*
673 * [2] Get the waiter on which @task is blocked.
674 */
675 waiter = task->pi_blocked_on;
676
677 /*
678 * [3] check_exit_conditions_1() protected by task->pi_lock.
679 */
680
681 /*
682 * Check whether the end of the boosting chain has been
683 * reached or the state of the chain has changed while we
684 * dropped the locks.
685 */
686 if (!waiter)
687 goto out_unlock_pi;
688
689 /*
690 * Check the orig_waiter state. After we dropped the locks,
691 * the previous owner of the lock might have released the lock.
692 */
693 if (orig_waiter && !rt_mutex_owner(orig_lock))
694 goto out_unlock_pi;
695
696 /*
697 * We dropped all locks after taking a refcount on @task, so
698 * the task might have moved on in the lock chain or even left
699 * the chain completely and blocks now on an unrelated lock or
700 * on @orig_lock.
701 *
702 * We stored the lock on which @task was blocked in @next_lock,
703 * so we can detect the chain change.
704 */
705 if (next_lock != waiter->lock)
706 goto out_unlock_pi;
707
708 /*
709 * There could be 'spurious' loops in the lock graph due to ww_mutex,
710 * consider:
711 *
712 * P1: A, ww_A, ww_B
713 * P2: ww_B, ww_A
714 * P3: A
715 *
716 * P3 should not return -EDEADLK because it gets trapped in the cycle
717 * created by P1 and P2 (which will resolve -- and runs into
718 * max_lock_depth above). Therefore disable detect_deadlock such that
719 * the below termination condition can trigger once all relevant tasks
720 * are boosted.
721 *
722 * Even when we start with ww_mutex we can disable deadlock detection,
723 * since we would suppress a ww_mutex induced deadlock at [6] anyway.
724 * Suppressing it here however is not sufficient since we might still
725 * hit [6] due to adjustment driven iteration.
726 *
727 * NOTE: if someone were to create a deadlock between 2 ww_classes we'd
728 * utterly fail to report it; lockdep should.
729 */
730 if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
731 detect_deadlock = false;
732
733 /*
734 * Drop out, when the task has no waiters. Note,
735 * top_waiter can be NULL, when we are in the deboosting
736 * mode!
737 */
738 if (top_waiter) {
739 if (!task_has_pi_waiters(task))
740 goto out_unlock_pi;
741 /*
742 * If deadlock detection is off, we stop here if we
743 * are not the top pi waiter of the task. If deadlock
744 * detection is enabled we continue, but stop the
745 * requeueing in the chain walk.
746 */
747 if (top_waiter != task_top_pi_waiter(task)) {
748 if (!detect_deadlock)
749 goto out_unlock_pi;
750 else
751 requeue = false;
752 }
753 }
754
755 /*
756 * If the waiter priority is the same as the task priority
757 * then there is no further priority adjustment necessary. If
758 * deadlock detection is off, we stop the chain walk. If it's
759 * enabled we continue, but stop the requeueing in the chain
760 * walk.
761 */
762 if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
763 if (!detect_deadlock)
764 goto out_unlock_pi;
765 else
766 requeue = false;
767 }
768
769 /*
770 * [4] Get the next lock
771 */
772 lock = waiter->lock;
773 /*
774 * [5] We need to trylock here as we are holding task->pi_lock,
775 * which is the reverse lock order versus the other rtmutex
776 * operations.
777 */
778 if (!raw_spin_trylock(&lock->wait_lock)) {
779 raw_spin_unlock_irq(&task->pi_lock);
780 cpu_relax();
781 goto retry;
782 }
783
784 /*
785 * [6] check_exit_conditions_2() protected by task->pi_lock and
786 * lock->wait_lock.
787 *
788 * Deadlock detection. If the lock is the same as the original
789 * lock which caused us to walk the lock chain or if the
790 * current lock is owned by the task which initiated the chain
791 * walk, we detected a deadlock.
792 */
793 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
794 ret = -EDEADLK;
795
796 /*
797 * When the deadlock is due to ww_mutex; also see above. Don't
798 * report the deadlock and instead let the ww_mutex wound/die
799 * logic pick which of the contending threads gets -EDEADLK.
800 *
801 * NOTE: assumes the cycle only contains a single ww_class; any
802 * other configuration and we fail to report; also, see
803 * lockdep.
804 */
805 if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx)
806 ret = 0;
807
808 raw_spin_unlock(&lock->wait_lock);
809 goto out_unlock_pi;
810 }
811
812 /*
813 * If we just follow the lock chain for deadlock detection, no
814 * need to do all the requeue operations. To avoid a truckload
815 * of conditionals around the various places below, just do the
816 * minimum chain walk checks.
817 */
818 if (!requeue) {
819 /*
820 * No requeue[7] here. Just release @task [8]
821 */
822 raw_spin_unlock(&task->pi_lock);
823 put_task_struct(task);
824
825 /*
826 * [9] check_exit_conditions_3 protected by lock->wait_lock.
827 * If there is no owner of the lock, end of chain.
828 */
829 if (!rt_mutex_owner(lock)) {
830 raw_spin_unlock_irq(&lock->wait_lock);
831 return 0;
832 }
833
834 /* [10] Grab the next task, i.e. owner of @lock */
835 task = get_task_struct(rt_mutex_owner(lock));
836 raw_spin_lock(&task->pi_lock);
837
838 /*
839 * No requeue [11] here. We just do deadlock detection.
840 *
841 * [12] Store whether owner is blocked
842 * itself. Decision is made after dropping the locks
843 */
844 next_lock = task_blocked_on_lock(task);
845 /*
846 * Get the top waiter for the next iteration
847 */
848 top_waiter = rt_mutex_top_waiter(lock);
849
850 /* [13] Drop locks */
851 raw_spin_unlock(&task->pi_lock);
852 raw_spin_unlock_irq(&lock->wait_lock);
853
854 /* If owner is not blocked, end of chain. */
855 if (!next_lock)
856 goto out_put_task;
857 goto again;
858 }
859
860 /*
861 * Store the current top waiter before doing the requeue
862 * operation on @lock. We need it for the boost/deboost
863 * decision below.
864 */
865 prerequeue_top_waiter = rt_mutex_top_waiter(lock);
866
867 /* [7] Requeue the waiter in the lock waiter tree. */
868 rt_mutex_dequeue(lock, waiter);
869
870 /*
871 * Update the waiter prio fields now that we're dequeued.
872 *
873 * These values can have changed through either:
874 *
875 * sys_sched_set_scheduler() / sys_sched_setattr()
876 *
877 * or
878 *
879 * DL CBS enforcement advancing the effective deadline.
880 *
881 * Even though pi_waiters also uses these fields, and that tree is only
882 * updated in [11], we can do this here, since we hold [L], which
883 * serializes all pi_waiters access and rb_erase() does not care about
884 * the values of the node being removed.
885 */
886 waiter_update_prio(waiter, task);
887
888 rt_mutex_enqueue(lock, waiter);
889
890 /* [8] Release the task */
891 raw_spin_unlock(&task->pi_lock);
892 put_task_struct(task);
893
894 /*
895 * [9] check_exit_conditions_3 protected by lock->wait_lock.
896 *
897 * We must abort the chain walk if there is no lock owner even
898 * in the deadlock detection case, as we have nothing to
899 * follow here. This is the end of the chain we are walking.
900 */
901 if (!rt_mutex_owner(lock)) {
902 /*
903 * If the requeue [7] above changed the top waiter,
904 * then we need to wake the new top waiter up to try
905 * to get the lock.
906 */
907 top_waiter = rt_mutex_top_waiter(lock);
908 if (prerequeue_top_waiter != top_waiter)
909 wake_up_state(top_waiter->task, top_waiter->wake_state);
910 raw_spin_unlock_irq(&lock->wait_lock);
911 return 0;
912 }
913
914 /* [10] Grab the next task, i.e. the owner of @lock */
915 task = get_task_struct(rt_mutex_owner(lock));
916 raw_spin_lock(&task->pi_lock);
917
918 /* [11] requeue the pi waiters if necessary */
919 if (waiter == rt_mutex_top_waiter(lock)) {
920 /*
921 * The waiter became the new top (highest priority)
922 * waiter on the lock. Replace the previous top waiter
923 * in the owner task's pi waiters tree with this waiter
924 * and adjust the priority of the owner.
925 */
926 rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
927 rt_mutex_enqueue_pi(task, waiter);
928 rt_mutex_adjust_prio(task);
929
930 } else if (prerequeue_top_waiter == waiter) {
931 /*
932 * The waiter was the top waiter on the lock, but is
933 * no longer the top priority waiter. Replace waiter in
934 * the owner task's pi waiters tree with the new top
935 * (highest priority) waiter and adjust the priority
936 * of the owner.
937 * The new top waiter is stored in @waiter so that
938 * @waiter == @top_waiter evaluates to true below and
939 * we continue to deboost the rest of the chain.
940 */
941 rt_mutex_dequeue_pi(task, waiter);
942 waiter = rt_mutex_top_waiter(lock);
943 rt_mutex_enqueue_pi(task, waiter);
944 rt_mutex_adjust_prio(task);
945 } else {
946 /*
947 * Nothing changed. No need to do any priority
948 * adjustment.
949 */
950 }
951
952 /*
953 * [12] check_exit_conditions_4() protected by task->pi_lock
954 * and lock->wait_lock. The actual decisions are made after we
955 * dropped the locks.
956 *
957 * Check whether the task which owns the current lock is pi
958 * blocked itself. If yes we store a pointer to the lock for
959 * the lock chain change detection above. After we dropped
960 * task->pi_lock next_lock cannot be dereferenced anymore.
961 */
962 next_lock = task_blocked_on_lock(task);
963 /*
964 * Store the top waiter of @lock for the end of chain walk
965 * decision below.
966 */
967 top_waiter = rt_mutex_top_waiter(lock);
968
969 /* [13] Drop the locks */
970 raw_spin_unlock(&task->pi_lock);
971 raw_spin_unlock_irq(&lock->wait_lock);
972
973 /*
974 * Make the actual exit decisions [12], based on the stored
975 * values.
976 *
977 * We reached the end of the lock chain. Stop right here. No
978 * point to go back just to figure that out.
979 */
980 if (!next_lock)
981 goto out_put_task;
982
983 /*
984 * If the current waiter is not the top waiter on the lock,
985 * then we can stop the chain walk here if we are not in full
986 * deadlock detection mode.
987 */
988 if (!detect_deadlock && waiter != top_waiter)
989 goto out_put_task;
990
991 goto again;
992
993 out_unlock_pi:
994 raw_spin_unlock_irq(&task->pi_lock);
995 out_put_task:
996 put_task_struct(task);
997
998 return ret;
999 }
1000
1001 /*
1002 * Try to take an rt-mutex
1003 *
1004 * Must be called with lock->wait_lock held and interrupts disabled
1005 *
1006 * @lock: The lock to be acquired.
1007 * @task: The task which wants to acquire the lock
1008 * @waiter: The waiter that is queued to the lock's wait tree if the
1009 * callsite called task_blocked_on_lock(), otherwise NULL
1010 */
1011 static int __sched
1012 try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
1013 struct rt_mutex_waiter *waiter)
1014 {
1015 lockdep_assert_held(&lock->wait_lock);
1016
1017 /*
1018 * Before testing whether we can acquire @lock, we set the
1019 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
1020 * other tasks which try to modify @lock into the slow path
1021 * and they serialize on @lock->wait_lock.
1022 *
1023 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
1024 * as explained at the top of this file if and only if:
1025 *
1026 * - There is a lock owner. The caller must fixup the
1027 * transient state if it does a trylock or leaves the lock
1028 * function due to a signal or timeout.
1029 *
1030 * - @task acquires the lock and there are no other
1031 * waiters. This is undone in rt_mutex_set_owner(@task) at
1032 * the end of this function.
1033 */
1034 mark_rt_mutex_waiters(lock);
1035
1036 /*
1037 * If @lock has an owner, give up.
1038 */
1039 if (rt_mutex_owner(lock))
1040 return 0;
1041
1042 /*
1043 * If @waiter != NULL, @task has already enqueued the waiter
1044 * into @lock waiter tree. If @waiter == NULL then this is a
1045 * trylock attempt.
1046 */
1047 if (waiter) {
1048 struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
1049
1050 /*
1051 * If waiter is the highest priority waiter of @lock,
1052 * or allowed to steal it, take it over.
1053 */
1054 if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
1055 /*
1056 * We can acquire the lock. Remove the waiter from the
1057 * lock waiters tree.
1058 */
1059 rt_mutex_dequeue(lock, waiter);
1060 } else {
1061 return 0;
1062 }
1063 } else {
1064 /*
1065 * If the lock has waiters already we check whether @task is
1066 * eligible to take over the lock.
1067 *
1068 * If there are no other waiters, @task can acquire
1069 * the lock. @task->pi_blocked_on is NULL, so it does
1070 * not need to be dequeued.
1071 */
1072 if (rt_mutex_has_waiters(lock)) {
1073 /* Check whether the trylock can steal it. */
1074 if (!rt_mutex_steal(task_to_waiter(task),
1075 rt_mutex_top_waiter(lock)))
1076 return 0;
1077
1078 /*
1079 * The current top waiter stays enqueued. We
1080 * don't have to change anything in the lock
1081 * waiters order.
1082 */
1083 } else {
1084 /*
1085 * No waiters. Take the lock without the
1086 * pi_lock dance. @task->pi_blocked_on is NULL
1087 * and we have no waiters to enqueue in @task
1088 * pi waiters tree.
1089 */
1090 goto takeit;
1091 }
1092 }
1093
1094 /*
1095 * Clear @task->pi_blocked_on. Requires protection by
1096 * @task->pi_lock. Redundant operation for the @waiter == NULL
1097 * case, but conditionals are more expensive than a redundant
1098 * store.
1099 */
1100 raw_spin_lock(&task->pi_lock);
1101 task->pi_blocked_on = NULL;
1102 /*
1103 * Finish the lock acquisition. @task is the new owner. If
1104 * other waiters exist we have to insert the highest priority
1105 * waiter into @task->pi_waiters tree.
1106 */
1107 if (rt_mutex_has_waiters(lock))
1108 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
1109 raw_spin_unlock(&task->pi_lock);
1110
1111 takeit:
1112 /*
1113 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
1114 * are still waiters or clears it.
1115 */
1116 rt_mutex_set_owner(lock, task);
1117
1118 return 1;
1119 }
1120
1121 /*
1122 * Task blocks on lock.
1123 *
1124 * Prepare waiter and propagate pi chain
1125 *
1126 * This must be called with lock->wait_lock held and interrupts disabled
1127 */
1128 static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
1129 struct rt_mutex_waiter *waiter,
1130 struct task_struct *task,
1131 struct ww_acquire_ctx *ww_ctx,
1132 enum rtmutex_chainwalk chwalk)
1133 {
1134 struct task_struct *owner = rt_mutex_owner(lock);
1135 struct rt_mutex_waiter *top_waiter = waiter;
1136 struct rt_mutex_base *next_lock;
1137 int chain_walk = 0, res;
1138
1139 lockdep_assert_held(&lock->wait_lock);
1140
1141 /*
1142 * Early deadlock detection. We really don't want the task to
1143 * enqueue on itself just to untangle the mess later. It's not
1144 * only an optimization. We drop the locks, so another waiter
1145 * can come in before the chain walk detects the deadlock. So
1146 * the other will detect the deadlock and return -EDEADLOCK,
1147 * which is wrong, as the other waiter is not in a deadlock
1148 * situation.
1149 */
1150 if (owner == task)
1151 return -EDEADLK;
1152
1153 trace_android_vh_task_blocks_on_rtmutex(lock, waiter, task, ww_ctx, &chwalk);
1154 raw_spin_lock(&task->pi_lock);
1155 waiter->task = task;
1156 waiter->lock = lock;
1157 waiter_update_prio(waiter, task);
1158
1159 /* Get the top priority waiter on the lock */
1160 if (rt_mutex_has_waiters(lock))
1161 top_waiter = rt_mutex_top_waiter(lock);
1162 rt_mutex_enqueue(lock, waiter);
1163
1164 task->pi_blocked_on = waiter;
1165
1166 raw_spin_unlock(&task->pi_lock);
1167
1168 if (build_ww_mutex() && ww_ctx) {
1169 struct rt_mutex *rtm;
1170
1171 /* Check whether the waiter should back out immediately */
1172 rtm = container_of(lock, struct rt_mutex, rtmutex);
1173 res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
1174 if (res) {
1175 raw_spin_lock(&task->pi_lock);
1176 rt_mutex_dequeue(lock, waiter);
1177 task->pi_blocked_on = NULL;
1178 raw_spin_unlock(&task->pi_lock);
1179 return res;
1180 }
1181 }
1182
1183 if (!owner)
1184 return 0;
1185
1186 raw_spin_lock(&owner->pi_lock);
1187 if (waiter == rt_mutex_top_waiter(lock)) {
1188 rt_mutex_dequeue_pi(owner, top_waiter);
1189 rt_mutex_enqueue_pi(owner, waiter);
1190
1191 rt_mutex_adjust_prio(owner);
1192 if (owner->pi_blocked_on)
1193 chain_walk = 1;
1194 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
1195 chain_walk = 1;
1196 }
1197
1198 /* Store the lock on which owner is blocked or NULL */
1199 next_lock = task_blocked_on_lock(owner);
1200
1201 raw_spin_unlock(&owner->pi_lock);
1202 /*
1203 * Even if full deadlock detection is on, if the owner is not
1204 * blocked itself, we can avoid finding this out in the chain
1205 * walk.
1206 */
1207 if (!chain_walk || !next_lock)
1208 return 0;
1209
1210 /*
1211 * The owner can't disappear while holding a lock,
1212 * so the owner struct is protected by wait_lock.
1213 * Gets dropped in rt_mutex_adjust_prio_chain()!
1214 */
1215 get_task_struct(owner);
1216
1217 raw_spin_unlock_irq(&lock->wait_lock);
1218
1219 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
1220 next_lock, waiter, task);
1221
1222 raw_spin_lock_irq(&lock->wait_lock);
1223
1224 return res;
1225 }
1226
1227 /*
1228 * Remove the top waiter from the current tasks pi waiter tree and
1229 * queue it up.
1230 *
1231 * Called with lock->wait_lock held and interrupts disabled.
1232 */
1233 static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
1234 struct rt_mutex_base *lock)
1235 {
1236 struct rt_mutex_waiter *waiter;
1237
1238 raw_spin_lock(¤t->pi_lock);
1239
1240 waiter = rt_mutex_top_waiter(lock);
1241
1242 /*
1243 * Remove it from current->pi_waiters and deboost.
1244 *
1245 * We must in fact deboost here in order to ensure we call
1246 * rt_mutex_setprio() to update p->pi_top_task before the
1247 * task unblocks.
1248 */
1249 rt_mutex_dequeue_pi(current, waiter);
1250 rt_mutex_adjust_prio(current);
1251
1252 /*
1253 * As we are waking up the top waiter, and the waiter stays
1254 * queued on the lock until it gets the lock, this lock
1255 * obviously has waiters. Just set the bit here and this has
1256 * the added benefit of forcing all new tasks into the
1257 * slow path making sure no task of lower priority than
1258 * the top waiter can steal this lock.
1259 */
1260 lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
1261
1262 /*
1263 * We deboosted before waking the top waiter task such that we don't
1264 * run two tasks with the 'same' priority (and ensure the
1265 * p->pi_top_task pointer points to a blocked task). This however can
1266 * lead to priority inversion if we would get preempted after the
1267 * deboost but before waking our donor task, hence the preempt_disable()
1268 * before unlock.
1269 *
1270 * Pairs with preempt_enable() in rt_mutex_wake_up_q();
1271 */
1272 preempt_disable();
1273 rt_mutex_wake_q_add(wqh, waiter);
1274 raw_spin_unlock(¤t->pi_lock);
1275 }
1276
1277 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
1278 {
1279 int ret = try_to_take_rt_mutex(lock, current, NULL);
1280
1281 /*
1282 * try_to_take_rt_mutex() sets the lock waiters bit
1283 * unconditionally. Clean this up.
1284 */
1285 fixup_rt_mutex_waiters(lock, true);
1286
1287 return ret;
1288 }
1289
1290 /*
1291 * Slow path try-lock function:
1292 */
1293 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
1294 {
1295 unsigned long flags;
1296 int ret;
1297
1298 /*
1299 * If the lock already has an owner we fail to get the lock.
1300 * This can be done without taking the @lock->wait_lock as
1301 * it is only being read, and this is a trylock anyway.
1302 */
1303 if (rt_mutex_owner(lock))
1304 return 0;
1305
1306 /*
1307 * The mutex has currently no owner. Lock the wait lock and try to
1308 * acquire the lock. We use irqsave here to support early boot calls.
1309 */
1310 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1311
1312 ret = __rt_mutex_slowtrylock(lock);
1313
1314 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1315
1316 return ret;
1317 }
1318
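/*
 * Fast path trylock: a single acquire cmpxchg of the owner field. Falls back
 * to the slow path when the lock is contended or the fast path is disabled.
 */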
1319 static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
1320 {
1321 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1322 return 1;
1323
1324 return rt_mutex_slowtrylock(lock);
1325 }
1326
1327 /*
1328 * Slow path to release a rt-mutex.
1329 */
1330 static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
1331 {
1332 DEFINE_RT_WAKE_Q(wqh);
1333 unsigned long flags;
1334
1335 /* irqsave required to support early boot calls */
1336 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1337
1338 debug_rt_mutex_unlock(lock);
1339
1340 /*
1341 * We must be careful here if the fast path is enabled. If we
1342 * have no waiters queued we cannot set owner to NULL here
1343 * because of:
1344 *
1345 * foo->lock->owner = NULL;
1346 * rtmutex_lock(foo->lock); <- fast path
1347 * free = atomic_dec_and_test(foo->refcnt);
1348 * rtmutex_unlock(foo->lock); <- fast path
1349 * if (free)
1350 * kfree(foo);
1351 * raw_spin_unlock(foo->lock->wait_lock);
1352 *
1353 * So for the fastpath enabled kernel:
1354 *
1355 * Nothing can set the waiters bit as long as we hold
1356 * lock->wait_lock. So we do the following sequence:
1357 *
1358 * owner = rt_mutex_owner(lock);
1359 * clear_rt_mutex_waiters(lock);
1360 * raw_spin_unlock(&lock->wait_lock);
1361 * if (cmpxchg(&lock->owner, owner, 0) == owner)
1362 * return;
1363 * goto retry;
1364 *
1365 * The fastpath disabled variant is simple as all access to
1366 * lock->owner is serialized by lock->wait_lock:
1367 *
1368 * lock->owner = NULL;
1369 * raw_spin_unlock(&lock->wait_lock);
1370 */
1371 while (!rt_mutex_has_waiters(lock)) {
1372 /* Drops lock->wait_lock ! */
1373 if (unlock_rt_mutex_safe(lock, flags) == true)
1374 return;
1375 /* Relock the rtmutex and try again */
1376 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1377 }
1378
1379 /*
1380 * The wakeup next waiter path does not suffer from the above
1381 * race. See the comments there.
1382 *
1383 * Queue the next waiter for wakeup once we release the wait_lock.
1384 */
1385 mark_wakeup_next_waiter(&wqh, lock);
1386 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1387
1388 rt_mutex_wake_up_q(&wqh);
1389 }
1390
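/*
 * Fast path unlock: release the lock with a single release cmpxchg when no
 * waiters are queued; otherwise fall back to the slow path to wake the top
 * waiter.
 */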
1391 static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
1392 {
1393 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
1394 return;
1395
1396 rt_mutex_slowunlock(lock);
1397 }
1398
1399 #ifdef CONFIG_SMP
1400 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
1401 struct rt_mutex_waiter *waiter,
1402 struct task_struct *owner)
1403 {
1404 bool res = true;
1405
1406 rcu_read_lock();
1407 for (;;) {
1408 /* If owner changed, trylock again. */
1409 if (owner != rt_mutex_owner(lock))
1410 break;
1411 /*
1412 * Ensure that @owner is dereferenced after checking that
1413 * the lock owner still matches @owner. If that fails,
1414 * @owner might point to freed memory. If it still matches,
1415 * the rcu_read_lock() ensures the memory stays valid.
1416 */
1417 barrier();
1418 /*
1419 * Stop spinning when:
1420 * - the lock owner has been scheduled out
1421 * - current is no longer the top waiter
1422 * - current is requested to reschedule (redundant
1423 * for CONFIG_PREEMPT_RCU=y)
1424 * - the VCPU on which owner runs is preempted
1425 */
1426 if (!owner->on_cpu || need_resched() ||
1427 !rt_mutex_waiter_is_top_waiter(lock, waiter) ||
1428 vcpu_is_preempted(task_cpu(owner))) {
1429 res = false;
1430 break;
1431 }
1432 cpu_relax();
1433 }
1434 rcu_read_unlock();
1435 return res;
1436 }
1437 #else
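/*
 * On UP the lock owner can never be running on another CPU, so optimistic
 * spinning is pointless: always tell the caller to block.
 */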
1438 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
1439 struct rt_mutex_waiter *waiter,
1440 struct task_struct *owner)
1441 {
1442 return false;
1443 }
1444 #endif
1445
1446 #ifdef RT_MUTEX_BUILD_MUTEX
1447 /*
1448 * Functions required for:
1449 * - rtmutex, futex on all kernels
1450 * - mutex and rwsem substitutions on RT kernels
1451 */
1452
1453 /*
1454 * Remove a waiter from a lock and give up
1455 *
1456 * Must be called with lock->wait_lock held and interrupts disabled. It must
1457 * have just failed to try_to_take_rt_mutex().
1458 */
1459 static void __sched remove_waiter(struct rt_mutex_base *lock,
1460 struct rt_mutex_waiter *waiter)
1461 {
1462 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
1463 struct task_struct *owner = rt_mutex_owner(lock);
1464 struct rt_mutex_base *next_lock;
1465
1466 lockdep_assert_held(&lock->wait_lock);
1467
1468 raw_spin_lock(¤t->pi_lock);
1469 rt_mutex_dequeue(lock, waiter);
1470 current->pi_blocked_on = NULL;
1471 raw_spin_unlock(¤t->pi_lock);
1472
1473 /*
1474 * Only update priority if the waiter was the highest priority
1475 * waiter of the lock and there is an owner to update.
1476 */
1477 if (!owner || !is_top_waiter)
1478 return;
1479
1480 raw_spin_lock(&owner->pi_lock);
1481
1482 rt_mutex_dequeue_pi(owner, waiter);
1483
1484 if (rt_mutex_has_waiters(lock))
1485 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
1486
1487 rt_mutex_adjust_prio(owner);
1488
1489 /* Store the lock on which owner is blocked or NULL */
1490 next_lock = task_blocked_on_lock(owner);
1491
1492 raw_spin_unlock(&owner->pi_lock);
1493
1494 /*
1495 * Don't walk the chain, if the owner task is not blocked
1496 * itself.
1497 */
1498 if (!next_lock)
1499 return;
1500
1501 /* gets dropped in rt_mutex_adjust_prio_chain()! */
1502 get_task_struct(owner);
1503
1504 raw_spin_unlock_irq(&lock->wait_lock);
1505
1506 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
1507 next_lock, NULL, current);
1508
1509 raw_spin_lock_irq(&lock->wait_lock);
1510 }
1511
1512 /**
1513 * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
1514 * @lock: the rt_mutex to take
1515 * @ww_ctx: WW mutex context pointer
1516 * @state: the state the task should block in (TASK_INTERRUPTIBLE
1517 * or TASK_UNINTERRUPTIBLE)
1518 * @timeout: the pre-initialized and started timer, or NULL for none
1519 * @waiter: the pre-initialized rt_mutex_waiter
1520 *
1521 * Must be called with lock->wait_lock held and interrupts disabled
1522 */
1523 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
1524 struct ww_acquire_ctx *ww_ctx,
1525 unsigned int state,
1526 struct hrtimer_sleeper *timeout,
1527 struct rt_mutex_waiter *waiter)
1528 {
1529 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
1530 struct task_struct *owner;
1531 int ret = 0;
1532
1533 trace_android_vh_rtmutex_wait_start(lock);
1534 for (;;) {
1535 /* Try to acquire the lock: */
1536 if (try_to_take_rt_mutex(lock, current, waiter))
1537 break;
1538
1539 if (timeout && !timeout->task) {
1540 ret = -ETIMEDOUT;
1541 break;
1542 }
1543 if (signal_pending_state(state, current)) {
1544 ret = -EINTR;
1545 break;
1546 }
1547
1548 if (build_ww_mutex() && ww_ctx) {
1549 ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
1550 if (ret)
1551 break;
1552 }
1553
1554 if (waiter == rt_mutex_top_waiter(lock))
1555 owner = rt_mutex_owner(lock);
1556 else
1557 owner = NULL;
1558 raw_spin_unlock_irq(&lock->wait_lock);
1559
1560 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
1561 schedule();
1562
1563 raw_spin_lock_irq(&lock->wait_lock);
1564 set_current_state(state);
1565 }
1566
1567 trace_android_vh_rtmutex_wait_finish(lock);
1568 __set_current_state(TASK_RUNNING);
1569 return ret;
1570 }
1571
1572 static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
1573 struct rt_mutex_waiter *w)
1574 {
1575 /*
1576 * If the result is not -EDEADLOCK or the caller requested
1577 * deadlock detection, nothing to do here.
1578 */
1579 if (res != -EDEADLOCK || detect_deadlock)
1580 return;
1581
1582 if (build_ww_mutex() && w->ww_ctx)
1583 return;
1584
1585 /*
1586 * Yell loudly and stop the task right here.
1587 */
1588 WARN(1, "rtmutex deadlock detected\n");
1589 while (1) {
1590 set_current_state(TASK_INTERRUPTIBLE);
1591 schedule();
1592 }
1593 }
1594
1595 /**
1596 * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
1597 * @lock: The rtmutex to block lock
1598 * @ww_ctx: WW mutex context pointer
1599 * @state: The task state for sleeping
1600 * @chwalk: Indicator whether full or partial chainwalk is requested
1601 * @waiter: Initializer waiter for blocking
1602 */
1603 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
1604 struct ww_acquire_ctx *ww_ctx,
1605 unsigned int state,
1606 enum rtmutex_chainwalk chwalk,
1607 struct rt_mutex_waiter *waiter)
1608 {
1609 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
1610 struct ww_mutex *ww = ww_container_of(rtm);
1611 int ret;
1612
1613 lockdep_assert_held(&lock->wait_lock);
1614
1615 /* Try to acquire the lock again: */
1616 if (try_to_take_rt_mutex(lock, current, NULL)) {
1617 if (build_ww_mutex() && ww_ctx) {
1618 __ww_mutex_check_waiters(rtm, ww_ctx);
1619 ww_mutex_lock_acquired(ww, ww_ctx);
1620 }
1621 return 0;
1622 }
1623
1624 set_current_state(state);
1625
1626 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
1627 if (likely(!ret))
1628 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
1629
1630 if (likely(!ret)) {
1631 /* acquired the lock */
1632 if (build_ww_mutex() && ww_ctx) {
1633 if (!ww_ctx->is_wait_die)
1634 __ww_mutex_check_waiters(rtm, ww_ctx);
1635 ww_mutex_lock_acquired(ww, ww_ctx);
1636 }
1637 } else {
1638 __set_current_state(TASK_RUNNING);
1639 remove_waiter(lock, waiter);
1640 rt_mutex_handle_deadlock(ret, chwalk, waiter);
1641 }
1642
1643 /*
1644 * try_to_take_rt_mutex() sets the waiter bit
1645 * unconditionally. We might have to fix that up.
1646 */
1647 fixup_rt_mutex_waiters(lock, true);
1648 return ret;
1649 }
1650
1651 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
1652 struct ww_acquire_ctx *ww_ctx,
1653 unsigned int state)
1654 {
1655 struct rt_mutex_waiter waiter;
1656 int ret;
1657
1658 rt_mutex_init_waiter(&waiter);
1659 waiter.ww_ctx = ww_ctx;
1660
1661 ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
1662 &waiter);
1663
1664 debug_rt_mutex_free_waiter(&waiter);
1665 return ret;
1666 }
1667
1668 /*
1669 * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
1670 * @lock: The rtmutex to block lock
1671 * @ww_ctx: WW mutex context pointer
1672 * @state: The task state for sleeping
1673 */
1674 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
1675 struct ww_acquire_ctx *ww_ctx,
1676 unsigned int state)
1677 {
1678 unsigned long flags;
1679 int ret;
1680
1681 /*
1682 * Technically we could use raw_spin_[un]lock_irq() here, but this can
1683 * be called in early boot if the cmpxchg() fast path is disabled
1684 * (debug, no architecture support). In this case we will acquire the
1685 * rtmutex with lock->wait_lock held. But we cannot unconditionally
1686 * enable interrupts in that early boot case. So we need to use the
1687 * irqsave/restore variants.
1688 */
1689 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1690 ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
1691 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1692
1693 return ret;
1694 }
1695
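/*
 * Fast path lock: try to transition the owner from NULL to current with an
 * acquire cmpxchg; take the slow path on contention.
 */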
1696 static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
1697 unsigned int state)
1698 {
1699 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1700 return 0;
1701
1702 return rt_mutex_slowlock(lock, NULL, state);
1703 }
1704 #endif /* RT_MUTEX_BUILD_MUTEX */
1705
1706 #ifdef RT_MUTEX_BUILD_SPINLOCKS
1707 /*
1708 * Functions required for spin/rw_lock substitution on RT kernels
1709 */
1710
1711 /**
1712 * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
1713 * @lock: The underlying RT mutex
1714 */
1715 static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
1716 {
1717 struct rt_mutex_waiter waiter;
1718 struct task_struct *owner;
1719
1720 lockdep_assert_held(&lock->wait_lock);
1721
1722 if (try_to_take_rt_mutex(lock, current, NULL))
1723 return;
1724
1725 rt_mutex_init_rtlock_waiter(&waiter);
1726
1727 /* Save current state and set state to TASK_RTLOCK_WAIT */
1728 current_save_and_set_rtlock_wait_state();
1729
1730 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
1731
1732 for (;;) {
1733 /* Try to acquire the lock again */
1734 if (try_to_take_rt_mutex(lock, current, &waiter))
1735 break;
1736
1737 if (&waiter == rt_mutex_top_waiter(lock))
1738 owner = rt_mutex_owner(lock);
1739 else
1740 owner = NULL;
1741 raw_spin_unlock_irq(&lock->wait_lock);
1742
1743 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
1744 schedule_rtlock();
1745
1746 raw_spin_lock_irq(&lock->wait_lock);
1747 set_current_state(TASK_RTLOCK_WAIT);
1748 }
1749
1750 /* Restore the task state */
1751 current_restore_rtlock_saved_state();
1752
1753 /*
1754 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
1755 * We might have to fix that up:
1756 */
1757 fixup_rt_mutex_waiters(lock, true);
1758 debug_rt_mutex_free_waiter(&waiter);
1759 }
1760
1761 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
1762 {
1763 unsigned long flags;
1764
1765 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1766 rtlock_slowlock_locked(lock);
1767 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1768 }
1769
1770 #endif /* RT_MUTEX_BUILD_SPINLOCKS */
1771