1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
4  *
5  * started by Ingo Molnar and Thomas Gleixner.
6  *
7  *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
9  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
10  *  Copyright (C) 2006 Esben Nielsen
11  *
12  *  See Documentation/locking/rt-mutex-design.rst for details.
13  */
14 #include <linux/spinlock.h>
15 #include <linux/export.h>
16 #include <linux/sched/signal.h>
17 #include <linux/sched/rt.h>
18 #include <linux/sched/deadline.h>
19 #include <linux/sched/wake_q.h>
20 #include <linux/sched/debug.h>
21 #include <linux/timer.h>
22 #include <trace/hooks/dtask.h>
23 
24 #include "rtmutex_common.h"
25 
26 /*
27  * lock->owner state tracking:
28  *
29  * lock->owner holds the task_struct pointer of the owner. Bit 0
30  * is used to keep track of the "lock has waiters" state.
31  *
32  * owner	bit0
33  * NULL		0	lock is free (fast acquire possible)
34  * NULL		1	lock is free and has waiters and the top waiter
35  *				is going to take the lock*
36  * taskpointer	0	lock is held (fast release possible)
37  * taskpointer	1	lock is held and has waiters**
38  *
39  * The fast atomic compare exchange based acquire and release is only
40  * possible when bit 0 of lock->owner is 0.
41  *
42  * (*) It also can be a transitional state when grabbing the lock
43  * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
44  * we need to set bit 0 before looking at the lock, and the owner may be
45  * NULL in this small time, hence this can be a transitional state.
46  *
47  * (**) There is a small time when bit 0 is set but there are no
48  * waiters. This can happen when grabbing the lock in the slow path.
49  * To prevent a cmpxchg of the owner releasing the lock, we need to
50  * set this bit before looking at the lock.
51  */
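/*
 * Illustrative sketch of the encoding (not part of the implementation):
 * the owner pointer and the waiters bit share one word, roughly
 *
 *	val   = (unsigned long)owner | RT_MUTEX_HAS_WAITERS;	       set bit 0
 *	owner = (struct task_struct *)(val & ~RT_MUTEX_HAS_WAITERS);  strip it
 *
 * rt_mutex_owner() (rtmutex_common.h) strips the bit on reads; the helpers
 * below only manage it on writes.
 */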
52 
53 static void
54 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
55 {
56 	unsigned long val = (unsigned long)owner;
57 
58 	if (rt_mutex_has_waiters(lock))
59 		val |= RT_MUTEX_HAS_WAITERS;
60 
61 	WRITE_ONCE(lock->owner, (struct task_struct *)val);
62 }
63 
64 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
65 {
66 	lock->owner = (struct task_struct *)
67 			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
68 }
69 
70 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
71 {
72 	unsigned long owner, *p = (unsigned long *) &lock->owner;
73 
74 	if (rt_mutex_has_waiters(lock))
75 		return;
76 
77 	/*
78 	 * The rbtree has no waiters enqueued, now make sure that the
79 	 * lock->owner still has the waiters bit set, otherwise the
80 	 * following can happen:
81 	 *
82 	 * CPU 0	CPU 1		CPU2
83 	 * l->owner=T1
84 	 *		rt_mutex_lock(l)
85 	 *		lock(l->lock)
86 	 *		l->owner = T1 | HAS_WAITERS;
87 	 *		enqueue(T2)
88 	 *		boost()
89 	 *		  unlock(l->lock)
90 	 *		block()
91 	 *
92 	 *				rt_mutex_lock(l)
93 	 *				lock(l->lock)
94 	 *				l->owner = T1 | HAS_WAITERS;
95 	 *				enqueue(T3)
96 	 *				boost()
97 	 *				  unlock(l->lock)
98 	 *				block()
99 	 *		signal(->T2)	signal(->T3)
100 	 *		lock(l->lock)
101 	 *		dequeue(T2)
102 	 *		deboost()
103 	 *		  unlock(l->lock)
104 	 *				lock(l->lock)
105 	 *				dequeue(T3)
106 	 *				 ==> wait list is empty
107 	 *				deboost()
108 	 *				 unlock(l->lock)
109 	 *		lock(l->lock)
110 	 *		fixup_rt_mutex_waiters()
111 	 *		  if (wait_list_empty(l)) {
112 	 *		    owner = l->owner & ~HAS_WAITERS;
113 	 *		    l->owner = owner
114 	 *		      ==> l->owner = T1
115 	 *		  }
116 	 *				lock(l->lock)
117 	 * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
118 	 *				  if (wait_list_empty(l)) {
119 	 *				    owner = l->owner & ~HAS_WAITERS;
120 	 * cmpxchg(l->owner, T1, NULL)
121 	 *  ===> Success (l->owner = NULL)
122 	 *
123 	 *				    l->owner = owner
124 	 *				      ==> l->owner = T1
125 	 *				  }
126 	 *
127 	 * With the check for the waiter bit in place T3 on CPU2 will not
128 	 * overwrite. All tasks fiddling with the waiters bit are
129 	 * serialized by l->lock, so nothing else can modify the waiters
130 	 * bit. If the bit is set then nothing can change l->owner either
131 	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
132 	 * happens in the middle of the RMW because the waiters bit is
133 	 * still set.
134 	 */
135 	owner = READ_ONCE(*p);
136 	if (owner & RT_MUTEX_HAS_WAITERS)
137 		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
138 }
139 
140 /*
141  * We can speed up the acquire/release, if there's no debugging state to be
142  * set up.
143  */
144 #ifndef CONFIG_DEBUG_RT_MUTEXES
145 # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
146 # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
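/*
 * Fast path usage sketch, as seen in the fastlock/fastunlock wrappers
 * further down: an uncontended acquire or release is a single cmpxchg
 * on lock->owner, e.g.
 *
 *	rt_mutex_cmpxchg_acquire(lock, NULL, current);	   NULL    -> current
 *	rt_mutex_cmpxchg_release(lock, current, NULL);	   current -> NULL
 *
 * Both fail once RT_MUTEX_HAS_WAITERS is set, forcing the slow path.
 */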
147 
148 /*
149  * Callers must hold the ->wait_lock -- which is the whole purpose as we force
150  * all future threads that attempt to [Rmw] the lock to the slowpath. As such
151  * relaxed semantics suffice.
152  */
153 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
154 {
155 	unsigned long owner, *p = (unsigned long *) &lock->owner;
156 
157 	do {
158 		owner = *p;
159 	} while (cmpxchg_relaxed(p, owner,
160 				 owner | RT_MUTEX_HAS_WAITERS) != owner);
161 }
162 
163 /*
164  * Safe fastpath aware unlock:
165  * 1) Clear the waiters bit
166  * 2) Drop lock->wait_lock
167  * 3) Try to unlock the lock with cmpxchg
168  */
169 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
170 					unsigned long flags)
171 	__releases(lock->wait_lock)
172 {
173 	struct task_struct *owner = rt_mutex_owner(lock);
174 
175 	clear_rt_mutex_waiters(lock);
176 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
177 	/*
178 	 * If a new waiter comes in between the unlock and the cmpxchg
179 	 * we have two situations:
180 	 *
181 	 * unlock(wait_lock);
182 	 *					lock(wait_lock);
183 	 * cmpxchg(p, owner, 0) == owner
184 	 *					mark_rt_mutex_waiters(lock);
185 	 *					acquire(lock);
186 	 * or:
187 	 *
188 	 * unlock(wait_lock);
189 	 *					lock(wait_lock);
190 	 *					mark_rt_mutex_waiters(lock);
191 	 *
192 	 * cmpxchg(p, owner, 0) != owner
193 	 *					enqueue_waiter();
194 	 *					unlock(wait_lock);
195 	 * lock(wait_lock);
196 	 * wake waiter();
197 	 * unlock(wait_lock);
198 	 *					lock(wait_lock);
199 	 *					acquire(lock);
200 	 */
201 	return rt_mutex_cmpxchg_release(lock, owner, NULL);
202 }
203 
204 #else
205 # define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
206 # define rt_mutex_cmpxchg_release(l,c,n)	(0)
207 
208 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
209 {
210 	lock->owner = (struct task_struct *)
211 			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
212 }
213 
214 /*
215  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
216  */
217 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
218 					unsigned long flags)
219 	__releases(lock->wait_lock)
220 {
221 	lock->owner = NULL;
222 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
223 	return true;
224 }
225 #endif
226 
227 /*
228  * Only use with rt_mutex_waiter_{less,equal}()
229  */
230 #define task_to_waiter(p)	\
231 	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
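/*
 * task_to_waiter() builds a temporary on-stack waiter (compound literal)
 * so a task can be compared against queued waiters, e.g. as used below:
 *
 *	rt_mutex_waiter_equal(waiter, task_to_waiter(task));
 */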
232 
233 static inline int
234 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
235 		     struct rt_mutex_waiter *right)
236 {
237 	if (left->prio < right->prio)
238 		return 1;
239 
240 	/*
241 	 * If both waiters have dl_prio(), we check the deadlines of the
242 	 * associated tasks.
243 	 * If left waiter has a dl_prio(), and we didn't return 1 above,
244 	 * then right waiter has a dl_prio() too.
245 	 */
246 	if (dl_prio(left->prio))
247 		return dl_time_before(left->deadline, right->deadline);
248 
249 	return 0;
250 }
251 
252 static inline int
253 rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
254 		      struct rt_mutex_waiter *right)
255 {
256 	if (left->prio != right->prio)
257 		return 0;
258 
259 	/*
260 	 * If both waiters have dl_prio(), we check the deadlines of the
261 	 * associated tasks.
262 	 * If left waiter has a dl_prio(), and we didn't return 0 above,
263 	 * then right waiter has a dl_prio() too.
264 	 */
265 	if (dl_prio(left->prio))
266 		return left->deadline == right->deadline;
267 
268 	return 1;
269 }
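/*
 * Illustrative ordering example (kernel-internal ->prio, lower value means
 * higher priority): a waiter with ->prio 10 sorts before one with ->prio 50.
 * SCHED_DEADLINE waiters all carry the same ->prio, so the earlier absolute
 * deadline wins the tie-break above.
 */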
270 
271 static void
272 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
273 {
274 	struct rb_node **link = &lock->waiters.rb_root.rb_node;
275 	struct rb_node *parent = NULL;
276 	struct rt_mutex_waiter *entry;
277 	bool leftmost = true;
278 
279 	while (*link) {
280 		parent = *link;
281 		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
282 		if (rt_mutex_waiter_less(waiter, entry)) {
283 			link = &parent->rb_left;
284 		} else {
285 			link = &parent->rb_right;
286 			leftmost = false;
287 		}
288 	}
289 
290 	rb_link_node(&waiter->tree_entry, parent, link);
291 	rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
292 }
293 
294 static void
295 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
296 {
297 	if (RB_EMPTY_NODE(&waiter->tree_entry))
298 		return;
299 
300 	rb_erase_cached(&waiter->tree_entry, &lock->waiters);
301 	RB_CLEAR_NODE(&waiter->tree_entry);
302 }
303 
304 static void
305 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
306 {
307 	struct rb_node **link = &task->pi_waiters.rb_root.rb_node;
308 	struct rb_node *parent = NULL;
309 	struct rt_mutex_waiter *entry;
310 	bool leftmost = true;
311 
312 	while (*link) {
313 		parent = *link;
314 		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
315 		if (rt_mutex_waiter_less(waiter, entry)) {
316 			link = &parent->rb_left;
317 		} else {
318 			link = &parent->rb_right;
319 			leftmost = false;
320 		}
321 	}
322 
323 	rb_link_node(&waiter->pi_tree_entry, parent, link);
324 	rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
325 }
326 
327 static void
328 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
329 {
330 	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
331 		return;
332 
333 	rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
334 	RB_CLEAR_NODE(&waiter->pi_tree_entry);
335 }
336 
337 static void rt_mutex_adjust_prio(struct task_struct *p)
338 {
339 	struct task_struct *pi_task = NULL;
340 
341 	lockdep_assert_held(&p->pi_lock);
342 
343 	if (task_has_pi_waiters(p))
344 		pi_task = task_top_pi_waiter(p)->task;
345 
346 	rt_mutex_setprio(p, pi_task);
347 }
348 
349 /*
350  * Deadlock detection is conditional:
351  *
352  * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
353  * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
354  *
355  * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
356  * conducted independent of the detect argument.
357  *
358  * If the waiter argument is NULL this indicates the deboost path and
359  * deadlock detection is disabled independent of the detect argument
360  * and the config settings.
361  */
362 static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
363 					  enum rtmutex_chainwalk chwalk)
364 {
365 	/*
366 	 * This is just a wrapper function for the following call,
367 	 * because debug_rt_mutex_detect_deadlock() smells like a magic
368 	 * debug feature and I wanted to keep the cond function in the
369 	 * main source file along with the comments instead of having
370 	 * two of the same in the headers.
371 	 */
372 	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
373 }
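/*
 * For reference, rtmutex_common.h provides the two chain walk modes used
 * here: RT_MUTEX_MIN_CHAINWALK (stop the walk as early as possible) and
 * RT_MUTEX_FULL_CHAINWALK (walk the whole chain for deadlock detection,
 * e.g. enforced for futexes in __rt_mutex_start_proxy_lock()).
 */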
374 
375 /*
376  * Max number of times we'll walk the boosting chain:
377  */
378 int max_lock_depth = 1024;
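/*
 * max_lock_depth is tunable at runtime via sysctl (kernel.max_lock_depth,
 * see kernel/sysctl.c), so an administrator can raise the limit for
 * unusually deep lock chains, e.g.:
 *
 *	sysctl -w kernel.max_lock_depth=2048
 */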
379 
380 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
381 {
382 	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
383 }
384 
385 /*
386  * Adjust the priority chain. Also used for deadlock detection.
387  * Decreases task's usage by one - may thus free the task.
388  *
389  * @task:	the task owning the mutex (owner) for which a chain walk is
390  *		probably needed
391  * @chwalk:	do we have to carry out deadlock detection?
392  * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
393  *		things for a task that has just got its priority adjusted, and
394  *		is waiting on a mutex)
395  * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
396  *		we dropped its pi_lock. Is never dereferenced, only used for
397  *		comparison to detect lock chain changes.
398  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
399  *		its priority to the mutex owner (can be NULL in the case
400  *		depicted above or if the top waiter has gone away and we are
401  *		actually deboosting the owner)
402  * @top_task:	the current top waiter
403  *
404  * Returns 0 or -EDEADLK.
405  *
406  * Chain walk basics and protection scope
407  *
408  * [R] refcount on task
409  * [P] task->pi_lock held
410  * [L] rtmutex->wait_lock held
411  *
412  * Step	Description				Protected by
413  *	function arguments:
414  *	@task					[R]
415  *	@orig_lock if != NULL			@top_task is blocked on it
416  *	@next_lock				Unprotected. Cannot be
417  *						dereferenced. Only used for
418  *						comparison.
419  *	@orig_waiter if != NULL			@top_task is blocked on it
420  *	@top_task				current, or in case of proxy
421  *						locking protected by calling
422  *						code
423  *	again:
424  *	  loop_sanity_check();
425  *	retry:
426  * [1]	  lock(task->pi_lock);			[R] acquire [P]
427  * [2]	  waiter = task->pi_blocked_on;		[P]
428  * [3]	  check_exit_conditions_1();		[P]
429  * [4]	  lock = waiter->lock;			[P]
430  * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
431  *	    unlock(task->pi_lock);		release [P]
432  *	    goto retry;
433  *	  }
434  * [6]	  check_exit_conditions_2();		[P] + [L]
435  * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
436  * [8]	  unlock(task->pi_lock);		release [P]
437  *	  put_task_struct(task);		release [R]
438  * [9]	  check_exit_conditions_3();		[L]
439  * [10]	  task = owner(lock);			[L]
440  *	  get_task_struct(task);		[L] acquire [R]
441  *	  lock(task->pi_lock);			[L] acquire [P]
442  * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
443  * [12]	  check_exit_conditions_4();		[P] + [L]
444  * [13]	  unlock(task->pi_lock);		release [P]
445  *	  unlock(lock->wait_lock);		release [L]
446  *	  goto again;
447  */
448 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
449 				      enum rtmutex_chainwalk chwalk,
450 				      struct rt_mutex *orig_lock,
451 				      struct rt_mutex *next_lock,
452 				      struct rt_mutex_waiter *orig_waiter,
453 				      struct task_struct *top_task)
454 {
455 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
456 	struct rt_mutex_waiter *prerequeue_top_waiter;
457 	int ret = 0, depth = 0;
458 	struct rt_mutex *lock;
459 	bool detect_deadlock;
460 	bool requeue = true;
461 
462 	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
463 
464 	/*
465 	 * The (de)boosting is a step by step approach with a lot of
466 	 * pitfalls. We want this to be preemptible and we want to hold a
467 	 * maximum of two locks per step. So we have to check
468 	 * carefully whether things change under us.
469 	 */
470  again:
471 	/*
472 	 * We limit the lock chain length for each invocation.
473 	 */
474 	if (++depth > max_lock_depth) {
475 		static int prev_max;
476 
477 		/*
478 		 * Print this only once. If the admin changes the limit,
479 		 * print a new message when reaching the limit again.
480 		 */
481 		if (prev_max != max_lock_depth) {
482 			prev_max = max_lock_depth;
483 			printk(KERN_WARNING "Maximum lock depth %d reached "
484 			       "task: %s (%d)\n", max_lock_depth,
485 			       top_task->comm, task_pid_nr(top_task));
486 		}
487 		put_task_struct(task);
488 
489 		return -EDEADLK;
490 	}
491 
492 	/*
493 	 * We are fully preemptible here and only hold the refcount on
494 	 * @task. So everything can have changed under us since the
495 	 * caller or our own code below (goto retry/again) dropped all
496 	 * locks.
497 	 */
498  retry:
499 	/*
500 	 * [1] Task cannot go away as we did a get_task() before !
501 	 */
502 	raw_spin_lock_irq(&task->pi_lock);
503 
504 	/*
505 	 * [2] Get the waiter on which @task is blocked.
506 	 */
507 	waiter = task->pi_blocked_on;
508 
509 	/*
510 	 * [3] check_exit_conditions_1() protected by task->pi_lock.
511 	 */
512 
513 	/*
514 	 * Check whether the end of the boosting chain has been
515 	 * reached or the state of the chain has changed while we
516 	 * dropped the locks.
517 	 */
518 	if (!waiter)
519 		goto out_unlock_pi;
520 
521 	/*
522 	 * Check the orig_waiter state. After we dropped the locks,
523 	 * the previous owner of the lock might have released the lock.
524 	 */
525 	if (orig_waiter && !rt_mutex_owner(orig_lock))
526 		goto out_unlock_pi;
527 
528 	/*
529 	 * We dropped all locks after taking a refcount on @task, so
530 	 * the task might have moved on in the lock chain or even left
531 	 * the chain completely and blocks now on an unrelated lock or
532 	 * on @orig_lock.
533 	 *
534 	 * We stored the lock on which @task was blocked in @next_lock,
535 	 * so we can detect the chain change.
536 	 */
537 	if (next_lock != waiter->lock)
538 		goto out_unlock_pi;
539 
540 	/*
541 	 * Drop out, when the task has no waiters. Note,
542 	 * top_waiter can be NULL, when we are in the deboosting
543 	 * mode!
544 	 */
545 	if (top_waiter) {
546 		if (!task_has_pi_waiters(task))
547 			goto out_unlock_pi;
548 		/*
549 		 * If deadlock detection is off, we stop here if we
550 		 * are not the top pi waiter of the task. If deadlock
551 		 * detection is enabled we continue, but stop the
552 		 * requeueing in the chain walk.
553 		 */
554 		if (top_waiter != task_top_pi_waiter(task)) {
555 			if (!detect_deadlock)
556 				goto out_unlock_pi;
557 			else
558 				requeue = false;
559 		}
560 	}
561 
562 	/*
563 	 * If the waiter priority is the same as the task priority
564 	 * then there is no further priority adjustment necessary.  If
565 	 * deadlock detection is off, we stop the chain walk. If it's
566 	 * enabled we continue, but stop the requeueing in the chain
567 	 * walk.
568 	 */
569 	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
570 		if (!detect_deadlock)
571 			goto out_unlock_pi;
572 		else
573 			requeue = false;
574 	}
575 
576 	/*
577 	 * [4] Get the next lock
578 	 */
579 	lock = waiter->lock;
580 	/*
581 	 * [5] We need to trylock here as we are holding task->pi_lock,
582 	 * which is the reverse lock order versus the other rtmutex
583 	 * operations.
584 	 */
585 	if (!raw_spin_trylock(&lock->wait_lock)) {
586 		raw_spin_unlock_irq(&task->pi_lock);
587 		cpu_relax();
588 		goto retry;
589 	}
590 
591 	/*
592 	 * [6] check_exit_conditions_2() protected by task->pi_lock and
593 	 * lock->wait_lock.
594 	 *
595 	 * Deadlock detection. If the lock is the same as the original
596 	 * lock which caused us to walk the lock chain or if the
597 	 * current lock is owned by the task which initiated the chain
598 	 * walk, we detected a deadlock.
599 	 */
600 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
601 		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
602 		raw_spin_unlock(&lock->wait_lock);
603 		ret = -EDEADLK;
604 		goto out_unlock_pi;
605 	}
606 
607 	/*
608 	 * If we just follow the lock chain for deadlock detection, no
609 	 * need to do all the requeue operations. To avoid a truckload
610 	 * of conditionals around the various places below, just do the
611 	 * minimum chain walk checks.
612 	 */
613 	if (!requeue) {
614 		/*
615 		 * No requeue[7] here. Just release @task [8]
616 		 */
617 		raw_spin_unlock(&task->pi_lock);
618 		put_task_struct(task);
619 
620 		/*
621 		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
622 		 * If there is no owner of the lock, end of chain.
623 		 */
624 		if (!rt_mutex_owner(lock)) {
625 			raw_spin_unlock_irq(&lock->wait_lock);
626 			return 0;
627 		}
628 
629 		/* [10] Grab the next task, i.e. owner of @lock */
630 		task = get_task_struct(rt_mutex_owner(lock));
631 		raw_spin_lock(&task->pi_lock);
632 
633 		/*
634 		 * No requeue [11] here. We just do deadlock detection.
635 		 *
636 		 * [12] Store whether owner is blocked
637 		 * itself. Decision is made after dropping the locks
638 		 */
639 		next_lock = task_blocked_on_lock(task);
640 		/*
641 		 * Get the top waiter for the next iteration
642 		 */
643 		top_waiter = rt_mutex_top_waiter(lock);
644 
645 		/* [13] Drop locks */
646 		raw_spin_unlock(&task->pi_lock);
647 		raw_spin_unlock_irq(&lock->wait_lock);
648 
649 		/* If owner is not blocked, end of chain. */
650 		if (!next_lock)
651 			goto out_put_task;
652 		goto again;
653 	}
654 
655 	/*
656 	 * Store the current top waiter before doing the requeue
657 	 * operation on @lock. We need it for the boost/deboost
658 	 * decision below.
659 	 */
660 	prerequeue_top_waiter = rt_mutex_top_waiter(lock);
661 
662 	/* [7] Requeue the waiter in the lock waiter tree. */
663 	rt_mutex_dequeue(lock, waiter);
664 
665 	/*
666 	 * Update the waiter prio fields now that we're dequeued.
667 	 *
668 	 * These values can have changed through either:
669 	 *
670 	 *   sys_sched_set_scheduler() / sys_sched_setattr()
671 	 *
672 	 * or
673 	 *
674 	 *   DL CBS enforcement advancing the effective deadline.
675 	 *
676 	 * Even though pi_waiters also uses these fields, and that tree is only
677 	 * updated in [11], we can do this here, since we hold [L], which
678 	 * serializes all pi_waiters access and rb_erase() does not care about
679 	 * the values of the node being removed.
680 	 */
681 	waiter->prio = task->prio;
682 	waiter->deadline = task->dl.deadline;
683 
684 	rt_mutex_enqueue(lock, waiter);
685 
686 	/* [8] Release the task */
687 	raw_spin_unlock(&task->pi_lock);
688 	put_task_struct(task);
689 
690 	/*
691 	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
692 	 *
693 	 * We must abort the chain walk if there is no lock owner even
694 	 * in the deadlock detection case, as we have nothing to
695 	 * follow here. This is the end of the chain we are walking.
696 	 */
697 	if (!rt_mutex_owner(lock)) {
698 		/*
699 		 * If the requeue [7] above changed the top waiter,
700 		 * then we need to wake the new top waiter up to try
701 		 * to get the lock.
702 		 */
703 		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
704 			wake_up_process(rt_mutex_top_waiter(lock)->task);
705 		raw_spin_unlock_irq(&lock->wait_lock);
706 		return 0;
707 	}
708 
709 	/* [10] Grab the next task, i.e. the owner of @lock */
710 	task = get_task_struct(rt_mutex_owner(lock));
711 	raw_spin_lock(&task->pi_lock);
712 
713 	/* [11] requeue the pi waiters if necessary */
714 	if (waiter == rt_mutex_top_waiter(lock)) {
715 		/*
716 		 * The waiter became the new top (highest priority)
717 		 * waiter on the lock. Replace the previous top waiter
718 		 * in the owner task's pi waiters tree with this waiter
719 		 * and adjust the priority of the owner.
720 		 */
721 		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
722 		rt_mutex_enqueue_pi(task, waiter);
723 		rt_mutex_adjust_prio(task);
724 
725 	} else if (prerequeue_top_waiter == waiter) {
726 		/*
727 		 * The waiter was the top waiter on the lock, but is
728 		 * no longer the top priority waiter. Replace waiter in
729 		 * the owner task's pi waiters tree with the new top
730 		 * (highest priority) waiter and adjust the priority
731 		 * of the owner.
732 		 * The new top waiter is stored in @waiter so that
733 		 * @waiter == @top_waiter evaluates to true below and
734 		 * we continue to deboost the rest of the chain.
735 		 */
736 		rt_mutex_dequeue_pi(task, waiter);
737 		waiter = rt_mutex_top_waiter(lock);
738 		rt_mutex_enqueue_pi(task, waiter);
739 		rt_mutex_adjust_prio(task);
740 	} else {
741 		/*
742 		 * Nothing changed. No need to do any priority
743 		 * adjustment.
744 		 */
745 	}
746 
747 	/*
748 	 * [12] check_exit_conditions_4() protected by task->pi_lock
749 	 * and lock->wait_lock. The actual decisions are made after we
750 	 * dropped the locks.
751 	 *
752 	 * Check whether the task which owns the current lock is pi
753 	 * blocked itself. If yes we store a pointer to the lock for
754 	 * the lock chain change detection above. After we dropped
755 	 * task->pi_lock next_lock cannot be dereferenced anymore.
756 	 */
757 	next_lock = task_blocked_on_lock(task);
758 	/*
759 	 * Store the top waiter of @lock for the end of chain walk
760 	 * decision below.
761 	 */
762 	top_waiter = rt_mutex_top_waiter(lock);
763 
764 	/* [13] Drop the locks */
765 	raw_spin_unlock(&task->pi_lock);
766 	raw_spin_unlock_irq(&lock->wait_lock);
767 
768 	/*
769 	 * Make the actual exit decisions [12], based on the stored
770 	 * values.
771 	 *
772 	 * We reached the end of the lock chain. Stop right here. No
773 	 * point to go back just to figure that out.
774 	 */
775 	if (!next_lock)
776 		goto out_put_task;
777 
778 	/*
779 	 * If the current waiter is not the top waiter on the lock,
780 	 * then we can stop the chain walk here if we are not in full
781 	 * deadlock detection mode.
782 	 */
783 	if (!detect_deadlock && waiter != top_waiter)
784 		goto out_put_task;
785 
786 	goto again;
787 
788  out_unlock_pi:
789 	raw_spin_unlock_irq(&task->pi_lock);
790  out_put_task:
791 	put_task_struct(task);
792 
793 	return ret;
794 }
795 
796 /*
797  * Try to take an rt-mutex
798  *
799  * Must be called with lock->wait_lock held and interrupts disabled
800  *
801  * @lock:   The lock to be acquired.
802  * @task:   The task which wants to acquire the lock
803  * @waiter: The waiter that is queued to the lock's wait tree if the
804  *	    callsite called task_blocked_on_lock(), otherwise NULL
805  */
806 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
807 				struct rt_mutex_waiter *waiter)
808 {
809 	lockdep_assert_held(&lock->wait_lock);
810 
811 	/*
812 	 * Before testing whether we can acquire @lock, we set the
813 	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
814 	 * other tasks which try to modify @lock into the slow path
815 	 * and they serialize on @lock->wait_lock.
816 	 *
817 	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
818 	 * as explained at the top of this file if and only if:
819 	 *
820 	 * - There is a lock owner. The caller must fixup the
821 	 *   transient state if it does a trylock or leaves the lock
822 	 *   function due to a signal or timeout.
823 	 *
824 	 * - @task acquires the lock and there are no other
825 	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
826 	 *   the end of this function.
827 	 */
828 	mark_rt_mutex_waiters(lock);
829 
830 	/*
831 	 * If @lock has an owner, give up.
832 	 */
833 	if (rt_mutex_owner(lock))
834 		return 0;
835 
836 	/*
837 	 * If @waiter != NULL, @task has already enqueued the waiter
838 	 * into @lock waiter tree. If @waiter == NULL then this is a
839 	 * trylock attempt.
840 	 */
841 	if (waiter) {
842 		/*
843 		 * If waiter is not the highest priority waiter of
844 		 * @lock, give up.
845 		 */
846 		if (waiter != rt_mutex_top_waiter(lock))
847 			return 0;
848 
849 		/*
850 		 * We can acquire the lock. Remove the waiter from the
851 		 * lock waiters tree.
852 		 */
853 		rt_mutex_dequeue(lock, waiter);
854 
855 	} else {
856 		/*
857 		 * If the lock has waiters already we check whether @task is
858 		 * eligible to take over the lock.
859 		 *
860 		 * If there are no other waiters, @task can acquire
861 		 * the lock.  @task->pi_blocked_on is NULL, so it does
862 		 * not need to be dequeued.
863 		 */
864 		if (rt_mutex_has_waiters(lock)) {
865 			/*
866 			 * If @task->prio is greater than or equal to
867 			 * the top waiter priority (kernel view),
868 			 * @task lost.
869 			 */
870 			if (!rt_mutex_waiter_less(task_to_waiter(task),
871 						  rt_mutex_top_waiter(lock)))
872 				return 0;
873 
874 			/*
875 			 * The current top waiter stays enqueued. We
876 			 * don't have to change anything in the lock
877 			 * waiters order.
878 			 */
879 		} else {
880 			/*
881 			 * No waiters. Take the lock without the
882 			 * pi_lock dance. @task->pi_blocked_on is NULL
883 			 * and we have no waiters to enqueue in @task
884 			 * pi waiters tree.
885 			 */
886 			goto takeit;
887 		}
888 	}
889 
890 	/*
891 	 * Clear @task->pi_blocked_on. Requires protection by
892 	 * @task->pi_lock. Redundant operation for the @waiter == NULL
893 	 * case, but conditionals are more expensive than a redundant
894 	 * store.
895 	 */
896 	raw_spin_lock(&task->pi_lock);
897 	task->pi_blocked_on = NULL;
898 	/*
899 	 * Finish the lock acquisition. @task is the new owner. If
900 	 * other waiters exist we have to insert the highest priority
901 	 * waiter into @task->pi_waiters tree.
902 	 */
903 	if (rt_mutex_has_waiters(lock))
904 		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
905 	raw_spin_unlock(&task->pi_lock);
906 
907 takeit:
908 	/* We got the lock. */
909 	debug_rt_mutex_lock(lock);
910 
911 	/*
912 	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
913 	 * are still waiters or clears it.
914 	 */
915 	rt_mutex_set_owner(lock, task);
916 
917 	return 1;
918 }
919 
920 /*
921  * Task blocks on lock.
922  *
923  * Prepare waiter and propagate pi chain
924  *
925  * This must be called with lock->wait_lock held and interrupts disabled
926  */
927 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
928 				   struct rt_mutex_waiter *waiter,
929 				   struct task_struct *task,
930 				   enum rtmutex_chainwalk chwalk)
931 {
932 	struct task_struct *owner = rt_mutex_owner(lock);
933 	struct rt_mutex_waiter *top_waiter = waiter;
934 	struct rt_mutex *next_lock;
935 	int chain_walk = 0, res;
936 
937 	lockdep_assert_held(&lock->wait_lock);
938 
939 	/*
940 	 * Early deadlock detection. We really don't want the task to
941 	 * enqueue on itself just to untangle the mess later. It's not
942 	 * only an optimization. We drop the locks, so another waiter
943 	 * can come in before the chain walk detects the deadlock. So
944 	 * the other will detect the deadlock and return -EDEADLOCK,
945 	 * which is wrong, as the other waiter is not in a deadlock
946 	 * situation.
947 	 */
948 	if (owner == task)
949 		return -EDEADLK;
950 
951 	raw_spin_lock(&task->pi_lock);
952 	waiter->task = task;
953 	waiter->lock = lock;
954 	waiter->prio = task->prio;
955 	waiter->deadline = task->dl.deadline;
956 
957 	/* Get the top priority waiter on the lock */
958 	if (rt_mutex_has_waiters(lock))
959 		top_waiter = rt_mutex_top_waiter(lock);
960 	rt_mutex_enqueue(lock, waiter);
961 
962 	task->pi_blocked_on = waiter;
963 
964 	raw_spin_unlock(&task->pi_lock);
965 
966 	if (!owner)
967 		return 0;
968 
969 	raw_spin_lock(&owner->pi_lock);
970 	if (waiter == rt_mutex_top_waiter(lock)) {
971 		rt_mutex_dequeue_pi(owner, top_waiter);
972 		rt_mutex_enqueue_pi(owner, waiter);
973 
974 		rt_mutex_adjust_prio(owner);
975 		if (owner->pi_blocked_on)
976 			chain_walk = 1;
977 	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
978 		chain_walk = 1;
979 	}
980 
981 	/* Store the lock on which owner is blocked or NULL */
982 	next_lock = task_blocked_on_lock(owner);
983 
984 	raw_spin_unlock(&owner->pi_lock);
985 	/*
986 	 * Even if full deadlock detection is on, if the owner is not
987 	 * blocked itself, we can avoid finding this out in the chain
988 	 * walk.
989 	 */
990 	if (!chain_walk || !next_lock)
991 		return 0;
992 
993 	/*
994 	 * The owner can't disappear while holding a lock,
995 	 * so the owner struct is protected by wait_lock.
996 	 * Gets dropped in rt_mutex_adjust_prio_chain()!
997 	 */
998 	get_task_struct(owner);
999 
1000 	raw_spin_unlock_irq(&lock->wait_lock);
1001 
1002 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
1003 					 next_lock, waiter, task);
1004 
1005 	raw_spin_lock_irq(&lock->wait_lock);
1006 
1007 	return res;
1008 }
1009 
1010 /*
1011  * Remove the top waiter from the current task's pi waiter tree and
1012  * queue it up.
1013  *
1014  * Called with lock->wait_lock held and interrupts disabled.
1015  */
1016 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
1017 				    struct rt_mutex *lock)
1018 {
1019 	struct rt_mutex_waiter *waiter;
1020 
1021 	raw_spin_lock(&current->pi_lock);
1022 
1023 	waiter = rt_mutex_top_waiter(lock);
1024 
1025 	/*
1026 	 * Remove it from current->pi_waiters and deboost.
1027 	 *
1028 	 * We must in fact deboost here in order to ensure we call
1029 	 * rt_mutex_setprio() to update p->pi_top_task before the
1030 	 * task unblocks.
1031 	 */
1032 	rt_mutex_dequeue_pi(current, waiter);
1033 	rt_mutex_adjust_prio(current);
1034 
1035 	/*
1036 	 * As we are waking up the top waiter, and the waiter stays
1037 	 * queued on the lock until it gets the lock, this lock
1038 	 * obviously has waiters. Just set the bit here and this has
1039 	 * the added benefit of forcing all new tasks into the
1040 	 * slow path making sure no task of lower priority than
1041 	 * the top waiter can steal this lock.
1042 	 */
1043 	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
1044 
1045 	/*
1046 	 * We deboosted before waking the top waiter task such that we don't
1047 	 * run two tasks with the 'same' priority (and ensure the
1048 	 * p->pi_top_task pointer points to a blocked task). This however can
1049 	 * lead to priority inversion if we would get preempted after the
1050 	 * deboost but before waking our donor task, hence the preempt_disable()
1051 	 * before unlock.
1052 	 *
1053 	 * Pairs with preempt_enable() in rt_mutex_postunlock();
1054 	 */
1055 	preempt_disable();
1056 	wake_q_add(wake_q, waiter->task);
1057 	raw_spin_unlock(&current->pi_lock);
1058 }
1059 
1060 /*
1061  * Remove a waiter from a lock and give up
1062  *
1063  * Must be called with lock->wait_lock held and interrupts disabled. It must
1064  * have just failed to try_to_take_rt_mutex().
1065  */
1066 static void remove_waiter(struct rt_mutex *lock,
1067 			  struct rt_mutex_waiter *waiter)
1068 {
1069 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
1070 	struct task_struct *owner = rt_mutex_owner(lock);
1071 	struct rt_mutex *next_lock;
1072 
1073 	lockdep_assert_held(&lock->wait_lock);
1074 
1075 	raw_spin_lock(&current->pi_lock);
1076 	rt_mutex_dequeue(lock, waiter);
1077 	current->pi_blocked_on = NULL;
1078 	raw_spin_unlock(&current->pi_lock);
1079 
1080 	/*
1081 	 * Only update priority if the waiter was the highest priority
1082 	 * waiter of the lock and there is an owner to update.
1083 	 */
1084 	if (!owner || !is_top_waiter)
1085 		return;
1086 
1087 	raw_spin_lock(&owner->pi_lock);
1088 
1089 	rt_mutex_dequeue_pi(owner, waiter);
1090 
1091 	if (rt_mutex_has_waiters(lock))
1092 		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
1093 
1094 	rt_mutex_adjust_prio(owner);
1095 
1096 	/* Store the lock on which owner is blocked or NULL */
1097 	next_lock = task_blocked_on_lock(owner);
1098 
1099 	raw_spin_unlock(&owner->pi_lock);
1100 
1101 	/*
1102 	 * Don't walk the chain, if the owner task is not blocked
1103 	 * itself.
1104 	 */
1105 	if (!next_lock)
1106 		return;
1107 
1108 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
1109 	get_task_struct(owner);
1110 
1111 	raw_spin_unlock_irq(&lock->wait_lock);
1112 
1113 	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
1114 				   next_lock, NULL, current);
1115 
1116 	raw_spin_lock_irq(&lock->wait_lock);
1117 }
1118 
1119 /*
1120  * Recheck the pi chain, in case we got a priority setting
1121  *
1122  * Called from sched_setscheduler
1123  */
1124 void rt_mutex_adjust_pi(struct task_struct *task)
1125 {
1126 	struct rt_mutex_waiter *waiter;
1127 	struct rt_mutex *next_lock;
1128 	unsigned long flags;
1129 
1130 	raw_spin_lock_irqsave(&task->pi_lock, flags);
1131 
1132 	waiter = task->pi_blocked_on;
1133 	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
1134 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1135 		return;
1136 	}
1137 	next_lock = waiter->lock;
1138 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
1139 
1140 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
1141 	get_task_struct(task);
1142 
1143 	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
1144 				   next_lock, NULL, task);
1145 }
1146 
1147 void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
1148 {
1149 	debug_rt_mutex_init_waiter(waiter);
1150 	RB_CLEAR_NODE(&waiter->pi_tree_entry);
1151 	RB_CLEAR_NODE(&waiter->tree_entry);
1152 	waiter->task = NULL;
1153 }
1154 
1155 /**
1156  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
1157  * @lock:		 the rt_mutex to take
1158  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
1159  *			 or TASK_UNINTERRUPTIBLE)
1160  * @timeout:		 the pre-initialized and started timer, or NULL for none
1161  * @waiter:		 the pre-initialized rt_mutex_waiter
1162  *
1163  * Must be called with lock->wait_lock held and interrupts disabled
1164  */
1165 static int __sched
1166 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
1167 		    struct hrtimer_sleeper *timeout,
1168 		    struct rt_mutex_waiter *waiter)
1169 {
1170 	int ret = 0;
1171 
1172 	trace_android_vh_rtmutex_wait_start(lock);
1173 	for (;;) {
1174 		/* Try to acquire the lock: */
1175 		if (try_to_take_rt_mutex(lock, current, waiter))
1176 			break;
1177 
1178 		/*
1179 		 * TASK_INTERRUPTIBLE checks for signals and
1180 		 * timeout. Ignored otherwise.
1181 		 */
1182 		if (likely(state == TASK_INTERRUPTIBLE)) {
1183 			/* Signal pending? */
1184 			if (signal_pending(current))
1185 				ret = -EINTR;
1186 			if (timeout && !timeout->task)
1187 				ret = -ETIMEDOUT;
1188 			if (ret)
1189 				break;
1190 		}
1191 
1192 		raw_spin_unlock_irq(&lock->wait_lock);
1193 
1194 		debug_rt_mutex_print_deadlock(waiter);
1195 
1196 		schedule();
1197 
1198 		raw_spin_lock_irq(&lock->wait_lock);
1199 		set_current_state(state);
1200 	}
1201 
1202 	trace_android_vh_rtmutex_wait_finish(lock);
1203 	__set_current_state(TASK_RUNNING);
1204 	return ret;
1205 }
1206 
1207 static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
1208 				     struct rt_mutex_waiter *w)
1209 {
1210 	/*
1211 	 * If the result is not -EDEADLOCK or the caller requested
1212 	 * deadlock detection, nothing to do here.
1213 	 */
1214 	if (res != -EDEADLOCK || detect_deadlock)
1215 		return;
1216 
1217 	/*
1218 	 * Yell loudly and stop the task right here.
1219 	 */
1220 	rt_mutex_print_deadlock(w);
1221 	while (1) {
1222 		set_current_state(TASK_INTERRUPTIBLE);
1223 		schedule();
1224 	}
1225 }
1226 
1227 /*
1228  * Slow path lock function:
1229  */
1230 static int __sched
1231 rt_mutex_slowlock(struct rt_mutex *lock, int state,
1232 		  struct hrtimer_sleeper *timeout,
1233 		  enum rtmutex_chainwalk chwalk)
1234 {
1235 	struct rt_mutex_waiter waiter;
1236 	unsigned long flags;
1237 	int ret = 0;
1238 
1239 	rt_mutex_init_waiter(&waiter);
1240 
1241 	/*
1242 	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
1243 	 * be called in early boot if the cmpxchg() fast path is disabled
1244 	 * (debug, no architecture support). In this case we will acquire the
1245 	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
1246 	 * enable interrupts in that early boot case. So we need to use the
1247 	 * irqsave/restore variants.
1248 	 */
1249 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
1250 
1251 	/* Try to acquire the lock again: */
1252 	if (try_to_take_rt_mutex(lock, current, NULL)) {
1253 		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1254 		return 0;
1255 	}
1256 
1257 	set_current_state(state);
1258 
1259 	/* Setup the timer, when timeout != NULL */
1260 	if (unlikely(timeout))
1261 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1262 
1263 	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
1264 
1265 	if (likely(!ret))
1266 		/* sleep on the mutex */
1267 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
1268 
1269 	if (unlikely(ret)) {
1270 		__set_current_state(TASK_RUNNING);
1271 		remove_waiter(lock, &waiter);
1272 		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
1273 	}
1274 
1275 	/*
1276 	 * try_to_take_rt_mutex() sets the waiter bit
1277 	 * unconditionally. We might have to fix that up.
1278 	 */
1279 	fixup_rt_mutex_waiters(lock);
1280 
1281 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1282 
1283 	/* Remove pending timer: */
1284 	if (unlikely(timeout))
1285 		hrtimer_cancel(&timeout->timer);
1286 
1287 	debug_rt_mutex_free_waiter(&waiter);
1288 
1289 	return ret;
1290 }
1291 
1292 static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
1293 {
1294 	int ret = try_to_take_rt_mutex(lock, current, NULL);
1295 
1296 	/*
1297 	 * try_to_take_rt_mutex() sets the lock waiters bit
1298 	 * unconditionally. Clean this up.
1299 	 */
1300 	fixup_rt_mutex_waiters(lock);
1301 
1302 	return ret;
1303 }
1304 
1305 /*
1306  * Slow path try-lock function:
1307  */
1308 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
1309 {
1310 	unsigned long flags;
1311 	int ret;
1312 
1313 	/*
1314 	 * If the lock already has an owner we fail to get the lock.
1315 	 * This can be done without taking the @lock->wait_lock as
1316 	 * it is only being read, and this is a trylock anyway.
1317 	 */
1318 	if (rt_mutex_owner(lock))
1319 		return 0;
1320 
1321 	/*
1322 	 * The mutex has currently no owner. Lock the wait lock and try to
1323 	 * acquire the lock. We use irqsave here to support early boot calls.
1324 	 */
1325 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
1326 
1327 	ret = __rt_mutex_slowtrylock(lock);
1328 
1329 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1330 
1331 	return ret;
1332 }
1333 
1334 /*
1335  * Slow path to release a rt-mutex.
1336  *
1337  * Return whether the current task needs to call rt_mutex_postunlock().
1338  */
1339 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
1340 					struct wake_q_head *wake_q)
1341 {
1342 	unsigned long flags;
1343 
1344 	/* irqsave required to support early boot calls */
1345 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
1346 
1347 	debug_rt_mutex_unlock(lock);
1348 
1349 	/*
1350 	 * We must be careful here if the fast path is enabled. If we
1351 	 * have no waiters queued we cannot set owner to NULL here
1352 	 * because of:
1353 	 *
1354 	 * foo->lock->owner = NULL;
1355 	 *			rtmutex_lock(foo->lock);   <- fast path
1356 	 *			free = atomic_dec_and_test(foo->refcnt);
1357 	 *			rtmutex_unlock(foo->lock); <- fast path
1358 	 *			if (free)
1359 	 *				kfree(foo);
1360 	 * raw_spin_unlock(foo->lock->wait_lock);
1361 	 *
1362 	 * So for the fastpath enabled kernel:
1363 	 *
1364 	 * Nothing can set the waiters bit as long as we hold
1365 	 * lock->wait_lock. So we do the following sequence:
1366 	 *
1367 	 *	owner = rt_mutex_owner(lock);
1368 	 *	clear_rt_mutex_waiters(lock);
1369 	 *	raw_spin_unlock(&lock->wait_lock);
1370 	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
1371 	 *		return;
1372 	 *	goto retry;
1373 	 *
1374 	 * The fastpath disabled variant is simple as all access to
1375 	 * lock->owner is serialized by lock->wait_lock:
1376 	 *
1377 	 *	lock->owner = NULL;
1378 	 *	raw_spin_unlock(&lock->wait_lock);
1379 	 */
1380 	while (!rt_mutex_has_waiters(lock)) {
1381 		/* Drops lock->wait_lock ! */
1382 		if (unlock_rt_mutex_safe(lock, flags) == true)
1383 			return false;
1384 		/* Relock the rtmutex and try again */
1385 		raw_spin_lock_irqsave(&lock->wait_lock, flags);
1386 	}
1387 
1388 	/*
1389 	 * The wakeup next waiter path does not suffer from the above
1390 	 * race. See the comments there.
1391 	 *
1392 	 * Queue the next waiter for wakeup once we release the wait_lock.
1393 	 */
1394 	mark_wakeup_next_waiter(wake_q, lock);
1395 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1396 
1397 	return true; /* call rt_mutex_postunlock() */
1398 }
1399 
1400 /*
1401  * debug aware fast / slowpath lock, trylock and unlock
1402  *
1403  * The atomic acquire/release ops are compiled away, when either the
1404  * architecture does not support cmpxchg or when debugging is enabled.
1405  */
1406 static inline int
1407 rt_mutex_fastlock(struct rt_mutex *lock, int state,
1408 		  int (*slowfn)(struct rt_mutex *lock, int state,
1409 				struct hrtimer_sleeper *timeout,
1410 				enum rtmutex_chainwalk chwalk))
1411 {
1412 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1413 		return 0;
1414 
1415 	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
1416 }
1417 
1418 static inline int
1419 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
1420 			struct hrtimer_sleeper *timeout,
1421 			enum rtmutex_chainwalk chwalk,
1422 			int (*slowfn)(struct rt_mutex *lock, int state,
1423 				      struct hrtimer_sleeper *timeout,
1424 				      enum rtmutex_chainwalk chwalk))
1425 {
1426 	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
1427 	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1428 		return 0;
1429 
1430 	return slowfn(lock, state, timeout, chwalk);
1431 }
1432 
1433 static inline int
1434 rt_mutex_fasttrylock(struct rt_mutex *lock,
1435 		     int (*slowfn)(struct rt_mutex *lock))
1436 {
1437 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1438 		return 1;
1439 
1440 	return slowfn(lock);
1441 }
1442 
1443 /*
1444  * Performs the wakeup of the top-waiter and re-enables preemption.
1445  */
1446 void rt_mutex_postunlock(struct wake_q_head *wake_q)
1447 {
1448 	wake_up_q(wake_q);
1449 
1450 	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
1451 	preempt_enable();
1452 }
1453 
1454 static inline void
1455 rt_mutex_fastunlock(struct rt_mutex *lock,
1456 		    bool (*slowfn)(struct rt_mutex *lock,
1457 				   struct wake_q_head *wqh))
1458 {
1459 	DEFINE_WAKE_Q(wake_q);
1460 
1461 	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
1462 		return;
1463 
1464 	if (slowfn(lock, &wake_q))
1465 		rt_mutex_postunlock(&wake_q);
1466 }
1467 
1468 static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
1469 {
1470 	might_sleep();
1471 
1472 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
1473 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
1474 	trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
1475 }
1476 
1477 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1478 /**
1479  * rt_mutex_lock_nested - lock a rt_mutex
1480  *
1481  * @lock: the rt_mutex to be locked
1482  * @subclass: the lockdep subclass
1483  */
1484 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
1485 {
1486 	__rt_mutex_lock(lock, subclass);
1487 }
1488 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
1489 
1490 #else /* !CONFIG_DEBUG_LOCK_ALLOC */
1491 
1492 /**
1493  * rt_mutex_lock - lock a rt_mutex
1494  *
1495  * @lock: the rt_mutex to be locked
1496  */
1497 void __sched rt_mutex_lock(struct rt_mutex *lock)
1498 {
1499 	__rt_mutex_lock(lock, 0);
1500 }
1501 EXPORT_SYMBOL_GPL(rt_mutex_lock);
1502 #endif
1503 
1504 /**
1505  * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
1506  *
1507  * @lock:		the rt_mutex to be locked
1508  *
1509  * Returns:
1510  *  0		on success
1511  * -EINTR	when interrupted by a signal
1512  */
1513 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
1514 {
1515 	int ret;
1516 
1517 	might_sleep();
1518 
1519 	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1520 	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
1521 	if (ret)
1522 		mutex_release(&lock->dep_map, _RET_IP_);
1523 	else
1524 		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
1525 
1526 	return ret;
1527 }
1528 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
1529 
1530 /*
1531  * Futex variant, must not use fastpath.
1532  */
1533 int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
1534 {
1535 	return rt_mutex_slowtrylock(lock);
1536 }
1537 
1538 int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
1539 {
1540 	return __rt_mutex_slowtrylock(lock);
1541 }
1542 
1543 /**
1544  * rt_mutex_timed_lock - lock a rt_mutex interruptibly;
1545  *			the timeout structure is provided
1546  *			by the caller
1547  *
1548  * @lock:		the rt_mutex to be locked
1549  * @timeout:		timeout structure or NULL (no timeout)
1550  *
1551  * Returns:
1552  *  0		on success
1553  * -EINTR	when interrupted by a signal
1554  * -ETIMEDOUT	when the timeout expired
1555  */
1556 int
1557 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
1558 {
1559 	int ret;
1560 
1561 	might_sleep();
1562 
1563 	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1564 	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
1565 				       RT_MUTEX_MIN_CHAINWALK,
1566 				       rt_mutex_slowlock);
1567 	if (ret)
1568 		mutex_release(&lock->dep_map, _RET_IP_);
1569 	else
1570 		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
1571 
1572 	return ret;
1573 }
1574 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
1575 
1576 /**
1577  * rt_mutex_trylock - try to lock a rt_mutex
1578  *
1579  * @lock:	the rt_mutex to be locked
1580  *
1581  * This function can only be called in thread context. It's safe to
1582  * call it from atomic regions, but not from hard interrupt or soft
1583  * interrupt context.
1584  *
1585  * Returns 1 on success and 0 on contention
1586  */
1587 int __sched rt_mutex_trylock(struct rt_mutex *lock)
1588 {
1589 	int ret;
1590 
1591 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
1592 		return 0;
1593 
1594 	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
1595 	if (ret)
1596 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1597 	else
1598 		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
1599 
1600 	return ret;
1601 }
1602 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
1603 
1604 /**
1605  * rt_mutex_unlock - unlock a rt_mutex
1606  *
1607  * @lock: the rt_mutex to be unlocked
1608  */
1609 void __sched rt_mutex_unlock(struct rt_mutex *lock)
1610 {
1611 	mutex_release(&lock->dep_map, _RET_IP_);
1612 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
1613 	trace_android_vh_record_rtmutex_lock_starttime(current, 0);
1614 }
1615 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
1616 
1617 /**
1618  * Futex variant: since futex variants do not use the fast-path, this can be
1619  * simple and will not need to retry.
1620  */
1621 bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
1622 				    struct wake_q_head *wake_q)
1623 {
1624 	lockdep_assert_held(&lock->wait_lock);
1625 
1626 	debug_rt_mutex_unlock(lock);
1627 
1628 	if (!rt_mutex_has_waiters(lock)) {
1629 		lock->owner = NULL;
1630 		return false; /* done */
1631 	}
1632 
1633 	/*
1634 	 * We've already deboosted, mark_wakeup_next_waiter() will
1635 	 * retain preempt_disabled when we drop the wait_lock, to
1636 	 * avoid inversion prior to the wakeup.  preempt_disable()
1637 	 * therein pairs with rt_mutex_postunlock().
1638 	 */
1639 	mark_wakeup_next_waiter(wake_q, lock);
1640 
1641 	return true; /* call postunlock() */
1642 }
1643 
1644 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
1645 {
1646 	DEFINE_WAKE_Q(wake_q);
1647 	unsigned long flags;
1648 	bool postunlock;
1649 
1650 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
1651 	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
1652 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1653 
1654 	if (postunlock)
1655 		rt_mutex_postunlock(&wake_q);
1656 }
1657 
1658 /**
1659  * rt_mutex_destroy - mark a mutex unusable
1660  * @lock: the mutex to be destroyed
1661  *
1662  * This function marks the mutex uninitialized, and any subsequent
1663  * use of the mutex is forbidden. The mutex must not be locked when
1664  * this function is called.
1665  */
1666 void rt_mutex_destroy(struct rt_mutex *lock)
1667 {
1668 	WARN_ON(rt_mutex_is_locked(lock));
1669 #ifdef CONFIG_DEBUG_RT_MUTEXES
1670 	lock->magic = NULL;
1671 #endif
1672 }
1673 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
1674 
1675 /**
1676  * __rt_mutex_init - initialize the rt lock
1677  *
1678  * @lock: the rt lock to be initialized
1679  *
1680  * Initialize the rt lock to unlocked state.
1681  *
1682  * Initializing a locked rt lock is not allowed
1683  */
1684 void __rt_mutex_init(struct rt_mutex *lock, const char *name,
1685 		     struct lock_class_key *key)
1686 {
1687 	lock->owner = NULL;
1688 	raw_spin_lock_init(&lock->wait_lock);
1689 	lock->waiters = RB_ROOT_CACHED;
1690 
1691 	if (name && key)
1692 		debug_rt_mutex_init(lock, name, key);
1693 }
1694 EXPORT_SYMBOL_GPL(__rt_mutex_init);
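/*
 * Minimal usage sketch (hypothetical caller), assuming the rt_mutex_init()
 * wrapper from <linux/rtmutex.h> which supplies the lockdep class key:
 *
 *	static struct rt_mutex my_lock;
 *
 *	rt_mutex_init(&my_lock);
 *	rt_mutex_lock(&my_lock);
 *	...critical section...
 *	rt_mutex_unlock(&my_lock);
 */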
1695 
1696 /**
1697  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
1698  *				proxy owner
1699  *
1700  * @lock:	the rt_mutex to be locked
1701  * @proxy_owner:	the task to set as owner
1702  *
1703  * No locking. Caller has to do serializing itself
1704  *
1705  * Special API call for PI-futex support. This initializes the rtmutex and
1706  * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
1707  * possible at this point because the pi_state which contains the rtmutex
1708  * is not yet visible to other tasks.
1709  */
1710 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1711 				struct task_struct *proxy_owner)
1712 {
1713 	__rt_mutex_init(lock, NULL, NULL);
1714 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
1715 	rt_mutex_set_owner(lock, proxy_owner);
1716 }
1717 
1718 /**
1719  * rt_mutex_proxy_unlock - release a lock on behalf of owner
1720  *
1721  * @lock:	the rt_mutex to be unlocked
1722  *
1723  * No locking. The caller has to do the serializing itself.
1724  *
1725  * Special API call for PI-futex support. This merrily cleans up the rtmutex
1726  * (debugging) state. Concurrent operations on this rt_mutex are not
1727  * possible because it belongs to the pi_state which is about to be freed
1728  * and it is no longer visible to other tasks.
1729  */
1730 void rt_mutex_proxy_unlock(struct rt_mutex *lock)
1731 {
1732 	debug_rt_mutex_proxy_unlock(lock);
1733 	rt_mutex_set_owner(lock, NULL);
1734 }
1735 
1736 /**
1737  * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1738  * @lock:		the rt_mutex to take
1739  * @waiter:		the pre-initialized rt_mutex_waiter
1740  * @task:		the task to prepare
1741  *
1742  * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
1743  * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
1744  *
1745  * NOTE: does _NOT_ remove the @waiter on failure; must either call
1746  * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
1747  *
1748  * Returns:
1749  *  0 - task blocked on lock
1750  *  1 - acquired the lock for task, caller should wake it up
1751  * <0 - error
1752  *
1753  * Special API call for PI-futex support.
1754  */
1755 int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1756 			      struct rt_mutex_waiter *waiter,
1757 			      struct task_struct *task)
1758 {
1759 	int ret;
1760 
1761 	lockdep_assert_held(&lock->wait_lock);
1762 
1763 	if (try_to_take_rt_mutex(lock, task, NULL))
1764 		return 1;
1765 
1766 	/* We enforce deadlock detection for futexes */
1767 	ret = task_blocks_on_rt_mutex(lock, waiter, task,
1768 				      RT_MUTEX_FULL_CHAINWALK);
1769 
1770 	if (ret && !rt_mutex_owner(lock)) {
1771 		/*
1772 		 * Reset the return value. We might have
1773 		 * returned with -EDEADLK and the owner
1774 		 * released the lock while we were walking the
1775 		 * pi chain.  Let the waiter sort it out.
1776 		 */
1777 		ret = 0;
1778 	}
1779 
1780 	debug_rt_mutex_print_deadlock(waiter);
1781 
1782 	return ret;
1783 }
1784 
1785 /**
1786  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1787  * @lock:		the rt_mutex to take
1788  * @waiter:		the pre-initialized rt_mutex_waiter
1789  * @task:		the task to prepare
1790  *
1791  * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
1792  * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
1793  *
1794  * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
1795  * on failure.
1796  *
1797  * Returns:
1798  *  0 - task blocked on lock
1799  *  1 - acquired the lock for task, caller should wake it up
1800  * <0 - error
1801  *
1802  * Special API call for PI-futex support.
1803  */
1804 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1805 			      struct rt_mutex_waiter *waiter,
1806 			      struct task_struct *task)
1807 {
1808 	int ret;
1809 
1810 	raw_spin_lock_irq(&lock->wait_lock);
1811 	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
1812 	if (unlikely(ret))
1813 		remove_waiter(lock, waiter);
1814 	raw_spin_unlock_irq(&lock->wait_lock);
1815 
1816 	return ret;
1817 }
1818 
1819 /**
1820  * rt_mutex_next_owner - return the next owner of the lock
1821  *
1822  * @lock: the rt lock to query
1823  *
1824  * Returns the next owner of the lock or NULL
1825  *
1826  * Caller has to serialize against other accessors to the lock
1827  * itself.
1828  *
1829  * Special API call for PI-futex support
1830  */
1831 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
1832 {
1833 	if (!rt_mutex_has_waiters(lock))
1834 		return NULL;
1835 
1836 	return rt_mutex_top_waiter(lock)->task;
1837 }
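/*
 * Illustrative sketch (hypothetical, not from the original file): a
 * PI-futex style unlock path could consult rt_mutex_next_owner() under
 * ->wait_lock to decide which task the user-space futex value should be
 * handed to:
 *
 *	struct task_struct *new_owner;
 *
 *	raw_spin_lock_irq(&lock->wait_lock);
 *	new_owner = rt_mutex_next_owner(lock);
 *	if (new_owner)
 *		pr_debug("handing futex to pid %d\n", task_pid_nr(new_owner));
 *	raw_spin_unlock_irq(&lock->wait_lock);
 */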
1838 
1839 /**
1840  * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
1841  * @lock:		the rt_mutex we were woken on
1842  * @to:			the timeout, NULL if none. The hrtimer should already
1843  *			have been started.
1844  * @waiter:		the pre-initialized rt_mutex_waiter
1845  *
1846  * Wait for the lock acquisition started on our behalf by
1847  * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
1848  * rt_mutex_cleanup_proxy_lock().
1849  *
1850  * Returns:
1851  *  0 - success
1852  * <0 - error, one of -EINTR, -ETIMEDOUT
1853  *
1854  * Special API call for PI-futex support
1855  */
1856 int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
1857 			       struct hrtimer_sleeper *to,
1858 			       struct rt_mutex_waiter *waiter)
1859 {
1860 	int ret;
1861 
1862 	raw_spin_lock_irq(&lock->wait_lock);
1863 	/* sleep on the mutex */
1864 	set_current_state(TASK_INTERRUPTIBLE);
1865 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1866 	/*
1867 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1868 	 * have to fix that up.
1869 	 */
1870 	fixup_rt_mutex_waiters(lock);
1871 	raw_spin_unlock_irq(&lock->wait_lock);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
1878  * @lock:		the rt_mutex we were woken on
1879  * @waiter:		the pre-initialized rt_mutex_waiter
1880  *
1881  * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
1882  * rt_mutex_wait_proxy_lock().
1883  *
1884  * Unless we acquired the lock, we're still enqueued on the wait-list and can
1885  * in fact still be granted ownership until we're removed. Therefore we can
1886  * find we are in fact the owner and must disregard the
1887  * rt_mutex_wait_proxy_lock() failure.
1888  *
1889  * Returns:
1890  *  true  - we did the cleanup and are done.
1891  *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
1892  *          the caller should disregard its return value.
1893  *
1894  * Special API call for PI-futex support
1895  */
1896 bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
1897 				 struct rt_mutex_waiter *waiter)
1898 {
1899 	bool cleanup = false;
1900 
1901 	raw_spin_lock_irq(&lock->wait_lock);
1902 	/*
1903 	 * Do an unconditional try-lock, this deals with the lock stealing
1904 	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
1905 	 * sets a NULL owner.
1906 	 *
1907 	 * We're not interested in the return value, because the subsequent
1908 	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
1909 	 * we will own the lock and it will have removed the waiter. If we
1910 	 * failed the trylock, we're still not owner and we need to remove
1911 	 * ourselves.
1912 	 */
1913 	try_to_take_rt_mutex(lock, current, waiter);
1914 	/*
1915 	 * Unless we're the owner, we're still enqueued on the wait_list.
1916 	 * So check if we became owner, if not, take us off the wait_list.
1917 	 */
1918 	if (rt_mutex_owner(lock) != current) {
1919 		remove_waiter(lock, waiter);
1920 		cleanup = true;
1921 	}
1922 	/*
1923 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1924 	 * have to fix that up.
1925 	 */
1926 	fixup_rt_mutex_waiters(lock);
1927 
1928 	raw_spin_unlock_irq(&lock->wait_lock);
1929 
1930 	return cleanup;
1931 }
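/*
 * Illustrative sketch (hypothetical, not from the original file) of how the
 * proxy-lock calls above are meant to be sequenced by a PI-futex style
 * caller; the real consumer is the futex lock-PI/requeue-PI code. The
 * function name and flow below are assumptions made only to show the
 * ordering of the API calls:
 *
 *	static int proxy_lock_sequence(struct rt_mutex *lock,
 *				       struct task_struct *task,
 *				       struct hrtimer_sleeper *to)
 *	{
 *		struct rt_mutex_waiter waiter;
 *		int ret;
 *
 *		rt_mutex_init_waiter(&waiter);
 *
 *		ret = rt_mutex_start_proxy_lock(lock, &waiter, task);
 *		if (ret == 1)
 *			return 0;	// lock acquired for @task, wake it up
 *		if (ret < 0)
 *			return ret;	// e.g. -EDEADLK, waiter already removed
 *
 *		// @task is now blocked on @lock; later, in @task's context:
 *		ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *		if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *			ret = 0;	// we became owner after all
 *
 *		return ret;
 *	}
 */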
1932