/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner	bit1	bit0
 * NULL		0	0	lock is free (fast acquire possible)
 * NULL		0	1	invalid state
 * NULL		1	0	Transitional State*
 * NULL		1	1	invalid state
 * taskpointer	0	0	lock is held (fast release possible)
 * taskpointer	0	1	task is pending owner
 * taskpointer	1	0	lock is held and has waiters
 * taskpointer	1	1	task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority)
 * waiter of the lock, when the lock is released. The thread is woken
 * up and can now take the lock. Until the lock is taken (bit 0
 * cleared) a competing higher priority thread can steal the lock,
 * which puts the woken up thread back on the waiters list.
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 and 1 of lock->owner are 0.
 *
 * (*) There is a small window where the owner can be NULL and the
 * "lock has waiters" bit is set.  This can happen when grabbing the lock.
 * To prevent a cmpxchg of the owner releasing the lock, we need to set this
 * bit before looking at the lock, hence the reason this is a transitional
 * state.
 */

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
		   unsigned long mask)
{
	unsigned long val = (unsigned long)owner | mask;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release if the architecture supports
 * cmpxchg and there is no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

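	/*
	 * Note (added for clarity): lower numeric prio means higher
	 * priority here, so min() picks the more important of the top
	 * pi-waiter's priority and the task's own normal priority.
	 */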
	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * The task can not go away as we did a get_task_struct() before!
	 */
	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter || !waiter->task)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock
	 * and made us the pending owner:
	 */
	if (orig_waiter && !orig_waiter->task)
		goto out_unlock_pi;

	/*
	 * Drop out when the task has no waiters. Note that top_waiter
	 * can be NULL when we are in deboosting mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off, we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!spin_trylock(&lock->wait_lock)) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
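
/*
 * Illustrative scenario (added for clarity, not in the original
 * source): if task A (prio 10) blocks on lock L1 held by B, and B is
 * itself blocked on L2 held by C, the walk above requeues A's waiter
 * on L1, boosts B, then follows B->pi_blocked_on to L2 and boosts C
 * in the next iteration - one step at a time, holding at most two
 * locks per step. Lower numeric prio means higher priority here.
 */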

/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == current)
		return 1;

	spin_lock_irqsave(&pendowner->pi_lock, flags);
	if (current->prio >= pendowner->prio) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owner's
	 * pi_waiters list. Remove it and readjust the pending owner's
	 * priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owner's pi_waiters queue. So
	 * we have to enqueue this waiter into the
	 * current->pi_waiters list. This covers the case where
	 * current is boosted because it holds another lock and
	 * gets unboosted because the booster is interrupted, so
	 * that we would otherwise delay a waiter with higher
	 * priority than current->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be current:
	 */
	if (likely(next->task != current)) {
		spin_lock_irqsave(&current->pi_lock, flags);
		plist_add(&next->pi_list_entry, &current->pi_waiters);
		__rt_mutex_adjust_prio(current);
		spin_unlock_irqrestore(&current->pi_lock, flags);
	}
	return 1;
}
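
/*
 * Illustrative scenario (added for clarity): suppose task B was made
 * pending owner at prio 5 but has not run yet, while current runs at
 * prio 3 (numerically lower, i.e. more important). try_to_steal_lock()
 * then deboosts B, moves the lock's top waiter from B's pi_waiters
 * list over to current's, and lets current take the lock ahead of B.
 */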

/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled: when no other waiter is on the lock and the lock
	 * has been released since we did the cmpxchg, the lock can be
	 * released or taken while we are doing the checks and marking
	 * the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	spin_lock_irqsave(&current->pi_lock, flags);
	__rt_mutex_adjust_prio(current);
	waiter->task = current;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, current->prio);
	plist_node_init(&waiter->pi_list_entry, current->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	current->pi_blocked_on = waiter;

	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 current);

	spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	struct task_struct *pendowner;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);
	plist_del(&waiter->list_entry, &lock->wait_list);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
	pendowner = waiter->task;
	waiter->task = NULL;

	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

	spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * ensures that, should the pending owner get unboosted, no
	 * waiter with higher priority than pending-owner->normal_prio
	 * is left blocked on the unboosted (pending) owner.
	 */
	spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {

		spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Set up the timer when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start the
			 * loop all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task)) {
				/*
				 * Reset the return value. We might
				 * have returned with -EDEADLK and the
				 * owner released the lock while we
				 * were walking the pi chain.
				 */
				ret = 0;
				continue;
			}
			if (unlikely(ret))
				break;
		}

		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
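
/*
 * Usage sketch (illustrative only; "my_lock" and the critical section
 * are hypothetical):
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	...critical section - higher priority waiters boost us via PI...
 *	rt_mutex_unlock(&my_lock);
 */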

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, with a timeout
 *			 structure provided by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
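
/*
 * Timeout usage sketch (illustrative; modelled on how the PI-futex
 * code of this era sets up its sleeper - the names and exact hrtimer
 * calls are assumptions and may differ between kernel versions):
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC,
 *			      HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	hrtimer_set_expires(&to.timer, expiry_time);
 *
 *	ret = rt_mutex_timed_lock(&my_lock, &to, 0);
 *	destroy_hrtimer_on_stack(&to.timer);
 */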

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
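
/*
 * Usage sketch (illustrative): trylock never sleeps, so it can be
 * used where blocking is not allowed:
 *
 *	if (rt_mutex_trylock(&my_lock)) {
 *		...got the lock...
 *		rt_mutex_unlock(&my_lock);
 *	}
 */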

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
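
/*
 * Initialization sketch (illustrative): a lock is set up at run time
 * via the rt_mutex_init() wrapper from rtmutex.h, or statically with
 * DEFINE_RT_MUTEX():
 *
 *	struct rt_mutex my_lock;
 *
 *	rt_mutex_init(&my_lock);
 */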

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:	the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner, 0);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 * @proxy_owner:	the task to disown the lock
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL, 0);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}