Lines matching refs: lock (kernel/locking/rtmutex.c)
37 struct rt_mutex *lock, in __ww_mutex_add_waiter() argument
43 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, in __ww_mutex_check_waiters() argument
48 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, in ww_mutex_lock_acquired() argument
53 static inline int __ww_mutex_check_kill(struct rt_mutex *lock, in __ww_mutex_check_kill() argument
94 rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_owner_encode() argument
98 if (rt_mutex_has_waiters(lock)) in rt_mutex_owner_encode()
105 rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_set_owner() argument
111 xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner)); in rt_mutex_set_owner()
114 static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock) in rt_mutex_clear_owner() argument
117 WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL)); in rt_mutex_clear_owner()
120 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock) in clear_rt_mutex_waiters() argument
122 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
123 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
127 fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock) in fixup_rt_mutex_waiters() argument
129 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters()
131 if (rt_mutex_has_waiters(lock)) in fixup_rt_mutex_waiters()
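The owner-field helpers above (rt_mutex_owner_encode, clear_rt_mutex_waiters, fixup_rt_mutex_waiters) all depend on one trick: task_struct pointers are word aligned, so the lowest bit of lock->owner is free to carry the RT_MUTEX_HAS_WAITERS flag. A minimal user-space sketch of that encoding, with made-up types standing in for the kernel's:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the owner-pointer encoding: the lowest bit of the owner
 * word doubles as the "has waiters" flag, which is safe because
 * task_struct pointers are at least word aligned. "struct task" and
 * HAS_WAITERS are stand-ins here, not kernel definitions.
 */
#define HAS_WAITERS 1UL

struct task { int pid; };

static uintptr_t owner_encode(struct task *owner, bool has_waiters)
{
    uintptr_t val = (uintptr_t)owner;

    if (has_waiters)
        val |= HAS_WAITERS;    /* steal the alignment bit */
    return val;
}

static struct task *owner_decode(uintptr_t val)
{
    return (struct task *)(val & ~HAS_WAITERS);    /* mask the flag off */
}

int main(void)
{
    struct task t = { .pid = 42 };
    uintptr_t enc = owner_encode(&t, true);

    assert(owner_decode(enc) == &t);    /* pointer survives the round trip */
    printf("pid %d, waiters bit %lu\n", owner_decode(enc)->pid,
           (unsigned long)(enc & HAS_WAITERS));
    return 0;
}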
215 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
219 return try_cmpxchg_acquire(&lock->owner, &old, new); in rt_mutex_cmpxchg_acquire()
222 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
226 return try_cmpxchg_release(&lock->owner, &old, new); in rt_mutex_cmpxchg_release()
234 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
236 unsigned long owner, *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
257 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
259 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
261 struct task_struct *owner = rt_mutex_owner(lock); in unlock_rt_mutex_safe()
263 clear_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
264 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
289 return rt_mutex_cmpxchg_release(lock, owner, NULL); in unlock_rt_mutex_safe()
293 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
301 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
308 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
310 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
311 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
317 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
319 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
321 lock->owner = NULL; in unlock_rt_mutex_safe()
322 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
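Both variants above back the same fast-path contract: acquire by compare-exchanging lock->owner from NULL to current, release by compare-exchanging it back, with the slow path taken only when the exchange fails. A rough C11-atomics sketch of that pattern (user-space stand-ins, not the kernel helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * User-space model of the cmpxchg fast path: acquire ordering when
 * taking the lock, release ordering when dropping it, mirroring
 * rt_mutex_cmpxchg_acquire()/rt_mutex_cmpxchg_release(). "self" plays
 * the role of current; the real slow paths are elided.
 */
struct fast_lock { _Atomic(void *) owner; };

static bool fast_trylock(struct fast_lock *l, void *self)
{
    void *expected = NULL;

    /* Succeeds only if the lock was free; acquire pairs with release. */
    return atomic_compare_exchange_strong_explicit(&l->owner, &expected,
            self, memory_order_acquire, memory_order_relaxed);
}

static bool fast_unlock(struct fast_lock *l, void *self)
{
    void *expected = self;

    /* Fails if a waiter set extra state; a real slow path handles that. */
    return atomic_compare_exchange_strong_explicit(&l->owner, &expected,
            NULL, memory_order_release, memory_order_relaxed);
}

int main(void)
{
    struct fast_lock l = { .owner = NULL };
    int me;

    printf("lock: %d\n", fast_trylock(&l, &me));     /* 1: acquired */
    printf("again: %d\n", fast_trylock(&l, &me));    /* 0: already held */
    printf("unlock: %d\n", fast_unlock(&l, &me));    /* 1: released */
    return 0;
}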
447 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
449 rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); in rt_mutex_enqueue()
453 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue() argument
458 rb_erase_cached(&waiter->tree_entry, &lock->waiters); in rt_mutex_dequeue()
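rt_mutex_enqueue()/rt_mutex_dequeue() keep waiters in a cached rbtree ordered by __waiter_less, so the top waiter is an O(1) read of the cached leftmost node. The sorted list below models only that invariant (the head is always the top waiter); it is an illustration, not the rbtree code:

#include <stdio.h>

/*
 * Sorted-list model of the waiter tree: insertion keeps priority
 * order so the head is always the highest-priority waiter, mirroring
 * what the cached-leftmost rbtree gives the kernel in O(1). Names
 * and the "lower value = higher priority" scale are invented.
 */
struct waiter {
    int prio;
    struct waiter *next;
};

static void enqueue(struct waiter **head, struct waiter *w)
{
    while (*head && (*head)->prio <= w->prio)
        head = &(*head)->next;
    w->next = *head;
    *head = w;
}

static struct waiter *top_waiter(struct waiter *head)
{
    return head;    /* analogue of the cached leftmost node */
}

int main(void)
{
    struct waiter a = { .prio = 20 }, b = { .prio = 5 };
    struct waiter *head = NULL;

    enqueue(&head, &a);
    enqueue(&head, &b);
    printf("top waiter prio: %d\n", top_waiter(head)->prio);    /* 5 */
    return 0;
}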
559 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
635 struct rt_mutex_base *lock; in rt_mutex_adjust_prio_chain() local
714 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
781 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
787 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
802 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { in rt_mutex_adjust_prio_chain()
817 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
838 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
839 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
844 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
857 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
861 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
874 prerequeue_top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
877 rt_mutex_dequeue(lock, waiter); in rt_mutex_adjust_prio_chain()
897 rt_mutex_enqueue(lock, waiter); in rt_mutex_adjust_prio_chain()
910 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
916 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
919 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
924 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
928 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
951 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
976 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
980 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
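rt_mutex_adjust_prio_chain() walks task->pi_blocked_on->lock->owner links, boosting each owner it passes and declaring deadlock if the walk comes back to orig_lock (the lock == orig_lock check at line 802 above). A heavily simplified single-threaded model of that walk; the real one drops and retakes wait_lock/pi_lock at every step and copes with concurrent changes:

#include <stdbool.h>
#include <stdio.h>

/*
 * Single-threaded model of the chain walk: follow lock -> owner ->
 * blocked-on-lock links, boosting every owner on the way, and treat
 * arriving back at the original lock as a deadlock. All structures
 * are invented; the real walk is concurrent and far more careful.
 */
struct lock;

struct task {
    int prio;                   /* lower value = higher priority */
    struct lock *blocked_on;    /* models pi_blocked_on->lock */
};

struct lock { struct task *owner; };

static bool adjust_prio_chain(struct task *waiter, struct lock *orig_lock)
{
    struct lock *l = orig_lock;

    while (l && l->owner) {
        struct task *owner = l->owner;

        if (owner->prio > waiter->prio)
            owner->prio = waiter->prio;    /* inherit the higher prio */

        l = owner->blocked_on;
        if (l == orig_lock)
            return false;    /* cycle: deadlock detected */
    }
    return true;
}

int main(void)
{
    struct lock l1, l2;
    struct task low  = { .prio = 20, .blocked_on = &l2 };
    struct task mid  = { .prio = 10, .blocked_on = NULL };
    struct task high = { .prio = 1,  .blocked_on = NULL };

    l1.owner = &low;
    l2.owner = &mid;
    adjust_prio_chain(&high, &l1);    /* high blocks on l1 */
    printf("low %d, mid %d\n", low.prio, mid.prio);    /* both boosted to 1 */
    return 0;
}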
1021 try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
1024 lockdep_assert_held(&lock->wait_lock); in try_to_take_rt_mutex()
1043 mark_rt_mutex_waiters(lock); in try_to_take_rt_mutex()
1048 if (rt_mutex_owner(lock)) in try_to_take_rt_mutex()
1057 struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); in try_to_take_rt_mutex()
1068 rt_mutex_dequeue(lock, waiter); in try_to_take_rt_mutex()
1081 if (rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
1084 rt_mutex_top_waiter(lock))) in try_to_take_rt_mutex()
1116 if (rt_mutex_has_waiters(lock)) in try_to_take_rt_mutex()
1117 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); in try_to_take_rt_mutex()
1125 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
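Before any of the ownership checks, try_to_take_rt_mutex() calls mark_rt_mutex_waiters() (line 1043) so the current owner can no longer release on the lockless fast path and is forced through the wait_lock-serialized slow path. The waiters bit is set with a compare-exchange loop that preserves the owner pointer; a sketch under the same stand-in assumptions as the encoding example above:

#include <stdatomic.h>
#include <stdio.h>

/*
 * Sketch of the waiters-bit marking: a compare-exchange loop ORs the
 * flag into the owner word while preserving whichever owner is there,
 * so the owner's lockless release cmpxchg will fail from now on.
 * Same invented HAS_WAITERS/owner-word assumptions as above.
 */
#define HAS_WAITERS 1UL

static void mark_waiters(_Atomic(unsigned long) *owner)
{
    unsigned long old = atomic_load_explicit(owner, memory_order_relaxed);

    /* On failure "old" is refreshed and the OR is retried. */
    while (!atomic_compare_exchange_weak_explicit(owner, &old,
            old | HAS_WAITERS,
            memory_order_acquire, memory_order_relaxed))
        ;
}

int main(void)
{
    _Atomic(unsigned long) owner;

    atomic_store(&owner, 0x2000);    /* fake aligned owner pointer */
    mark_waiters(&owner);
    printf("owner word: %#lx\n", atomic_load(&owner));    /* 0x2001 */
    return 0;
}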
1137 static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, in task_blocks_on_rt_mutex() argument
1143 struct task_struct *owner = rt_mutex_owner(lock); in task_blocks_on_rt_mutex()
1148 lockdep_assert_held(&lock->wait_lock); in task_blocks_on_rt_mutex()
1165 trace_android_vh_task_blocks_on_rtmutex(lock, waiter, task, ww_ctx, &chwalk); in task_blocks_on_rt_mutex()
1168 waiter->lock = lock; in task_blocks_on_rt_mutex()
1172 if (rt_mutex_has_waiters(lock)) in task_blocks_on_rt_mutex()
1173 top_waiter = rt_mutex_top_waiter(lock); in task_blocks_on_rt_mutex()
1174 rt_mutex_enqueue(lock, waiter); in task_blocks_on_rt_mutex()
1184 rtm = container_of(lock, struct rt_mutex, rtmutex); in task_blocks_on_rt_mutex()
1188 rt_mutex_dequeue(lock, waiter); in task_blocks_on_rt_mutex()
1199 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
1229 raw_spin_unlock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1231 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, in task_blocks_on_rt_mutex()
1234 raw_spin_lock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
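Note the bracketing at lines 1229-1234: task_blocks_on_rt_mutex() drops wait_lock before calling rt_mutex_adjust_prio_chain() and retakes it afterwards, because the chain walk acquires other tasks' locks and must not nest inside this one. The shape of that drop/call/retake pattern, as a pthread sketch with invented names:

#include <pthread.h>
#include <stdio.h>

/*
 * Drop/call/retake shape: the inner lock is released around a call
 * that takes other locks, then retaken, after which the protected
 * state must be revalidated. All names are invented pthread stand-ins.
 */
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

static void chain_walk(void)
{
    /* Takes other tasks' locks; must not run nested under wait_lock. */
    puts("walking the chain without wait_lock held");
}

static void block_on_mutex(void)
{
    pthread_mutex_lock(&wait_lock);
    /* ... enqueue this waiter under wait_lock ... */
    pthread_mutex_unlock(&wait_lock);    /* avoid nesting/inversion */

    chain_walk();

    pthread_mutex_lock(&wait_lock);      /* state may have changed! */
    /* ... revalidate before continuing to block ... */
    pthread_mutex_unlock(&wait_lock);
}

int main(void)
{
    block_on_mutex();
    return 0;
}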
1246 struct rt_mutex_base *lock) in mark_wakeup_next_waiter() argument
1252 waiter = rt_mutex_top_waiter(lock); in mark_wakeup_next_waiter()
1272 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
1289 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) in __rt_mutex_slowtrylock() argument
1291 int ret = try_to_take_rt_mutex(lock, current, NULL); in __rt_mutex_slowtrylock()
1297 fixup_rt_mutex_waiters(lock, true); in __rt_mutex_slowtrylock()
1305 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) in rt_mutex_slowtrylock() argument
1315 if (rt_mutex_owner(lock)) in rt_mutex_slowtrylock()
1322 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1324 ret = __rt_mutex_slowtrylock(lock); in rt_mutex_slowtrylock()
1326 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1331 static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) in __rt_mutex_trylock() argument
1333 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in __rt_mutex_trylock()
1336 return rt_mutex_slowtrylock(lock); in __rt_mutex_trylock()
1342 static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) in rt_mutex_slowunlock() argument
1348 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1350 debug_rt_mutex_unlock(lock); in rt_mutex_slowunlock()
1383 while (!rt_mutex_has_waiters(lock)) { in rt_mutex_slowunlock()
1385 if (unlock_rt_mutex_safe(lock, flags) == true) in rt_mutex_slowunlock()
1388 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1397 mark_wakeup_next_waiter(&wqh, lock); in rt_mutex_slowunlock()
1398 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1403 static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) in __rt_mutex_unlock() argument
1405 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) in __rt_mutex_unlock()
1408 rt_mutex_slowunlock(lock); in __rt_mutex_unlock()
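The while loop at line 1383 is the interesting part of rt_mutex_slowunlock(): as long as no waiter is queued, it keeps attempting unlock_rt_mutex_safe(), which fails (and loops back under wait_lock) if a new waiter raced in and set RT_MUTEX_HAS_WAITERS. A compressed model of that race check, folding the kernel's clear-then-cmpxchg into one compare-exchange:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Compressed model of the unlock race: releasing succeeds only if the
 * owner word still has no waiters bit; if a waiter slipped in, the
 * compare-exchange fails and the caller must retry under the wait
 * lock. HAS_WAITERS and the owner word are the same stand-ins as above.
 */
#define HAS_WAITERS 1UL

static bool release_if_no_waiters(_Atomic(unsigned long) *owner,
                                  unsigned long self)
{
    unsigned long expected = self;    /* owner with no waiters bit */

    return atomic_compare_exchange_strong_explicit(owner, &expected,
            0, memory_order_release, memory_order_relaxed);
}

int main(void)
{
    _Atomic(unsigned long) owner;
    unsigned long self = 0x1000;    /* fake aligned task pointer */

    atomic_store(&owner, self);
    printf("clean release: %d\n", release_if_no_waiters(&owner, self));

    atomic_store(&owner, self | HAS_WAITERS);
    printf("waiter queued: %d\n", release_if_no_waiters(&owner, self));
    return 0;
}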
1412 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
1421 if (owner != rt_mutex_owner(lock)) in rtmutex_spin_on_owner()
1439 !rt_mutex_waiter_is_top_waiter(lock, waiter)) { in rtmutex_spin_on_owner()
1449 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
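The SMP variant of rtmutex_spin_on_owner() (line 1412) implements adaptive spinning: keep polling only while the same owner holds the lock, the owner is running on a CPU, and we remain the top waiter; otherwise give up and block. A stripped-down model with invented fields, where the kernel would read owner->on_cpu and also check need_resched():

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Model of adaptive spinning: spin only while the same owner still
 * holds the lock and is on a CPU; any change means stop and let the
 * caller re-evaluate or sleep. Fields are invented; the kernel reads
 * owner->on_cpu and checks need_resched() and top-waiter status too.
 */
struct spin_owner { _Atomic bool on_cpu; };
struct spin_lock_state { _Atomic(struct spin_owner *) owner; };

static bool spin_on_owner(struct spin_lock_state *l, struct spin_owner *owner)
{
    for (;;) {
        if (atomic_load(&l->owner) != owner)
            return true;     /* owner changed: retry the lock */
        if (!atomic_load(&owner->on_cpu))
            return false;    /* owner preempted: spinning is futile */
        /* a cpu_relax() equivalent would go here */
    }
}

int main(void)
{
    struct spin_owner o;
    struct spin_lock_state l;

    atomic_store(&o.on_cpu, false);    /* owner is not running */
    atomic_store(&l.owner, &o);
    return spin_on_owner(&l, &o) ? 1 : 0;    /* 0: caller should block */
}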
1470 static void __sched remove_waiter(struct rt_mutex_base *lock, in remove_waiter() argument
1473 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
1474 struct task_struct *owner = rt_mutex_owner(lock); in remove_waiter()
1477 lockdep_assert_held(&lock->wait_lock); in remove_waiter()
1480 rt_mutex_dequeue(lock, waiter); in remove_waiter()
1495 if (rt_mutex_has_waiters(lock)) in remove_waiter()
1496 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); in remove_waiter()
1515 raw_spin_unlock_irq(&lock->wait_lock); in remove_waiter()
1517 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, in remove_waiter()
1520 raw_spin_lock_irq(&lock->wait_lock); in remove_waiter()
1534 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, in rt_mutex_slowlock_block() argument
1540 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in rt_mutex_slowlock_block()
1544 trace_android_vh_rtmutex_wait_start(lock); in rt_mutex_slowlock_block()
1547 if (try_to_take_rt_mutex(lock, current, waiter)) in rt_mutex_slowlock_block()
1565 if (waiter == rt_mutex_top_waiter(lock)) in rt_mutex_slowlock_block()
1566 owner = rt_mutex_owner(lock); in rt_mutex_slowlock_block()
1569 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
1571 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) in rt_mutex_slowlock_block()
1574 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
1578 trace_android_vh_rtmutex_wait_finish(lock); in rt_mutex_slowlock_block()
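rt_mutex_slowlock_block() is the wait loop proper: try to take the lock, check for signals and timeout, drop wait_lock, sleep (or spin on the owner), retake, repeat. A condensed condition-variable analogue of that loop shape, with all names invented and the signal/timeout exits reduced to a comment:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Condition-variable analogue of the wait loop: take the wait lock,
 * loop until the mutex is free, with the wait dropping the lock while
 * asleep. All names are invented; the kernel loop also exits with
 * -EINTR or -ETIMEDOUT where the comment indicates.
 */
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool lock_free = true;

static int slowlock_block(void)
{
    pthread_mutex_lock(&wait_lock);
    while (!lock_free) {
        /* signal_pending()/timeout checks would return an error here */
        pthread_cond_wait(&wake, &wait_lock);    /* sleeps, drops lock */
    }
    lock_free = false;    /* took the lock */
    pthread_mutex_unlock(&wait_lock);
    return 0;
}

int main(void)
{
    printf("slowlock: %d\n", slowlock_block());
    return 0;
}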
1614 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, in __rt_mutex_slowlock() argument
1620 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in __rt_mutex_slowlock()
1624 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_slowlock()
1627 if (try_to_take_rt_mutex(lock, current, NULL)) { in __rt_mutex_slowlock()
1637 trace_contention_begin(lock, LCB_F_RT); in __rt_mutex_slowlock()
1639 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk); in __rt_mutex_slowlock()
1641 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter); in __rt_mutex_slowlock()
1652 remove_waiter(lock, waiter); in __rt_mutex_slowlock()
1660 fixup_rt_mutex_waiters(lock, true); in __rt_mutex_slowlock()
1662 trace_contention_end(lock, ret); in __rt_mutex_slowlock()
1667 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, in __rt_mutex_slowlock_locked() argument
1677 ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK, in __rt_mutex_slowlock_locked()
1690 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, in rt_mutex_slowlock() argument
1705 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowlock()
1706 ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state); in rt_mutex_slowlock()
1707 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowlock()
1712 static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, in __rt_mutex_lock() argument
1715 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in __rt_mutex_lock()
1718 return rt_mutex_slowlock(lock, NULL, state); in __rt_mutex_lock()
1731 static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock) in rtlock_slowlock_locked() argument
1736 lockdep_assert_held(&lock->wait_lock); in rtlock_slowlock_locked()
1738 if (try_to_take_rt_mutex(lock, current, NULL)) in rtlock_slowlock_locked()
1746 trace_contention_begin(lock, LCB_F_RT); in rtlock_slowlock_locked()
1748 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK); in rtlock_slowlock_locked()
1752 if (try_to_take_rt_mutex(lock, current, &waiter)) in rtlock_slowlock_locked()
1755 if (&waiter == rt_mutex_top_waiter(lock)) in rtlock_slowlock_locked()
1756 owner = rt_mutex_owner(lock); in rtlock_slowlock_locked()
1759 raw_spin_unlock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1761 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) in rtlock_slowlock_locked()
1764 raw_spin_lock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1775 fixup_rt_mutex_waiters(lock, true); in rtlock_slowlock_locked()
1778 trace_contention_end(lock, 0); in rtlock_slowlock_locked()
1781 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock) in rtlock_slowlock() argument
1785 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rtlock_slowlock()
1786 rtlock_slowlock_locked(lock); in rtlock_slowlock()
1787 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rtlock_slowlock()
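rtlock_slowlock_locked() serves the spinlock/rwlock substitutions on PREEMPT_RT, and unlike rt_mutex_slowlock() it has no error exits: no signal checks, no timeout, the loop simply runs until the lock is taken. A loose user-space analogue of that unconditional retry, with a yield standing in for the schedule-and-wake cycle:

#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

/*
 * Loose analogue of the rtlock wait loop: no error exits, just retry
 * until the lock is ours, yielding between attempts. Names are
 * invented; sched_yield() stands in for the schedule_rtlock() sleep.
 */
static _Atomic int rtlock_owner;    /* 0 == lock is free */

static void rtlock_slowlock(int self)
{
    int expected = 0;

    /* Unconditional: no -EINTR, no deadline, keep trying. */
    while (!atomic_compare_exchange_weak(&rtlock_owner, &expected, self)) {
        expected = 0;
        sched_yield();
    }
}

int main(void)
{
    rtlock_slowlock(1);
    printf("rtlock owner: %d\n", atomic_load(&rtlock_owner));
    return 0;
}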