Lines Matching refs:base
179 if (likely(atomic_read(&lock->base.count) == 0)) in ww_mutex_set_context_fastpath()
186 spin_lock_mutex(&lock->base.wait_lock, flags); in ww_mutex_set_context_fastpath()
187 list_for_each_entry(cur, &lock->base.wait_list, list) { in ww_mutex_set_context_fastpath()
188 debug_mutex_wake_waiter(&lock->base, cur); in ww_mutex_set_context_fastpath()
191 spin_unlock_mutex(&lock->base.wait_lock, flags); in ww_mutex_set_context_fastpath()
213 list_for_each_entry(cur, &lock->base.wait_list, list) { in ww_mutex_set_context_slowpath()
214 debug_mutex_wake_waiter(&lock->base, cur); in ww_mutex_set_context_slowpath()
328 ww = container_of(lock, struct ww_mutex, base); in mutex_optimistic_spin()
355 ww = container_of(lock, struct ww_mutex, base); in mutex_optimistic_spin()
474 mutex_clear_owner(&lock->base); in ww_mutex_unlock()
476 __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath); in ww_mutex_unlock()
483 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __ww_mutex_lock_check_stamp()
515 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __mutex_lock_common()
599 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __mutex_lock_common()
688 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, in __ww_mutex_lock()
703 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, in __ww_mutex_lock_interruptible()
845 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, in __ww_mutex_lock_slowpath()
853 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, in __ww_mutex_lock_interruptible_slowpath()
924 ret = __mutex_fastpath_lock_retval(&lock->base.count); in __ww_mutex_lock()
928 mutex_set_owner(&lock->base); in __ww_mutex_lock()
942 ret = __mutex_fastpath_lock_retval(&lock->base.count); in __ww_mutex_lock_interruptible()
946 mutex_set_owner(&lock->base); in __ww_mutex_lock_interruptible()
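Every match above follows one idiom: struct ww_mutex embeds a plain struct mutex as its "base" member, so ww_mutex code reaches the core mutex through lock->base.* (count, wait_lock, wait_list), while generic mutex code recovers the enclosing ww_mutex with container_of(lock, struct ww_mutex, base). Below is a minimal userspace sketch of that idiom, not the kernel's actual definitions: the struct layouts, the local container_of macro, and the ww_ctx_of() helper are simplified assumptions for illustration only.

/*
 * Sketch of the embedded-base pattern used by the matches above.
 * Simplified stand-ins, not the kernel structures.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mutex {                  /* stand-in for the core mutex */
	int count;
};

struct ww_acquire_ctx {         /* stand-in acquire context */
	unsigned long stamp;
};

struct ww_mutex {               /* wound/wait mutex wrapping a core mutex */
	struct mutex base;
	struct ww_acquire_ctx *ctx;
};

/*
 * Mirrors the container_of() matches: given a pointer to the embedded
 * base mutex, recover the enclosing ww_mutex and read its context.
 * (Hypothetical helper, named here only for the example.)
 */
static struct ww_acquire_ctx *ww_ctx_of(struct mutex *lock)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

	return ww->ctx;
}

int main(void)
{
	struct ww_acquire_ctx ctx = { .stamp = 42 };
	struct ww_mutex ww = { .base = { .count = 1 }, .ctx = &ctx };

	/* ww_mutex code goes down through ww.base.*; generic mutex code
	 * comes back up via container_of(). */
	printf("count=%d stamp=%lu\n", ww.base.count, ww_ctx_of(&ww.base)->stamp);
	return 0;
}

The same up/down navigation explains the two kinds of matches in the listing: the lock->base.count, lock->base.wait_lock, and lock->base.wait_list lines go down into the embedded mutex, while the container_of() lines in mutex_optimistic_spin(), __ww_mutex_lock_check_stamp(), and __mutex_lock_common() go back up to the wound/wait wrapper.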