Lines matching refs:lock in kernel/locking/mutex.c (an Android common-kernel tree, judging by the trace_android_vh_* vendor hooks)

42 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)  in __mutex_init()  argument
44 atomic_long_set(&lock->owner, 0); in __mutex_init()
45 spin_lock_init(&lock->wait_lock); in __mutex_init()
46 INIT_LIST_HEAD(&lock->wait_list); in __mutex_init()
48 osq_lock_init(&lock->osq); in __mutex_init()
51 debug_mutex_init(lock, name, key); in __mutex_init()
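The matches above are the body of __mutex_init(). Callers normally reach it through the mutex_init() wrapper macro, which supplies the lockdep name and class key, or initialize statically with DEFINE_MUTEX(). A minimal usage sketch (my_driver_data and its fields are hypothetical):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(my_static_lock);    /* compile-time initialization */

    struct my_driver_data {
            struct mutex lock;
    };

    static void my_driver_setup(struct my_driver_data *d)
    {
            mutex_init(&d->lock);   /* expands to __mutex_init() with a static lockdep key */
    }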
75 static inline struct task_struct *__mutex_owner(struct mutex *lock) in __mutex_owner() argument
77 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS); in __mutex_owner()
85 bool mutex_is_locked(struct mutex *lock) in mutex_is_locked() argument
87 return __mutex_owner(lock) != NULL; in mutex_is_locked()
92 mutex_trylock_recursive(struct mutex *lock) in mutex_trylock_recursive() argument
94 if (unlikely(__mutex_owner(lock) == current)) in mutex_trylock_recursive()
97 return mutex_trylock(lock); in mutex_trylock_recursive()
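mutex_is_locked() simply checks whether the owner word decodes to a non-NULL task. mutex_trylock_recursive() (line 92) additionally reports when the calling task already owns the lock; upstream discouraged and later removed it. A hedged sketch of handling its three-way result, assuming the enum values from mutex.h of this era (d is hypothetical):

    switch (mutex_trylock_recursive(&d->lock)) {
    case MUTEX_TRYLOCK_FAILED:      /* held by another task */
            return -EBUSY;
    case MUTEX_TRYLOCK_SUCCESS:     /* acquired here; caller must unlock */
            break;
    case MUTEX_TRYLOCK_RECURSIVE:   /* we already hold it: almost always a bug */
            WARN_ON_ONCE(1);
            return -EDEADLK;
    }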
109 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) in __mutex_trylock_or_owner() argument
113 owner = atomic_long_read(&lock->owner); in __mutex_trylock_or_owner()
139 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags); in __mutex_trylock_or_owner()
152 static inline bool __mutex_trylock(struct mutex *lock) in __mutex_trylock() argument
154 return !__mutex_trylock_or_owner(lock); in __mutex_trylock()
168 static __always_inline bool __mutex_trylock_fast(struct mutex *lock) in __mutex_trylock_fast() argument
173 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) { in __mutex_trylock_fast()
181 static __always_inline bool __mutex_unlock_fast(struct mutex *lock) in __mutex_unlock_fast() argument
185 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr) in __mutex_unlock_fast()
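Both fast paths hinge on the encoding of lock->owner: the owning task_struct pointer lives in the upper bits, and because task_struct is at least 8-byte aligned, the low bits are free for state flags. In mainline mutex.c the layout is:

    #define MUTEX_FLAG_WAITERS      0x01    /* wait_list non-empty: unlock must take the slowpath */
    #define MUTEX_FLAG_HANDOFF      0x02    /* unlock should hand the lock to the top waiter */
    #define MUTEX_FLAG_PICKUP       0x04    /* handoff done: the designated waiter may take it */
    #define MUTEX_FLAGS             0x07

    /* which is why __mutex_owner() (line 77) masks before casting: */
    return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);

An unlocked mutex therefore has owner == 0, which is exactly the value __mutex_trylock_fast() cmpxchg-es against.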
192 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag) in __mutex_set_flag() argument
194 atomic_long_or(flag, &lock->owner); in __mutex_set_flag()
197 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag) in __mutex_clear_flag() argument
199 atomic_long_andnot(flag, &lock->owner); in __mutex_clear_flag()
202 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) in __mutex_waiter_is_first() argument
204 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; in __mutex_waiter_is_first()
212 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, in __mutex_add_waiter() argument
216 debug_mutex_add_waiter(lock, waiter, current); in __mutex_add_waiter()
218 trace_android_vh_alter_mutex_list_add(lock, waiter, list, &already_on_list); in __mutex_add_waiter()
221 if (__mutex_waiter_is_first(lock, waiter)) in __mutex_add_waiter()
222 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS); in __mutex_add_waiter()
226 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter) in __mutex_remove_waiter() argument
229 if (likely(list_empty(&lock->wait_list))) in __mutex_remove_waiter()
230 __mutex_clear_flag(lock, MUTEX_FLAGS); in __mutex_remove_waiter()
232 debug_mutex_remove_waiter(lock, waiter, current); in __mutex_remove_waiter()
241 static void __mutex_handoff(struct mutex *lock, struct task_struct *task) in __mutex_handoff() argument
243 unsigned long owner = atomic_long_read(&lock->owner); in __mutex_handoff()
258 old = atomic_long_cmpxchg_release(&lock->owner, owner, new); in __mutex_handoff()
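__mutex_handoff() (lines 241-258) passes ownership directly to a chosen waiter instead of releasing the lock into the wild; this is the anti-starvation half of the design, since once MUTEX_FLAG_HANDOFF is set, optimistic spinners can no longer steal the lock. A condensed sketch of the cmpxchg loop, following mainline:

    for (;;) {
            unsigned long old, new;

            new = owner & MUTEX_FLAG_WAITERS;       /* preserve the WAITERS bit */
            new |= (unsigned long)task;             /* new owner's task pointer */
            if (task)
                    new |= MUTEX_FLAG_PICKUP;       /* tell the waiter it may claim it */

            old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
            if (old == owner)
                    break;
            owner = old;                            /* raced with a flag update; retry */
    }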
273 static void __sched __mutex_lock_slowpath(struct mutex *lock);
296 void __sched mutex_lock(struct mutex *lock) in mutex_lock() argument
300 if (!__mutex_trylock_fast(lock)) in mutex_lock()
301 __mutex_lock_slowpath(lock); in mutex_lock()
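mutex_lock() is the uncontended fast path plus a slowpath fallback; the calling pattern stays trivial:

    mutex_lock(&d->lock);           /* a single cmpxchg when uncontended */
    /* ... critical section ... */
    mutex_unlock(&d->lock);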
384 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter, in __ww_mutex_die() argument
392 debug_mutex_wake_waiter(lock, waiter); in __ww_mutex_die()
406 static bool __ww_mutex_wound(struct mutex *lock, in __ww_mutex_wound() argument
410 struct task_struct *owner = __mutex_owner(lock); in __ww_mutex_wound()
412 lockdep_assert_held(&lock->wait_lock); in __ww_mutex_wound()
461 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx) in __ww_mutex_check_waiters() argument
465 lockdep_assert_held(&lock->wait_lock); in __ww_mutex_check_waiters()
467 list_for_each_entry(cur, &lock->wait_list, list) { in __ww_mutex_check_waiters()
471 if (__ww_mutex_die(lock, cur, ww_ctx) || in __ww_mutex_check_waiters()
472 __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx)) in __ww_mutex_check_waiters()
482 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_set_context_fastpath() argument
484 ww_mutex_lock_acquired(lock, ctx); in ww_mutex_set_context_fastpath()
504 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS))) in ww_mutex_set_context_fastpath()
511 spin_lock(&lock->base.wait_lock); in ww_mutex_set_context_fastpath()
512 __ww_mutex_check_waiters(&lock->base, ctx); in ww_mutex_set_context_fastpath()
513 spin_unlock(&lock->base.wait_lock); in ww_mutex_set_context_fastpath()
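__ww_mutex_die() and __ww_mutex_wound() implement the wait-die and wound-wait strategies that resolve lock-order conflicts between two ww_acquire_ctx transactions: the context with the older stamp wins, and the loser either backs off voluntarily (-EDEADLK) or is wounded and forced to. The caller-side protocol, as a hedged sketch (my_ww_class, obj1, obj2 are hypothetical):

    static DEFINE_WW_CLASS(my_ww_class);

    struct ww_acquire_ctx ctx;
    int ret;

    ww_acquire_init(&ctx, &my_ww_class);

    ret = ww_mutex_lock(&obj1->lock, &ctx);
    /* ... */
    ret = ww_mutex_lock(&obj2->lock, &ctx);
    if (ret == -EDEADLK) {
            /* we lost: drop what we hold, then sleep on the contended lock */
            ww_mutex_unlock(&obj1->lock);
            ww_mutex_lock_slow(&obj2->lock, &ctx);
            /* ... retake obj1 and retry ... */
    }
    ww_acquire_done(&ctx);
    /* ... use both objects, then ww_mutex_unlock() each and ww_acquire_fini(&ctx) ... */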
519 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, in ww_mutex_spin_on_owner() argument
524 ww = container_of(lock, struct ww_mutex, base); in ww_mutex_spin_on_owner()
547 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) in ww_mutex_spin_on_owner()
554 if (waiter && !__mutex_waiter_is_first(lock, waiter)) in ww_mutex_spin_on_owner()
567 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, in mutex_spin_on_owner() argument
575 while (__mutex_owner(lock) == owner) { in mutex_spin_on_owner()
576 trace_android_vh_mutex_opt_spin_start(lock, &time_out, &cnt); in mutex_spin_on_owner()
598 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) { in mutex_spin_on_owner()
613 static inline int mutex_can_spin_on_owner(struct mutex *lock) in mutex_can_spin_on_owner() argument
622 owner = __mutex_owner(lock); in mutex_can_spin_on_owner()
631 trace_android_vh_mutex_can_spin_on_owner(lock, &retval); in mutex_can_spin_on_owner()
663 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, in mutex_optimistic_spin() argument
674 if (!mutex_can_spin_on_owner(lock)) in mutex_optimistic_spin()
682 if (!osq_lock(&lock->osq)) in mutex_optimistic_spin()
690 owner = __mutex_trylock_or_owner(lock); in mutex_optimistic_spin()
698 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter)) in mutex_optimistic_spin()
711 osq_unlock(&lock->osq); in mutex_optimistic_spin()
713 trace_android_vh_mutex_opt_spin_finish(lock, true); in mutex_optimistic_spin()
719 osq_unlock(&lock->osq); in mutex_optimistic_spin()
722 trace_android_vh_mutex_opt_spin_finish(lock, false); in mutex_optimistic_spin()
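mutex_optimistic_spin() (lines 663-722) busy-waits only while that is profitable: the owner must be running on another CPU, and rival spinners are serialized through the per-lock OSQ (an MCS-style queue) so only one task at a time hammers the owner field. The control flow, condensed into a sketch:

    if (!mutex_can_spin_on_owner(lock))     /* owner not on_cpu, or we need_resched() */
            goto fail;
    if (!osq_lock(&lock->osq))              /* queue up behind other spinners */
            goto fail;
    for (;;) {
            struct task_struct *owner = __mutex_trylock_or_owner(lock);
            if (!owner)
                    break;                  /* NULL owner means we took the lock */
            if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
                    goto fail_unlock;       /* owner went to sleep: stop spinning */
    }
    osq_unlock(&lock->osq);
    return true;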
741 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, in mutex_optimistic_spin() argument
748 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
761 void __sched mutex_unlock(struct mutex *lock) in mutex_unlock() argument
764 if (__mutex_unlock_fast(lock)) { in mutex_unlock()
769 __mutex_unlock_slowpath(lock, _RET_IP_); in mutex_unlock()
785 void __sched ww_mutex_unlock(struct ww_mutex *lock) in ww_mutex_unlock() argument
791 if (lock->ctx) { in ww_mutex_unlock()
793 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); in ww_mutex_unlock()
795 if (lock->ctx->acquired > 0) in ww_mutex_unlock()
796 lock->ctx->acquired--; in ww_mutex_unlock()
797 lock->ctx = NULL; in ww_mutex_unlock()
800 mutex_unlock(&lock->base); in ww_mutex_unlock()
806 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx) in __ww_mutex_kill() argument
812 ww = container_of(lock, struct ww_mutex, base); in __ww_mutex_kill()
835 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter, in __ww_mutex_check_kill() argument
838 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __ww_mutex_check_kill()
847 return __ww_mutex_kill(lock, ctx); in __ww_mutex_check_kill()
853 return __ww_mutex_kill(lock, ctx); in __ww_mutex_check_kill()
860 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) { in __ww_mutex_check_kill()
864 return __ww_mutex_kill(lock, ctx); in __ww_mutex_check_kill()
883 struct mutex *lock, in __ww_mutex_add_waiter() argument
891 __mutex_add_waiter(lock, waiter, &lock->wait_list); in __ww_mutex_add_waiter()
904 pos = &lock->wait_list; in __ww_mutex_add_waiter()
905 list_for_each_entry_reverse(cur, &lock->wait_list, list) { in __ww_mutex_add_waiter()
916 int ret = __ww_mutex_kill(lock, ww_ctx); in __ww_mutex_add_waiter()
928 __ww_mutex_die(lock, cur, ww_ctx); in __ww_mutex_add_waiter()
931 __mutex_add_waiter(lock, waiter, pos); in __ww_mutex_add_waiter()
938 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __ww_mutex_add_waiter()
946 __ww_mutex_wound(lock, ww_ctx, ww->ctx); in __ww_mutex_add_waiter()
956 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, in __mutex_lock_common() argument
970 DEBUG_LOCKS_WARN_ON(lock->magic != lock); in __mutex_lock_common()
973 ww = container_of(lock, struct ww_mutex, base); in __mutex_lock_common()
988 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); in __mutex_lock_common()
990 if (__mutex_trylock(lock) || in __mutex_lock_common()
991 mutex_optimistic_spin(lock, ww_ctx, NULL)) { in __mutex_lock_common()
993 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
1001 spin_lock(&lock->wait_lock); in __mutex_lock_common()
1005 if (__mutex_trylock(lock)) { in __mutex_lock_common()
1007 __ww_mutex_check_waiters(lock, ww_ctx); in __mutex_lock_common()
1012 debug_mutex_lock_common(lock, &waiter); in __mutex_lock_common()
1014 lock_contended(&lock->dep_map, ip); in __mutex_lock_common()
1018 __mutex_add_waiter(lock, &waiter, &lock->wait_list); in __mutex_lock_common()
1029 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx); in __mutex_lock_common()
1038 trace_android_vh_mutex_wait_start(lock); in __mutex_lock_common()
1049 if (__mutex_trylock(lock)) in __mutex_lock_common()
1063 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx); in __mutex_lock_common()
1068 spin_unlock(&lock->wait_lock); in __mutex_lock_common()
1071 first = __mutex_waiter_is_first(lock, &waiter); in __mutex_lock_common()
1073 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF); in __mutex_lock_common()
1081 if (__mutex_trylock(lock) || in __mutex_lock_common()
1082 (first && mutex_optimistic_spin(lock, ww_ctx, &waiter))) in __mutex_lock_common()
1085 spin_lock(&lock->wait_lock); in __mutex_lock_common()
1087 spin_lock(&lock->wait_lock); in __mutex_lock_common()
1090 trace_android_vh_mutex_wait_finish(lock); in __mutex_lock_common()
1098 !__mutex_waiter_is_first(lock, &waiter)) in __mutex_lock_common()
1099 __ww_mutex_check_waiters(lock, ww_ctx); in __mutex_lock_common()
1102 __mutex_remove_waiter(lock, &waiter); in __mutex_lock_common()
1108 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
1113 spin_unlock(&lock->wait_lock); in __mutex_lock_common()
1120 trace_android_vh_mutex_wait_finish(lock); in __mutex_lock_common()
1121 __mutex_remove_waiter(lock, &waiter); in __mutex_lock_common()
1123 spin_unlock(&lock->wait_lock); in __mutex_lock_common()
1125 mutex_release(&lock->dep_map, ip); in __mutex_lock_common()
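__mutex_lock_common() is the shared slowpath behind every sleeping variant: try once more (including an optimistic spin), then queue a stack-allocated mutex_waiter and sleep until woken, requesting handoff once this task reaches the head of the queue. A sketch of the wait loop, condensed from mainline with the ww checks and error unwinding elided (this tree's vendor hooks shift the exact line numbers):

    spin_lock(&lock->wait_lock);
    __mutex_add_waiter(lock, &waiter, &lock->wait_list);
    set_current_state(state);
    for (;;) {
            if (__mutex_trylock(lock))
                    goto acquired;
            if (signal_pending_state(state, current)) {
                    ret = -EINTR;                   /* killable/interruptible only */
                    goto err;
            }
            spin_unlock(&lock->wait_lock);
            schedule_preempt_disabled();            /* sleep until unlock wakes us */

            first = __mutex_waiter_is_first(lock, &waiter);
            if (first)
                    __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);

            set_current_state(state);
            if (__mutex_trylock(lock) ||
                (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
                    break;                          /* acquired outside wait_lock */
            spin_lock(&lock->wait_lock);
    }
    spin_lock(&lock->wait_lock);
    acquired:
    __set_current_state(TASK_RUNNING);
    __mutex_remove_waiter(lock, &waiter);
    spin_unlock(&lock->wait_lock);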
1131 __mutex_lock(struct mutex *lock, long state, unsigned int subclass, in __mutex_lock() argument
1134 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false); in __mutex_lock()
1138 __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass, in __ww_mutex_lock() argument
1142 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true); in __ww_mutex_lock()
1147 mutex_lock_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_nested() argument
1149 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_); in mutex_lock_nested()
1155 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) in _mutex_lock_nest_lock() argument
1157 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_); in _mutex_lock_nest_lock()
1162 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_killable_nested() argument
1164 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_); in mutex_lock_killable_nested()
1169 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_interruptible_nested() argument
1171 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_); in mutex_lock_interruptible_nested()
1176 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_io_nested() argument
1183 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, in mutex_lock_io_nested()
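The _nested and _nest_lock variants exist purely for lockdep: when two locks of the same class are legitimately held together, the subclass (or outer nest lock) tells lockdep the ordering is intentional. With lockdep disabled they compile down to the plain calls. Sketch (the parent/child naming is hypothetical):

    mutex_lock(&parent->lock);
    mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
    /* ... */
    mutex_unlock(&child->lock);
    mutex_unlock(&parent->lock);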
1190 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_deadlock_injection() argument
1204 ctx->contending_lock = lock; in ww_mutex_deadlock_injection()
1206 ww_mutex_unlock(lock); in ww_mutex_deadlock_injection()
1216 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_lock() argument
1221 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, in ww_mutex_lock()
1225 return ww_mutex_deadlock_injection(lock, ctx); in ww_mutex_lock()
1232 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_lock_interruptible() argument
1237 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, in ww_mutex_lock_interruptible()
1242 return ww_mutex_deadlock_injection(lock, ctx); in ww_mutex_lock_interruptible()
1253 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip) in __mutex_unlock_slowpath() argument
1259 mutex_release(&lock->dep_map, ip); in __mutex_unlock_slowpath()
1268 owner = atomic_long_read(&lock->owner); in __mutex_unlock_slowpath()
1280 old = atomic_long_cmpxchg_release(&lock->owner, owner, in __mutex_unlock_slowpath()
1292 spin_lock(&lock->wait_lock); in __mutex_unlock_slowpath()
1293 debug_mutex_unlock(lock); in __mutex_unlock_slowpath()
1294 if (!list_empty(&lock->wait_list)) { in __mutex_unlock_slowpath()
1297 list_first_entry(&lock->wait_list, in __mutex_unlock_slowpath()
1302 debug_mutex_wake_waiter(lock, waiter); in __mutex_unlock_slowpath()
1307 __mutex_handoff(lock, next); in __mutex_unlock_slowpath()
1309 trace_android_vh_mutex_unlock_slowpath(lock); in __mutex_unlock_slowpath()
1310 spin_unlock(&lock->wait_lock); in __mutex_unlock_slowpath()
1313 trace_android_vh_mutex_unlock_slowpath_end(lock, next); in __mutex_unlock_slowpath()
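The unlock slowpath releases the lock in two stages: a cmpxchg loop clears the owner pointer while preserving the flag bits (unless MUTEX_FLAG_HANDOFF is set, in which case ownership must not be dropped at all), then the first waiter is woken, with __mutex_handoff() passing the lock directly when handoff was requested. Condensed sketch of the first stage:

    owner = atomic_long_read(&lock->owner);
    for (;;) {
            unsigned long old;

            if (owner & MUTEX_FLAG_HANDOFF)
                    break;                          /* keep ownership for the waiter */

            old = atomic_long_cmpxchg_release(&lock->owner, owner,
                                              owner & MUTEX_FLAGS);
            if (old == owner) {
                    if (owner & MUTEX_FLAG_WAITERS)
                            break;                  /* must wake someone up */
                    return;                         /* nobody waiting: done */
            }
            owner = old;
    }
    /* take wait_lock, pick the first waiter, wake it (or hand the lock off) */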
1322 __mutex_lock_killable_slowpath(struct mutex *lock);
1325 __mutex_lock_interruptible_slowpath(struct mutex *lock);
1339 int __sched mutex_lock_interruptible(struct mutex *lock) in mutex_lock_interruptible() argument
1343 if (__mutex_trylock_fast(lock)) in mutex_lock_interruptible()
1346 return __mutex_lock_interruptible_slowpath(lock); in mutex_lock_interruptible()
1363 int __sched mutex_lock_killable(struct mutex *lock) in mutex_lock_killable() argument
1367 if (__mutex_trylock_fast(lock)) in mutex_lock_killable()
1370 return __mutex_lock_killable_slowpath(lock); in mutex_lock_killable()
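mutex_lock_interruptible() and mutex_lock_killable() differ only in which signals abort the wait (any signal vs. only fatal ones). The canonical caller pattern propagates the failure so the syscall can be restarted:

    if (mutex_lock_interruptible(&d->lock))
            return -ERESTARTSYS;    /* a signal arrived before we got the lock */
    /* ... critical section ... */
    mutex_unlock(&d->lock);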
1384 void __sched mutex_lock_io(struct mutex *lock) in mutex_lock_io() argument
1389 mutex_lock(lock); in mutex_lock_io()
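mutex_lock_io() marks the sleeping task as waiting on I/O for the benefit of the scheduler's iowait accounting; in mainline the mutex_lock() call at line 1389 is bracketed like this:

    int token;

    token = io_schedule_prepare();  /* flag this task as in iowait */
    mutex_lock(lock);
    io_schedule_finish(token);      /* restore the previous state */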
1395 __mutex_lock_slowpath(struct mutex *lock) in __mutex_lock_slowpath() argument
1397 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_); in __mutex_lock_slowpath()
1401 __mutex_lock_killable_slowpath(struct mutex *lock) in __mutex_lock_killable_slowpath() argument
1403 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_); in __mutex_lock_killable_slowpath()
1407 __mutex_lock_interruptible_slowpath(struct mutex *lock) in __mutex_lock_interruptible_slowpath() argument
1409 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_); in __mutex_lock_interruptible_slowpath()
1413 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock_slowpath() argument
1415 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL, in __ww_mutex_lock_slowpath()
1420 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, in __ww_mutex_lock_interruptible_slowpath() argument
1423 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL, in __ww_mutex_lock_interruptible_slowpath()
1443 int __sched mutex_trylock(struct mutex *lock) in mutex_trylock() argument
1448 DEBUG_LOCKS_WARN_ON(lock->magic != lock); in mutex_trylock()
1451 locked = __mutex_trylock(lock); in mutex_trylock()
1454 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); in mutex_trylock()
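mutex_trylock() never sleeps, so it is usable where blocking is unwelcome, though its kernel-doc still forbids calling it from interrupt context (a mutex must have a task as owner). Typical pattern:

    if (mutex_trylock(&d->lock)) {
            /* ... fast, opportunistic work ... */
            mutex_unlock(&d->lock);
    } else {
            /* lock busy: defer, retry, or fall back */
    }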
1463 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_lock() argument
1467 if (__mutex_trylock_fast(&lock->base)) { in ww_mutex_lock()
1469 ww_mutex_set_context_fastpath(lock, ctx); in ww_mutex_lock()
1473 return __ww_mutex_lock_slowpath(lock, ctx); in ww_mutex_lock()
1478 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_lock_interruptible() argument
1482 if (__mutex_trylock_fast(&lock->base)) { in ww_mutex_lock_interruptible()
1484 ww_mutex_set_context_fastpath(lock, ctx); in ww_mutex_lock_interruptible()
1488 return __ww_mutex_lock_interruptible_slowpath(lock, ctx); in ww_mutex_lock_interruptible()
1501 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) in atomic_dec_and_mutex_lock() argument
1507 mutex_lock(lock); in atomic_dec_and_mutex_lock()
1510 mutex_unlock(lock); in atomic_dec_and_mutex_lock()
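atomic_dec_and_mutex_lock() decrements *cnt and returns true, with the mutex held, only when the count actually reached zero; in mainline a fast path (atomic_add_unless(cnt, -1, 1)) skips the lock entirely while the count cannot hit zero, and the mutex_unlock() at line 1510 covers the case where the final decrement lost a race. Typical refcounted-teardown usage (names hypothetical):

    if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_registry_lock)) {
            /* last reference is gone and the registry is locked */
            list_del(&obj->node);
            mutex_unlock(&obj_registry_lock);
            kfree(obj);
    }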