/kernel/locking/
D | spinlock_debug.c
    16  void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,    in __raw_spin_lock_init() argument
    23  debug_check_no_locks_freed((void *)lock, sizeof(*lock));    in __raw_spin_lock_init()
    24  lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);    in __raw_spin_lock_init()
    26  lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;    in __raw_spin_lock_init()
    27  lock->magic = SPINLOCK_MAGIC;    in __raw_spin_lock_init()
    28  lock->owner = SPINLOCK_OWNER_INIT;    in __raw_spin_lock_init()
    29  lock->owner_cpu = -1;    in __raw_spin_lock_init()
    35  void __rwlock_init(rwlock_t *lock, const char *name,    in __rwlock_init() argument
    42  debug_check_no_locks_freed((void *)lock, sizeof(*lock));    in __rwlock_init()
    43  lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);    in __rwlock_init()
    [all …]
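__raw_spin_lock_init() and __rwlock_init() are the debug-aware back ends of the spin_lock_init() and rwlock_init() macros. A minimal consumer-side sketch, with made-up names and not taken from this file:

    #include <linux/spinlock.h>

    static spinlock_t demo_lock;
    static rwlock_t demo_rwlock;

    static void demo_setup(void)
    {
        /* With CONFIG_DEBUG_SPINLOCK / lockdep these typically expand to the
         * __raw_spin_lock_init()/__rwlock_init() calls shown above, filling in
         * the magic, owner and dep_map fields. */
        spin_lock_init(&demo_lock);
        rwlock_init(&demo_rwlock);
    }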
D | mutex.c
    49  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)    in __mutex_init() argument
    51  atomic_long_set(&lock->owner, 0);    in __mutex_init()
    52  raw_spin_lock_init(&lock->wait_lock);    in __mutex_init()
    53  INIT_LIST_HEAD(&lock->wait_list);    in __mutex_init()
    55  osq_lock_init(&lock->osq);    in __mutex_init()
    58  trace_android_vh_mutex_init(lock);    in __mutex_init()
    59  debug_mutex_init(lock, name, key);    in __mutex_init()
    83  static inline struct task_struct *__mutex_owner(struct mutex *lock)    in __mutex_owner() argument
    85  return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);    in __mutex_owner()
    93  bool mutex_is_locked(struct mutex *lock)    in mutex_is_locked() argument
    [all …]
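__mutex_init() backs the mutex_init() macro; DEFINE_MUTEX() is the static-initializer equivalent. Typical consumer usage, as a hypothetical sketch (illustrative names only):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_mutex);

    static int demo_update(void)
    {
        if (mutex_lock_interruptible(&demo_mutex))  /* may sleep; -EINTR on a signal */
            return -EINTR;

        /* ... touch state protected by demo_mutex ... */

        mutex_unlock(&demo_mutex);
        return 0;
    }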
D | rtmutex_api.c
    22  static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,    in __rt_mutex_lock_common() argument
    30  mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);    in __rt_mutex_lock_common()
    31  ret = __rt_mutex_lock(&lock->rtmutex, state);    in __rt_mutex_lock_common()
    33  mutex_release(&lock->dep_map, _RET_IP_);    in __rt_mutex_lock_common()
    52  void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)    in rt_mutex_lock_nested() argument
    54  __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);    in rt_mutex_lock_nested()
    58  void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)    in _rt_mutex_lock_nest_lock() argument
    60  __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);    in _rt_mutex_lock_nest_lock()
    71  void __sched rt_mutex_lock(struct rt_mutex *lock)    in rt_mutex_lock() argument
    73  __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);    in rt_mutex_lock()
    [all …]
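rt_mutex is used like an ordinary sleeping mutex; the priority-inheritance work done by __rt_mutex_lock_common() stays internal. A minimal hypothetical sketch (illustrative names):

    #include <linux/rtmutex.h>

    static DEFINE_RT_MUTEX(demo_rtm);

    static void demo(void)
    {
        rt_mutex_lock(&demo_rtm);   /* boosts a lower-priority owner if needed */
        /* ... critical section ... */
        rt_mutex_unlock(&demo_rtm);
    }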
D | rtmutex.c
    37  struct rt_mutex *lock,    in __ww_mutex_add_waiter() argument
    43  static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,    in __ww_mutex_check_waiters() argument
    48  static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,    in ww_mutex_lock_acquired() argument
    53  static inline int __ww_mutex_check_kill(struct rt_mutex *lock,    in __ww_mutex_check_kill() argument
    94  rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)    in rt_mutex_owner_encode() argument
    98  if (rt_mutex_has_waiters(lock))    in rt_mutex_owner_encode()
    105  rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)    in rt_mutex_set_owner() argument
    111  xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));    in rt_mutex_set_owner()
    114  static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)    in rt_mutex_clear_owner() argument
    117  WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));    in rt_mutex_clear_owner()
    [all …]
D | spinlock.c
    68  void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
    72  if (likely(do_raw_##op##_trylock(lock))) \
    76  arch_##op##_relax(&lock->raw_lock); \
    80  unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
    87  if (likely(do_raw_##op##_trylock(lock))) \
    92  arch_##op##_relax(&lock->raw_lock); \
    98  void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
    100  _raw_##op##_lock_irqsave(lock); \
    103  void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
    112  flags = _raw_##op##_lock_irqsave(lock); \
    [all …]
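These ##op## token-pasting templates expand into the out-of-line __raw_spin_lock*(), __raw_read_lock*() and __raw_write_lock*() entry points behind spin_lock(), spin_lock_irqsave() and friends. A typical caller that shares data with an interrupt handler, as a hedged sketch with made-up names:

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(demo_lock);
    static unsigned int demo_count;

    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
        spin_lock(&demo_lock);          /* local IRQs are already off in the handler */
        demo_count++;
        spin_unlock(&demo_lock);
        return IRQ_HANDLED;
    }

    static unsigned int demo_read_count(void)
    {
        unsigned long flags;
        unsigned int v;

        spin_lock_irqsave(&demo_lock, flags);   /* mask local IRQs, save their state */
        v = demo_count;
        spin_unlock_irqrestore(&demo_lock, flags);
        return v;
    }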
D | ww_mutex.h
    9  __ww_waiter_first(struct mutex *lock)    in __ww_waiter_first() argument
    13  w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);    in __ww_waiter_first()
    14  if (list_entry_is_head(w, &lock->wait_list, list))    in __ww_waiter_first()
    21  __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)    in __ww_waiter_next() argument
    24  if (list_entry_is_head(w, &lock->wait_list, list))    in __ww_waiter_next()
    31  __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)    in __ww_waiter_prev() argument
    34  if (list_entry_is_head(w, &lock->wait_list, list))    in __ww_waiter_prev()
    41  __ww_waiter_last(struct mutex *lock)    in __ww_waiter_last() argument
    45  w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);    in __ww_waiter_last()
    46  if (list_entry_is_head(w, &lock->wait_list, list))    in __ww_waiter_last()
    [all …]
D | qspinlock_paravirt.h
    81  static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)    in pv_hybrid_queued_unfair_trylock() argument
    88  int val = atomic_read(&lock->val);    in pv_hybrid_queued_unfair_trylock()
    91  (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {    in pv_hybrid_queued_unfair_trylock()
    109  static __always_inline void set_pending(struct qspinlock *lock)    in set_pending() argument
    111  WRITE_ONCE(lock->pending, 1);    in set_pending()
    119  static __always_inline int trylock_clear_pending(struct qspinlock *lock)    in trylock_clear_pending() argument
    121  return !READ_ONCE(lock->locked) &&    in trylock_clear_pending()
    122  (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,    in trylock_clear_pending()
    126  static __always_inline void set_pending(struct qspinlock *lock)    in set_pending() argument
    128  atomic_or(_Q_PENDING_VAL, &lock->val);    in set_pending()
    [all …]
D | spinlock_rt.c
    44  static __always_inline void __rt_spin_lock(spinlock_t *lock)    in __rt_spin_lock() argument
    47  rtlock_lock(&lock->lock);    in __rt_spin_lock()
    52  void __sched rt_spin_lock(spinlock_t *lock)    in rt_spin_lock() argument
    54  spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);    in rt_spin_lock()
    55  __rt_spin_lock(lock);    in rt_spin_lock()
    60  void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)    in rt_spin_lock_nested() argument
    62  spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);    in rt_spin_lock_nested()
    63  __rt_spin_lock(lock);    in rt_spin_lock_nested()
    67  void __sched rt_spin_lock_nest_lock(spinlock_t *lock,    in rt_spin_lock_nest_lock() argument
    70  spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);    in rt_spin_lock_nest_lock()
    [all …]
D | qspinlock.c
    149  static __always_inline void clear_pending(struct qspinlock *lock)    in clear_pending() argument
    151  WRITE_ONCE(lock->pending, 0);    in clear_pending()
    162  static __always_inline void clear_pending_set_locked(struct qspinlock *lock)    in clear_pending_set_locked() argument
    164  WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);    in clear_pending_set_locked()
    177  static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)    in xchg_tail() argument
    183  return (u32)xchg_relaxed(&lock->tail,    in xchg_tail()
    195  static __always_inline void clear_pending(struct qspinlock *lock)    in clear_pending() argument
    197  atomic_andnot(_Q_PENDING_VAL, &lock->val);    in clear_pending()
    206  static __always_inline void clear_pending_set_locked(struct qspinlock *lock)    in clear_pending_set_locked() argument
    208  atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);    in clear_pending_set_locked()
    [all …]
D | rtmutex_common.h
    37  struct rt_mutex_base *lock;    member
    64  extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
    66  extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
    67  extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
    70  extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
    73  extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
    76  extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
    82  extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
    83  extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
    93  static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)    in rt_mutex_has_waiters() argument
    [all …]
D | qrwlock.c
    21  void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)    in queued_read_lock_slowpath() argument
    33  atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));    in queued_read_lock_slowpath()
    36  atomic_sub(_QR_BIAS, &lock->cnts);    in queued_read_lock_slowpath()
    38  trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);    in queued_read_lock_slowpath()
    43  arch_spin_lock(&lock->wait_lock);    in queued_read_lock_slowpath()
    44  atomic_add(_QR_BIAS, &lock->cnts);    in queued_read_lock_slowpath()
    51  atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));    in queued_read_lock_slowpath()
    56  arch_spin_unlock(&lock->wait_lock);    in queued_read_lock_slowpath()
    58  trace_contention_end(lock, 0);    in queued_read_lock_slowpath()
    66  void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)    in queued_write_lock_slowpath() argument
    [all …]
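The queued read/write slow paths above are only reached on contention; consumers just use the ordinary rwlock API. A hypothetical sketch:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(demo_rwlock);
    static int demo_value;

    static int demo_read(void)
    {
        int v;

        read_lock(&demo_rwlock);    /* many readers may hold the lock at once */
        v = demo_value;
        read_unlock(&demo_rwlock);
        return v;
    }

    static void demo_write(int v)
    {
        write_lock(&demo_rwlock);   /* exclusive; the contended case ends up in the slowpath */
        demo_value = v;
        write_unlock(&demo_rwlock);
    }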
D | mutex-debug.c
    28  void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)    in debug_mutex_lock_common() argument
    36  void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)    in debug_mutex_wake_waiter() argument
    38  lockdep_assert_held(&lock->wait_lock);    in debug_mutex_wake_waiter()
    39  DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));    in debug_mutex_wake_waiter()
    50  void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,    in debug_mutex_add_waiter() argument
    53  lockdep_assert_held(&lock->wait_lock);    in debug_mutex_add_waiter()
    59  void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,    in debug_mutex_remove_waiter() argument
    71  void debug_mutex_unlock(struct mutex *lock)    in debug_mutex_unlock() argument
    74  DEBUG_LOCKS_WARN_ON(lock->magic != lock);    in debug_mutex_unlock()
    75  DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);    in debug_mutex_unlock()
    [all …]
D | mutex.h
    24  extern void debug_mutex_lock_common(struct mutex *lock,
    26  extern void debug_mutex_wake_waiter(struct mutex *lock,
    29  extern void debug_mutex_add_waiter(struct mutex *lock,
    32  extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
    34  extern void debug_mutex_unlock(struct mutex *lock);
    35  extern void debug_mutex_init(struct mutex *lock, const char *name,
    38  # define debug_mutex_lock_common(lock, waiter) do { } while (0)    argument
    39  # define debug_mutex_wake_waiter(lock, waiter) do { } while (0)    argument
    41  # define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)    argument
    42  # define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)    argument
    [all …]
D | ww_rt_mutex.c
    12  int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)    in ww_mutex_trylock() argument
    14  struct rt_mutex *rtm = &lock->base;    in ww_mutex_trylock()
    28  ww_mutex_set_context_fastpath(lock, ww_ctx);    in ww_mutex_trylock()
    38  __ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,    in __ww_rt_mutex_lock() argument
    42  struct rt_mutex *rtm = &lock->base;    in __ww_rt_mutex_lock()
    48  if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))    in __ww_rt_mutex_lock()
    67  ww_mutex_set_context_fastpath(lock, ww_ctx);    in __ww_rt_mutex_lock()
    79  ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)    in ww_mutex_lock() argument
    81  return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);    in ww_mutex_lock()
    86  ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)    in ww_mutex_lock_interruptible() argument
    [all …]
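ww_mutex_lock() returning -EDEADLK is the cue to back off: drop the locks already held, sleep on the contended one with ww_mutex_lock_slow(), then retake the rest. A hedged two-object sketch with hypothetical names (not taken from ww_rt_mutex.c):

    #include <linux/ww_mutex.h>
    #include <linux/errno.h>

    static DEFINE_WW_CLASS(demo_class);

    struct demo_obj {
        struct ww_mutex lock;   /* set up with ww_mutex_init(&obj->lock, &demo_class) */
    };

    static int demo_lock_pair(struct demo_obj *a, struct demo_obj *b)
    {
        struct ww_acquire_ctx ctx;
        struct demo_obj *slow = NULL;   /* object re-taken via the slow path, if any */
        int ret;

        ww_acquire_init(&ctx, &demo_class);
    retry:
        if (slow != a) {
            ret = ww_mutex_lock(&a->lock, &ctx);
            if (ret == -EDEADLK) {
                if (slow)
                    ww_mutex_unlock(&slow->lock);
                ww_mutex_lock_slow(&a->lock, &ctx); /* sleep until we may own it */
                slow = a;
                goto retry;
            }
        }
        if (slow != b) {
            ret = ww_mutex_lock(&b->lock, &ctx);
            if (ret == -EDEADLK) {
                ww_mutex_unlock(&a->lock);          /* a is held at this point */
                ww_mutex_lock_slow(&b->lock, &ctx);
                slow = b;
                goto retry;
            }
        }
        ww_acquire_done(&ctx);

        /* ... both objects locked: do the multi-object update ... */

        ww_mutex_unlock(&a->lock);
        ww_mutex_unlock(&b->lock);
        ww_acquire_fini(&ctx);
        return 0;
    }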
D | lockdep.c
    743  static void print_lockdep_cache(struct lockdep_map *lock)    in print_lockdep_cache() argument
    748  name = lock->name;    in print_lockdep_cache()
    750  name = __get_key_name(lock->key->subkeys, str);    in print_lockdep_cache()
    767  struct lock_class *lock = hlock_class(hlock);    in print_lock() local
    769  if (!lock) {    in print_lock()
    775  print_lock_name(lock);    in print_lock()
    880  look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)    in look_up_lock_class() argument
    902  if (unlikely(!lock->key))    in look_up_lock_class()
    914  key = lock->key->subkeys + subclass;    in look_up_lock_class()
    930  WARN_ONCE(class->name != lock->name &&    in look_up_lock_class()
    [all …]
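What look_up_lock_class() resolves are lock classes, created from the lockdep_map that init calls such as spin_lock_init() embed in each lock. On the consumer side lockdep is mostly visible through annotations; a small hypothetical sketch:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    static DEFINE_SPINLOCK(demo_lock);  /* statically initialised; lockdep gives it its own class */

    static void demo_update(void)
    {
        lockdep_assert_held(&demo_lock);    /* warn if a caller forgot the lock */
        /* ... modify state that demo_lock protects ... */
    }

    static void demo(void)
    {
        spin_lock(&demo_lock);
        demo_update();
        spin_unlock(&demo_lock);
    }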
D | semaphore.c
    59  raw_spin_lock_irqsave(&sem->lock, flags);    in down()
    64  raw_spin_unlock_irqrestore(&sem->lock, flags);    in down()
    83  raw_spin_lock_irqsave(&sem->lock, flags);    in down_interruptible()
    88  raw_spin_unlock_irqrestore(&sem->lock, flags);    in down_interruptible()
    110  raw_spin_lock_irqsave(&sem->lock, flags);    in down_killable()
    115  raw_spin_unlock_irqrestore(&sem->lock, flags);    in down_killable()
    139  raw_spin_lock_irqsave(&sem->lock, flags);    in down_trylock()
    143  raw_spin_unlock_irqrestore(&sem->lock, flags);    in down_trylock()
    165  raw_spin_lock_irqsave(&sem->lock, flags);    in down_timeout()
    170  raw_spin_unlock_irqrestore(&sem->lock, flags);    in down_timeout()
    [all …]
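Every down*() variant above takes sem->lock internally; callers never touch the spinlock. Typical usage, as a small hypothetical sketch:

    #include <linux/semaphore.h>

    static struct semaphore demo_sem;

    static void demo_setup(void)
    {
        sema_init(&demo_sem, 1);    /* count of 1: simple mutual exclusion */
    }

    static int demo_use(void)
    {
        if (down_interruptible(&demo_sem))  /* may sleep; nonzero if interrupted */
            return -EINTR;

        /* ... resource held ... */

        up(&demo_sem);
        return 0;
    }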
D | osq_lock.c
    42  osq_wait_next(struct optimistic_spin_queue *lock,    in osq_wait_next() argument
    58  if (atomic_read(&lock->tail) == curr &&    in osq_wait_next()
    59  atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {    in osq_wait_next()
    90  bool osq_lock(struct optimistic_spin_queue *lock)    in osq_lock() argument
    107  old = atomic_xchg(&lock->tail, curr);    in osq_lock()
    189  next = osq_wait_next(lock, node, prev);    in osq_lock()
    207  void osq_unlock(struct optimistic_spin_queue *lock)    in osq_unlock() argument
    215  if (likely(atomic_cmpxchg_release(&lock->tail, curr,    in osq_unlock()
    229  next = osq_wait_next(lock, node, NULL);    in osq_unlock()
/kernel/sched/
D | wait.c
    11  spin_lock_init(&wq_head->lock);    in __init_waitqueue_head()
    12  lockdep_set_class_and_name(&wq_head->lock, key, name);    in __init_waitqueue_head()
    23  spin_lock_irqsave(&wq_head->lock, flags);    in add_wait_queue()
    25  spin_unlock_irqrestore(&wq_head->lock, flags);    in add_wait_queue()
    34  spin_lock_irqsave(&wq_head->lock, flags);    in add_wait_queue_exclusive()
    36  spin_unlock_irqrestore(&wq_head->lock, flags);    in add_wait_queue_exclusive()
    45  spin_lock_irqsave(&wq_head->lock, flags);    in add_wait_queue_priority()
    47  spin_unlock_irqrestore(&wq_head->lock, flags);    in add_wait_queue_priority()
    55  spin_lock_irqsave(&wq_head->lock, flags);    in remove_wait_queue()
    57  spin_unlock_irqrestore(&wq_head->lock, flags);    in remove_wait_queue()
    [all …]
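wq_head->lock protects the list of waiters; normal users go through the wait_event*()/wake_up*() wrappers rather than these add/remove helpers. A hypothetical sketch:

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static bool demo_ready;

    static int demo_consumer(void)
    {
        /* Sleeps until demo_ready is true; returns -ERESTARTSYS on a signal. */
        if (wait_event_interruptible(demo_wq, demo_ready))
            return -ERESTARTSYS;
        return 0;
    }

    static void demo_producer(void)
    {
        demo_ready = true;
        wake_up(&demo_wq);  /* wake_up() orders the flag write against the waiter's check */
    }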
D | swait.c
    9  raw_spin_lock_init(&q->lock);    in __init_swait_queue_head()
    10  lockdep_set_class_and_name(&q->lock, key, name);    in __init_swait_queue_head()
    51  raw_spin_lock_irqsave(&q->lock, flags);    in swake_up_one()
    53  raw_spin_unlock_irqrestore(&q->lock, flags);    in swake_up_one()
    66  raw_spin_lock_irq(&q->lock);    in swake_up_all()
    77  raw_spin_unlock_irq(&q->lock);    in swake_up_all()
    78  raw_spin_lock_irq(&q->lock);    in swake_up_all()
    80  raw_spin_unlock_irq(&q->lock);    in swake_up_all()
    95  raw_spin_lock_irqsave(&q->lock, flags);    in prepare_to_swait_exclusive()
    98  raw_spin_unlock_irqrestore(&q->lock, flags);    in prepare_to_swait_exclusive()
    [all …]
D | completion.c
    32  raw_spin_lock_irqsave(&x->wait.lock, flags);    in complete()
    37  raw_spin_unlock_irqrestore(&x->wait.lock, flags);    in complete()
    63  raw_spin_lock_irqsave(&x->wait.lock, flags);    in complete_all()
    66  raw_spin_unlock_irqrestore(&x->wait.lock, flags);    in complete_all()
    84  raw_spin_unlock_irq(&x->wait.lock);    in do_wait_for_common()
    86  raw_spin_lock_irq(&x->wait.lock);    in do_wait_for_common()
    105  raw_spin_lock_irq(&x->wait.lock);    in __wait_for_common()
    107  raw_spin_unlock_irq(&x->wait.lock);    in __wait_for_common()
    308  raw_spin_lock_irqsave(&x->wait.lock, flags);    in try_wait_for_completion()
    313  raw_spin_unlock_irqrestore(&x->wait.lock, flags);    in try_wait_for_completion()
    [all …]
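x->wait.lock is the waitqueue spinlock inside the completion; users only see the init/wait/complete API. A minimal hypothetical sketch:

    #include <linux/completion.h>

    static DECLARE_COMPLETION(demo_done);

    static void demo_waiter(void)
    {
        wait_for_completion(&demo_done);    /* uninterruptible sleep until complete() */
    }

    static void demo_worker(void)
    {
        /* ... finish the work ... */
        complete(&demo_done);               /* wakes exactly one waiter */
    }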
/kernel/bpf/
D | percpu_freelist.c
    17  raw_spin_lock_init(&head->lock);    in pcpu_freelist_init()
    20  raw_spin_lock_init(&s->extralist.lock);    in pcpu_freelist_init()
    40  raw_spin_lock(&head->lock);    in ___pcpu_freelist_push()
    42  raw_spin_unlock(&head->lock);    in ___pcpu_freelist_push()
    48  if (!raw_spin_trylock(&s->extralist.lock))    in pcpu_freelist_try_push_extra()
    52  raw_spin_unlock(&s->extralist.lock);    in pcpu_freelist_try_push_extra()
    67  if (raw_spin_trylock(&head->lock)) {    in ___pcpu_freelist_push_nmi()
    69  raw_spin_unlock(&head->lock);    in ___pcpu_freelist_push_nmi()
    131  raw_spin_lock(&head->lock);    in ___pcpu_freelist_pop()
    135  raw_spin_unlock(&head->lock);    in ___pcpu_freelist_pop()
    [all …]
/kernel/irq/
D | autoprobe.c
    46  raw_spin_lock_irq(&desc->lock);    in probe_irq_on()
    57  raw_spin_unlock_irq(&desc->lock);    in probe_irq_on()
    69  raw_spin_lock_irq(&desc->lock);    in probe_irq_on()
    75  raw_spin_unlock_irq(&desc->lock);    in probe_irq_on()
    87  raw_spin_lock_irq(&desc->lock);    in probe_irq_on()
    98  raw_spin_unlock_irq(&desc->lock);    in probe_irq_on()
    124  raw_spin_lock_irq(&desc->lock);    in probe_irq_mask()
    132  raw_spin_unlock_irq(&desc->lock);    in probe_irq_mask()
    163  raw_spin_lock_irq(&desc->lock);    in probe_irq_off()
    174  raw_spin_unlock_irq(&desc->lock);    in probe_irq_off()
/kernel/
D | cpu_pm.c
    23  raw_spinlock_t lock;    member
    26  .lock = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
    53  raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);    in cpu_pm_notify_robust()
    55  raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);    in cpu_pm_notify_robust()
    75  raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);    in cpu_pm_register_notifier()
    77  raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);    in cpu_pm_register_notifier()
    95  raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);    in cpu_pm_unregister_notifier()
    97  raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);    in cpu_pm_unregister_notifier()
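cpu_pm_notifier.lock serializes the notifier chain itself; clients just register a callback for the CPU_PM_* events. A hedged sketch of a hypothetical client:

    #include <linux/cpu_pm.h>
    #include <linux/notifier.h>

    static int demo_cpu_pm_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
    {
        switch (action) {
        case CPU_PM_ENTER:      /* CPU is about to enter a low-power state */
            /* save per-CPU hardware context */
            break;
        case CPU_PM_EXIT:       /* CPU has left the low-power state */
            /* restore per-CPU hardware context */
            break;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block demo_cpu_pm_nb = {
        .notifier_call = demo_cpu_pm_notify,
    };

    static int demo_register(void)
    {
        return cpu_pm_register_notifier(&demo_cpu_pm_nb);
    }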
D | kcov.c
    56  spinlock_t lock;    member
    92  local_lock_t lock;    member
    102  .lock = INIT_LOCAL_LOCK(lock),
    442  spin_lock_irqsave(&kcov->lock, flags);    in kcov_task_exit()
    467  spin_unlock_irqrestore(&kcov->lock, flags);    in kcov_task_exit()
    472  spin_unlock_irqrestore(&kcov->lock, flags);    in kcov_task_exit()
    484  spin_lock_irqsave(&kcov->lock, flags);    in kcov_mmap()
    491  spin_unlock_irqrestore(&kcov->lock, flags);    in kcov_mmap()
    503  spin_unlock_irqrestore(&kcov->lock, flags);    in kcov_mmap()
    517  spin_lock_init(&kcov->lock);    in kcov_open()
    [all …]
/kernel/trace/
D | pid_list.c
    16  lockdep_assert_held(&pid_list->lock);    in get_lower_chunk()
    40  lockdep_assert_held(&pid_list->lock);    in get_upper_chunk()
    63  lockdep_assert_held(&pid_list->lock);    in put_lower_chunk()
    73  lockdep_assert_held(&pid_list->lock);    in put_upper_chunk()
    145  raw_spin_lock_irqsave(&pid_list->lock, flags);    in trace_pid_list_is_set()
    152  raw_spin_unlock_irqrestore(&pid_list->lock, flags);    in trace_pid_list_is_set()
    184  raw_spin_lock_irqsave(&pid_list->lock, flags);    in trace_pid_list_set()
    206  raw_spin_unlock_irqrestore(&pid_list->lock, flags);    in trace_pid_list_set()
    236  raw_spin_lock_irqsave(&pid_list->lock, flags);    in trace_pid_list_clear()
    257  raw_spin_unlock_irqrestore(&pid_list->lock, flags);    in trace_pid_list_clear()
    [all …]