/kernel/locking/

D | spinlock_debug.c |
    16  void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,   in __raw_spin_lock_init()
    23          debug_check_no_locks_freed((void *)lock, sizeof(*lock));   in __raw_spin_lock_init()
    24          lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);   in __raw_spin_lock_init()
    26          lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;   in __raw_spin_lock_init()
    27          lock->magic = SPINLOCK_MAGIC;   in __raw_spin_lock_init()
    28          lock->owner = SPINLOCK_OWNER_INIT;   in __raw_spin_lock_init()
    29          lock->owner_cpu = -1;   in __raw_spin_lock_init()
    34  void __rwlock_init(rwlock_t *lock, const char *name,   in __rwlock_init()
    41          debug_check_no_locks_freed((void *)lock, sizeof(*lock));   in __rwlock_init()
    42          lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);   in __rwlock_init()
    [all …]

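These are the CONFIG_DEBUG_SPINLOCK initializers that spin_lock_init() and rwlock_init() expand to. A minimal caller-side sketch of the API they back (struct my_dev and the function names are hypothetical):

    #include <linux/spinlock.h>

    struct my_dev {                         /* hypothetical structure */
            spinlock_t lock;
            int count;
    };

    static void my_dev_setup(struct my_dev *d)
    {
            /* with CONFIG_DEBUG_SPINLOCK this reaches __raw_spin_lock_init() */
            spin_lock_init(&d->lock);
    }

    static void my_dev_bump(struct my_dev *d)
    {
            spin_lock(&d->lock);            /* debug build checks magic/owner */
            d->count++;
            spin_unlock(&d->lock);
    }
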
D | mutex.c |
    42  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)   in __mutex_init()
    44          atomic_long_set(&lock->owner, 0);   in __mutex_init()
    45          spin_lock_init(&lock->wait_lock);   in __mutex_init()
    46          INIT_LIST_HEAD(&lock->wait_list);   in __mutex_init()
    48          osq_lock_init(&lock->osq);   in __mutex_init()
    51          debug_mutex_init(lock, name, key);   in __mutex_init()
    75  static inline struct task_struct *__mutex_owner(struct mutex *lock)   in __mutex_owner()
    77          return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);   in __mutex_owner()
    85  bool mutex_is_locked(struct mutex *lock)   in mutex_is_locked()
    87          return __mutex_owner(lock) != NULL;   in mutex_is_locked()
    [all …]

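__mutex_init() and mutex_is_locked() are the init/query ends of the sleeping mutex API. A minimal usage sketch (cfg_mutex, cfg_value, and update_config are made-up names):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(cfg_mutex);         /* hypothetical lock */
    static int cfg_value;

    static int update_config(int val)
    {
            int ret = mutex_lock_interruptible(&cfg_mutex); /* may sleep */

            if (ret)
                    return ret;             /* -EINTR: signal while waiting */
            cfg_value = val;                /* mutex_is_locked() is true here */
            mutex_unlock(&cfg_mutex);
            return 0;
    }
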
D | rtmutex.c |
    54  rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)   in rt_mutex_set_owner()
    58          if (rt_mutex_has_waiters(lock))   in rt_mutex_set_owner()
    61          WRITE_ONCE(lock->owner, (struct task_struct *)val);   in rt_mutex_set_owner()
    64  static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)   in clear_rt_mutex_waiters()
    66          lock->owner = (struct task_struct *)   in clear_rt_mutex_waiters()
    67                  ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);   in clear_rt_mutex_waiters()
    70  static void fixup_rt_mutex_waiters(struct rt_mutex *lock)   in fixup_rt_mutex_waiters()
    72          unsigned long owner, *p = (unsigned long *) &lock->owner;   in fixup_rt_mutex_waiters()
    74          if (rt_mutex_has_waiters(lock))   in fixup_rt_mutex_waiters()
   153  static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)   in mark_rt_mutex_waiters()
    [all …]

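These helpers maintain the rt_mutex owner pointer, whose low bit encodes RT_MUTEX_HAS_WAITERS. From the outside it is used like a priority-inheriting mutex; a sketch under that assumption (example_rt_lock and touch_shared are hypothetical names):

    #include <linux/rtmutex.h>

    static DEFINE_RT_MUTEX(example_rt_lock);    /* hypothetical lock */

    static void touch_shared(void)
    {
            /* if we block, the current owner inherits our priority */
            rt_mutex_lock(&example_rt_lock);
            /* ... critical section ... */
            rt_mutex_unlock(&example_rt_lock);
    }
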
D | spinlock.c |
    68  void __lockfunc __raw_##op##_lock(locktype##_t *lock)                  \
    72                  if (likely(do_raw_##op##_trylock(lock)))               \
    76                  arch_##op##_relax(&lock->raw_lock);                    \
    80  unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
    87                  if (likely(do_raw_##op##_trylock(lock)))               \
    92                  arch_##op##_relax(&lock->raw_lock);                    \
    98  void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)              \
   100          _raw_##op##_lock_irqsave(lock);                                \
   103  void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)               \
   112          flags = _raw_##op##_lock_irqsave(lock);                        \
    [all …]

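These token-pasting templates stamp out the _irqsave/_irq/_bh variants for both spinlocks and rwlocks. A sketch of how the generated entry points are used (evt_lock and both function names are made up; assumes the data is also touched from IRQ and softirq context):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(evt_lock);       /* hypothetical lock */

    static void update_from_process_context(void)
    {
            unsigned long flags;

            /* generated _irqsave variant: disables local IRQs, saves state */
            spin_lock_irqsave(&evt_lock, flags);
            /* ... data also touched from an interrupt handler ... */
            spin_unlock_irqrestore(&evt_lock, flags);
    }

    static void update_vs_softirq(void)
    {
            spin_lock_bh(&evt_lock);        /* generated _bh variant */
            /* ... data also touched from softirq context ... */
            spin_unlock_bh(&evt_lock);
    }
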
D | qspinlock_paravirt.h |
    81  static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)   in pv_hybrid_queued_unfair_trylock()
    88          int val = atomic_read(&lock->val);   in pv_hybrid_queued_unfair_trylock()
    91              (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {   in pv_hybrid_queued_unfair_trylock()
   109  static __always_inline void set_pending(struct qspinlock *lock)   in set_pending()
   111          WRITE_ONCE(lock->pending, 1);   in set_pending()
   119  static __always_inline int trylock_clear_pending(struct qspinlock *lock)   in trylock_clear_pending()
   121          return !READ_ONCE(lock->locked) &&   in trylock_clear_pending()
   122                 (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,   in trylock_clear_pending()
   126  static __always_inline void set_pending(struct qspinlock *lock)   in set_pending()
   128          atomic_or(_Q_PENDING_VAL, &lock->val);   in set_pending()
    [all …]

D | qspinlock.c |
   148  static __always_inline void clear_pending(struct qspinlock *lock)   in clear_pending()
   150          WRITE_ONCE(lock->pending, 0);   in clear_pending()
   161  static __always_inline void clear_pending_set_locked(struct qspinlock *lock)   in clear_pending_set_locked()
   163          WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);   in clear_pending_set_locked()
   176  static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)   in xchg_tail()
   182          return (u32)xchg_relaxed(&lock->tail,   in xchg_tail()
   194  static __always_inline void clear_pending(struct qspinlock *lock)   in clear_pending()
   196          atomic_andnot(_Q_PENDING_VAL, &lock->val);   in clear_pending()
   205  static __always_inline void clear_pending_set_locked(struct qspinlock *lock)   in clear_pending_set_locked()
   207          atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);   in clear_pending_set_locked()
    [all …]

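The two sets of helpers are alternatives for the same 32-bit lock word: the WRITE_ONCE forms apply when pending and tail sit on byte boundaries (NR_CPUS < 16K), the atomic forms otherwise. A compilable userspace sketch of the transitions involved; the Q_* constants mirror the usual _Q_* layout and are assumptions, not taken from this tree:

    #include <stdint.h>
    #include <assert.h>

    /* assumed layout (NR_CPUS < 16K): locked byte 0-7, pending byte 8, tail 16-31 */
    #define Q_LOCKED_VAL   (1u << 0)
    #define Q_PENDING_VAL  (1u << 8)

    int main(void)
    {
            uint32_t val = 0;

            val |= Q_PENDING_VAL;                   /* set_pending(): next in line */
            val &= ~Q_PENDING_VAL;                  /* clear_pending(): backed out */

            val = Q_PENDING_VAL;
            val += -Q_PENDING_VAL + Q_LOCKED_VAL;   /* clear_pending_set_locked() */
            assert(val == Q_LOCKED_VAL);
            return 0;
    }
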
D | rtmutex-debug.c |
    43  static void printk_lock(struct rt_mutex *lock, int print_owner)   in printk_lock()
    45          if (lock->name)   in printk_lock()
    47                          lock, lock->name);   in printk_lock()
    50                          lock, lock->file, lock->line);   in printk_lock()
    52          if (print_owner && rt_mutex_owner(lock)) {   in printk_lock()
    53                  printk(".. ->owner: %p\n", lock->owner);   in printk_lock()
    55                  printk_task(rt_mutex_owner(lock));   in printk_lock()
    73                              struct rt_mutex *lock)   in debug_rt_mutex_deadlock()
    80          task = rt_mutex_owner(act_waiter->lock);   in debug_rt_mutex_deadlock()
    83          act_waiter->deadlock_lock = lock;   in debug_rt_mutex_deadlock()
    [all …]

D | mutex-debug.c |
    30  void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)   in debug_mutex_lock_common()
    37  void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)   in debug_mutex_wake_waiter()
    39          lockdep_assert_held(&lock->wait_lock);   in debug_mutex_wake_waiter()
    40          DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));   in debug_mutex_wake_waiter()
    51  void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,   in debug_mutex_add_waiter()
    54          lockdep_assert_held(&lock->wait_lock);   in debug_mutex_add_waiter()
    60  void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,   in debug_mutex_remove_waiter()
    72  void debug_mutex_unlock(struct mutex *lock)   in debug_mutex_unlock()
    75          DEBUG_LOCKS_WARN_ON(lock->magic != lock);   in debug_mutex_unlock()
    76          DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);   in debug_mutex_unlock()
    [all …]

D | rtmutex_common.h |
    31          struct rt_mutex *lock;   member
    47  static inline int rt_mutex_has_waiters(struct rt_mutex *lock)   in rt_mutex_has_waiters()
    49          return !RB_EMPTY_ROOT(&lock->waiters.rb_root);   in rt_mutex_has_waiters()
    53  rt_mutex_top_waiter(struct rt_mutex *lock)   in rt_mutex_top_waiter()
    55          struct rb_node *leftmost = rb_first_cached(&lock->waiters);   in rt_mutex_top_waiter()
    60          BUG_ON(w->lock != lock);   in rt_mutex_top_waiter()
    79  static inline int rt_mutex_has_waiters(struct rt_mutex *lock)   in rt_mutex_has_waiters()
    85  rt_mutex_top_waiter(struct rt_mutex *lock)   in rt_mutex_top_waiter()
   108  static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)   in rt_mutex_owner()
   110          unsigned long owner = (unsigned long) READ_ONCE(lock->owner);   in rt_mutex_owner()
    [all …]

D | qrwlock.c |
    21  void queued_read_lock_slowpath(struct qrwlock *lock)   in queued_read_lock_slowpath()
    33                  atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));   in queued_read_lock_slowpath()
    36          atomic_sub(_QR_BIAS, &lock->cnts);   in queued_read_lock_slowpath()
    41          arch_spin_lock(&lock->wait_lock);   in queued_read_lock_slowpath()
    42          atomic_add(_QR_BIAS, &lock->cnts);   in queued_read_lock_slowpath()
    49          atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));   in queued_read_lock_slowpath()
    54          arch_spin_unlock(&lock->wait_lock);   in queued_read_lock_slowpath()
    62  void queued_write_lock_slowpath(struct qrwlock *lock)   in queued_write_lock_slowpath()
    67          arch_spin_lock(&lock->wait_lock);   in queued_write_lock_slowpath()
    70          if (!atomic_read(&lock->cnts) &&   in queued_write_lock_slowpath()
    [all …]

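These slowpaths sit behind the generic reader-writer lock API on architectures that use queued rwlocks; the fast path lives in the read_lock()/write_lock() inlines. A minimal usage sketch (table_lock, lookup_table, and update_table are hypothetical):

    #include <linux/spinlock.h>     /* pulls in the rwlock API */

    static DEFINE_RWLOCK(table_lock);       /* hypothetical lock */
    static int table[16];

    static int lookup_table(int i)
    {
            int v;

            read_lock(&table_lock);         /* shared; on contention this can
                                               enter queued_read_lock_slowpath() */
            v = table[i];
            read_unlock(&table_lock);
            return v;
    }

    static void update_table(int i, int v)
    {
            write_lock(&table_lock);        /* exclusive */
            table[i] = v;
            write_unlock(&table_lock);
    }
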
D | lockdep.c |
   711  static void print_lockdep_cache(struct lockdep_map *lock)   in print_lockdep_cache()
   716          name = lock->name;   in print_lockdep_cache()
   718                  name = __get_key_name(lock->key->subkeys, str);   in print_lockdep_cache()
   735          struct lock_class *lock = hlock_class(hlock);   in print_lock()
   737          if (!lock) {   in print_lock()
   743          print_lock_name(lock);   in print_lock()
   844  look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)   in look_up_lock_class()
   866          if (unlikely(!lock->key))   in look_up_lock_class()
   878          key = lock->key->subkeys + subclass;   in look_up_lock_class()
   894          WARN_ON_ONCE(class->name != lock->name &&   in look_up_lock_class()
    [all …]

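look_up_lock_class() and the printing helpers are part of the lockdep machinery that validates annotations such as lockdep_assert_held(). A sketch of the annotation style that feeds it (state_lock and __set_state are illustrative names):

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    static DEFINE_SPINLOCK(state_lock);     /* hypothetical lock */
    static int state;

    /* caller must hold state_lock; lockdep verifies this in debug builds */
    static void __set_state(int v)
    {
            lockdep_assert_held(&state_lock);
            state = v;
    }
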
D | semaphore.c |
    57          raw_spin_lock_irqsave(&sem->lock, flags);   in down()
    62          raw_spin_unlock_irqrestore(&sem->lock, flags);   in down()
    80          raw_spin_lock_irqsave(&sem->lock, flags);   in down_interruptible()
    85          raw_spin_unlock_irqrestore(&sem->lock, flags);   in down_interruptible()
   106          raw_spin_lock_irqsave(&sem->lock, flags);   in down_killable()
   111          raw_spin_unlock_irqrestore(&sem->lock, flags);   in down_killable()
   135          raw_spin_lock_irqsave(&sem->lock, flags);   in down_trylock()
   139          raw_spin_unlock_irqrestore(&sem->lock, flags);   in down_trylock()
   160          raw_spin_lock_irqsave(&sem->lock, flags);   in down_timeout()
   165          raw_spin_unlock_irqrestore(&sem->lock, flags);   in down_timeout()
    [all …]

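Every down_*() variant brackets its wait-list manipulation with sem->lock. Caller-side usage, sketched with a hypothetical counting semaphore limiting concurrent holders:

    #include <linux/semaphore.h>

    static struct semaphore slots;          /* hypothetical counting semaphore */

    static void slots_init(void)
    {
            sema_init(&slots, 4);           /* allow four concurrent holders */
    }

    static int use_slot(void)
    {
            if (down_interruptible(&slots)) /* may sleep; -EINTR on signal */
                    return -EINTR;
            /* ... one of four slots held ... */
            up(&slots);
            return 0;
    }
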
D | osq_lock.c |
    42  osq_wait_next(struct optimistic_spin_queue *lock,   in osq_wait_next()
    58          if (atomic_read(&lock->tail) == curr &&   in osq_wait_next()
    59              atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {   in osq_wait_next()
    90  bool osq_lock(struct optimistic_spin_queue *lock)   in osq_lock()
   107          old = atomic_xchg(&lock->tail, curr);   in osq_lock()
   189          next = osq_wait_next(lock, node, prev);   in osq_lock()
   207  void osq_unlock(struct optimistic_spin_queue *lock)   in osq_unlock()
   215          if (likely(atomic_cmpxchg_release(&lock->tail, curr,   in osq_unlock()
   229          next = osq_wait_next(lock, node, NULL);   in osq_unlock()

D | mutex.h |
    13  #define debug_mutex_wake_waiter(lock, waiter)           do { } while (0)
    15  #define debug_mutex_add_waiter(lock, waiter, ti)        do { } while (0)
    16  #define debug_mutex_remove_waiter(lock, waiter, ti)     do { } while (0)
    17  #define debug_mutex_unlock(lock)                        do { } while (0)
    18  #define debug_mutex_init(lock, name, key)               do { } while (0)
    21  debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)   in debug_mutex_lock_common()

D | mutex-debug.h |
    17  extern void debug_mutex_lock_common(struct mutex *lock,
    19  extern void debug_mutex_wake_waiter(struct mutex *lock,
    22  extern void debug_mutex_add_waiter(struct mutex *lock,
    25  extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
    27  extern void debug_mutex_unlock(struct mutex *lock);
    28  extern void debug_mutex_init(struct mutex *lock, const char *name,

/kernel/bpf/

D | percpu_freelist.c |
    17          raw_spin_lock_init(&head->lock);   in pcpu_freelist_init()
    20          raw_spin_lock_init(&s->extralist.lock);   in pcpu_freelist_init()
    40          raw_spin_lock(&head->lock);   in ___pcpu_freelist_push()
    42          raw_spin_unlock(&head->lock);   in ___pcpu_freelist_push()
    48          if (!raw_spin_trylock(&s->extralist.lock))   in pcpu_freelist_try_push_extra()
    52          raw_spin_unlock(&s->extralist.lock);   in pcpu_freelist_try_push_extra()
    66          if (raw_spin_trylock(&head->lock)) {   in ___pcpu_freelist_push_nmi()
    68                  raw_spin_unlock(&head->lock);   in ___pcpu_freelist_push_nmi()
   132          raw_spin_lock(&head->lock);   in ___pcpu_freelist_pop()
   136          raw_spin_unlock(&head->lock);   in ___pcpu_freelist_pop()
    [all …]

D | helpers.c |
   237  static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)   in __bpf_spin_lock()
   239          arch_spinlock_t *l = (void *)lock;   in __bpf_spin_lock()
   242                  arch_spinlock_t lock;   in __bpf_spin_lock()
   243          } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };   in __bpf_spin_lock()
   247          BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));   in __bpf_spin_lock()
   251  static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)   in __bpf_spin_unlock()
   253          arch_spinlock_t *l = (void *)lock;   in __bpf_spin_unlock()
   260  static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)   in __bpf_spin_lock()
   262          atomic_t *l = (void *)lock;   in __bpf_spin_lock()
   264          BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));   in __bpf_spin_lock()
    [all …]

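__bpf_spin_lock()/__bpf_spin_unlock() back the bpf_spin_lock()/bpf_spin_unlock() helpers that BPF programs call on a bpf_spin_lock embedded in a map value. A sketch of the program side, assuming a libbpf-style toolchain (map, section, and function names are illustrative):

    /* BPF program side (sketch) */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct val {
            struct bpf_spin_lock lock;      /* must live inside the map value */
            long counter;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, __u32);
            __type(value, struct val);
    } counters SEC(".maps");

    SEC("tp/syscalls/sys_enter_getpid")
    int bump(void *ctx)
    {
            __u32 k = 0;
            struct val *v = bpf_map_lookup_elem(&counters, &k);

            if (!v)
                    return 0;
            bpf_spin_lock(&v->lock);        /* maps to __bpf_spin_lock() above */
            v->counter++;
            bpf_spin_unlock(&v->lock);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL"; /* bpf_spin_lock requires GPL */
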
/kernel/sched/

D | wait.c |
    12          spin_lock_init(&wq_head->lock);   in __init_waitqueue_head()
    13          lockdep_set_class_and_name(&wq_head->lock, key, name);   in __init_waitqueue_head()
    24          spin_lock_irqsave(&wq_head->lock, flags);   in add_wait_queue()
    26          spin_unlock_irqrestore(&wq_head->lock, flags);   in add_wait_queue()
    35          spin_lock_irqsave(&wq_head->lock, flags);   in add_wait_queue_exclusive()
    37          spin_unlock_irqrestore(&wq_head->lock, flags);   in add_wait_queue_exclusive()
    45          spin_lock_irqsave(&wq_head->lock, flags);   in remove_wait_queue()
    47          spin_unlock_irqrestore(&wq_head->lock, flags);   in remove_wait_queue()
    74          lockdep_assert_held(&wq_head->lock);   in __wake_up_common()
   123          spin_lock_irqsave(&wq_head->lock, flags);   in __wake_up_common_lock()
    [all …]

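The wq_head->lock acquisitions above serialize add/remove/wake on a waitqueue. Typical driver-side usage reaches these paths through wait_event_interruptible() and wake_up(); data_wq, data_ready, and the two functions are made-up names:

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(data_wq);    /* hypothetical queue */
    static bool data_ready;

    static int consumer(void)
    {
            /* sleeps until data_ready; queue add/remove is under wq_head->lock */
            return wait_event_interruptible(data_wq, data_ready);
    }

    static void producer(void)
    {
            data_ready = true;
            wake_up(&data_wq);      /* takes the lock, runs __wake_up_common() */
    }
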
D | swait.c |
    10          raw_spin_lock_init(&q->lock);   in __init_swait_queue_head()
    11          lockdep_set_class_and_name(&q->lock, key, name);   in __init_swait_queue_head()
    52          raw_spin_lock_irqsave(&q->lock, flags);   in swake_up_one()
    54          raw_spin_unlock_irqrestore(&q->lock, flags);   in swake_up_one()
    67          raw_spin_lock_irq(&q->lock);   in swake_up_all()
    78          raw_spin_unlock_irq(&q->lock);   in swake_up_all()
    79          raw_spin_lock_irq(&q->lock);   in swake_up_all()
    81          raw_spin_unlock_irq(&q->lock);   in swake_up_all()
    96          raw_spin_lock_irqsave(&q->lock, flags);   in prepare_to_swait_exclusive()
    99          raw_spin_unlock_irqrestore(&q->lock, flags);   in prepare_to_swait_exclusive()
    [all …]

D | sched.h |
   301          raw_spinlock_t lock;   member
   367          raw_spinlock_t lock;   member
   567          raw_spinlock_t lock ____cacheline_aligned;
   919          raw_spinlock_t lock;   member
  1181          lockdep_assert_held(&rq->lock);   in rq_clock()
  1189          lockdep_assert_held(&rq->lock);   in rq_clock_task()
  1200          lockdep_assert_held(&rq->lock);   in rq_clock_task_mult()
  1232          lockdep_assert_held(&rq->lock);   in rq_clock_skip_update()
  1242          lockdep_assert_held(&rq->lock);   in rq_clock_cancel_skipupdate()
  1271          rf->cookie = lockdep_pin_lock(&rq->lock);   in rq_pin_lock()
    [all …]

D | completion.c |
    32          raw_spin_lock_irqsave(&x->wait.lock, flags);   in complete()
    37          raw_spin_unlock_irqrestore(&x->wait.lock, flags);   in complete()
    63          raw_spin_lock_irqsave(&x->wait.lock, flags);   in complete_all()
    66          raw_spin_unlock_irqrestore(&x->wait.lock, flags);   in complete_all()
    84          raw_spin_unlock_irq(&x->wait.lock);   in do_wait_for_common()
    86          raw_spin_lock_irq(&x->wait.lock);   in do_wait_for_common()
   105          raw_spin_lock_irq(&x->wait.lock);   in __wait_for_common()
   107          raw_spin_unlock_irq(&x->wait.lock);   in __wait_for_common()
   296          raw_spin_lock_irqsave(&x->wait.lock, flags);   in try_wait_for_completion()
   301          raw_spin_unlock_irqrestore(&x->wait.lock, flags);   in try_wait_for_completion()
    [all …]

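A completion wraps a wait queue plus a done counter, and complete() and the wait loops above take x->wait.lock around both. A common pattern, sketched with hypothetical firmware-loading names:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DECLARE_COMPLETION(fw_loaded);   /* hypothetical completion */

    static int wait_for_fw(void)
    {
            /* returns remaining jiffies, or 0 on timeout */
            if (!wait_for_completion_timeout(&fw_loaded,
                                             msecs_to_jiffies(1000)))
                    return -ETIMEDOUT;
            return 0;
    }

    static void fw_ready_callback(void)
    {
            complete(&fw_loaded);   /* takes x->wait.lock, wakes one waiter */
    }
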
/kernel/irq/

D | autoprobe.c |
    46          raw_spin_lock_irq(&desc->lock);   in probe_irq_on()
    57          raw_spin_unlock_irq(&desc->lock);   in probe_irq_on()
    69          raw_spin_lock_irq(&desc->lock);   in probe_irq_on()
    75          raw_spin_unlock_irq(&desc->lock);   in probe_irq_on()
    87          raw_spin_lock_irq(&desc->lock);   in probe_irq_on()
    98          raw_spin_unlock_irq(&desc->lock);   in probe_irq_on()
   124          raw_spin_lock_irq(&desc->lock);   in probe_irq_mask()
   132          raw_spin_unlock_irq(&desc->lock);   in probe_irq_mask()
   163          raw_spin_lock_irq(&desc->lock);   in probe_irq_off()
   174          raw_spin_unlock_irq(&desc->lock);   in probe_irq_off()

/kernel/

D | cpu_pm.c |
    23          raw_spinlock_t lock;   member
    26          .lock = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
    53          raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);   in cpu_pm_notify_robust()
    55          raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);   in cpu_pm_notify_robust()
    75          raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);   in cpu_pm_register_notifier()
    77          raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);   in cpu_pm_register_notifier()
    95          raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);   in cpu_pm_unregister_notifier()
    97          raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);   in cpu_pm_unregister_notifier()

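cpu_pm_notifier.lock serializes notifier registration and the robust notify path. A sketch of how a driver hooks these events (callback and variable names are illustrative):

    #include <linux/cpu_pm.h>
    #include <linux/notifier.h>
    #include <linux/init.h>

    static int my_cpu_pm_cb(struct notifier_block *nb, unsigned long action,
                            void *data)
    {
            switch (action) {
            case CPU_PM_ENTER:      /* CPU about to enter a low-power state */
                    /* save per-CPU hardware context */
                    break;
            case CPU_PM_EXIT:       /* CPU returned from a low-power state */
                    /* restore per-CPU hardware context */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_cpu_pm_nb = {
            .notifier_call = my_cpu_pm_cb,
    };

    static int __init my_init(void)
    {
            /* registration takes cpu_pm_notifier.lock, as shown above */
            return cpu_pm_register_notifier(&my_cpu_pm_nb);
    }
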
D | delayacct.c |
    38          raw_spin_lock_init(&tsk->delays->lock);   in __delayacct_tsk_init()
    45  static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,   in delayacct_end()
    52          raw_spin_lock_irqsave(lock, flags);   in delayacct_end()
    55          raw_spin_unlock_irqrestore(lock, flags);   in delayacct_end()
    82          delayacct_end(&delays->lock, &delays->blkio_start, total, count);   in __delayacct_blkio_end()
   122          raw_spin_lock_irqsave(&tsk->delays->lock, flags);   in __delayacct_add_tsk()
   135          raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);   in __delayacct_add_tsk()
   145          raw_spin_lock_irqsave(&tsk->delays->lock, flags);   in __delayacct_blkio_ticks()
   148          raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);   in __delayacct_blkio_ticks()
   160                         &current->delays->lock,   in __delayacct_freepages_end()
    [all …]

D | padata.c |
    52          spinlock_t lock;   member
   262          spin_lock(&reorder->lock);   in padata_find_next()
   264          spin_unlock(&reorder->lock);   in padata_find_next()
   275          spin_unlock(&reorder->lock);   in padata_find_next()
   285          spin_unlock(&reorder->lock);   in padata_find_next()
   307          if (!spin_trylock_bh(&pd->lock))   in padata_reorder()
   324          spin_lock(&squeue->serial.lock);   in padata_reorder()
   326          spin_unlock(&squeue->serial.lock);   in padata_reorder()
   331          spin_unlock_bh(&pd->lock);   in padata_reorder()
   369          spin_lock(&squeue->serial.lock);   in padata_serial_worker()
    [all …]