
Searched refs:lock (Results 1 – 25 of 98) sorted by relevance


/kernel/locking/
spinlock_debug.c
  in __raw_spin_lock_init():
    16   void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
    23       debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    24       lockdep_init_map(&lock->dep_map, name, key, 0);
    26       lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    27       lock->magic = SPINLOCK_MAGIC;
    28       lock->owner = SPINLOCK_OWNER_INIT;
    29       lock->owner_cpu = -1;
  in __rwlock_init():
    34   void __rwlock_init(rwlock_t *lock, const char *name,
    41       debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    42       lockdep_init_map(&lock->dep_map, name, key, 0);
  [all …]

mutex.c
  in __mutex_init():
    40   __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
    42       atomic_long_set(&lock->owner, 0);
    43       spin_lock_init(&lock->wait_lock);
    44       INIT_LIST_HEAD(&lock->wait_list);
    46       osq_lock_init(&lock->osq);
    49       debug_mutex_init(lock, name, key);
  in __mutex_owner():
    73   static inline struct task_struct *__mutex_owner(struct mutex *lock)
    75       return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
  in mutex_is_locked():
    83   bool mutex_is_locked(struct mutex *lock)
    85       return __mutex_owner(lock) != NULL;
  [all …]

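For orientation, a minimal sketch of how the API initialized by __mutex_init() above is typically consumed by kernel code. This is illustrative only, not one of the search results; the example_* identifiers are hypothetical:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);      /* static init; mutex_init() on a
                                               dynamic mutex calls __mutex_init() */
    static int example_count;

    static int example_increment(void)
    {
            int v;

            mutex_lock(&example_lock);      /* may sleep; process context only */
            v = ++example_count;
            mutex_unlock(&example_lock);    /* mutex_is_locked() is now false
                                               unless someone else grabbed it */
            return v;
    }
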
rtmutex.c
  in rt_mutex_set_owner():
    53   rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
    57       if (rt_mutex_has_waiters(lock))
    60       lock->owner = (struct task_struct *)val;
  in clear_rt_mutex_waiters():
    63   static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
    65       lock->owner = (struct task_struct *)
    66           ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
  in fixup_rt_mutex_waiters():
    69   static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
    71       unsigned long owner, *p = (unsigned long *) &lock->owner;
    73       if (rt_mutex_has_waiters(lock))
  in mark_rt_mutex_waiters():
    153  static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  [all …]

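A sketch of the consumer side of this priority-inheriting mutex, under the assumption that the in-kernel rt_mutex_lock()/rt_mutex_unlock() entry points are available to the caller; example_* names are made up:

    #include <linux/rtmutex.h>

    static DEFINE_RT_MUTEX(example_rt_lock);

    static void example_pi_section(void)
    {
            /* If a higher-priority task blocks here, the owner is boosted;
             * rt_mutex_set_owner() and fixup_rt_mutex_waiters() above
             * maintain the waiter bits this relies on. */
            rt_mutex_lock(&example_rt_lock);
            /* ... critical section ... */
            rt_mutex_unlock(&example_rt_lock);
    }
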
spinlock.c
    68   void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
    72       if (likely(do_raw_##op##_trylock(lock))) \
    76       arch_##op##_relax(&lock->raw_lock); \
    80   unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
    87       if (likely(do_raw_##op##_trylock(lock))) \
    92       arch_##op##_relax(&lock->raw_lock); \
    98   void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
    100      _raw_##op##_lock_irqsave(lock); \
    103  void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
    112      flags = _raw_##op##_lock_irqsave(lock); \
  [all …]

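For context, a minimal sketch of the spinlock caller side; on contention (SMP builds using these generic lock ops) a call like the one below ends up in the __raw_*_lock_irqsave() loop shown above. Illustrative only; example_* names are hypothetical:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    static void example_update(void)
    {
            unsigned long flags;

            /* Critical section must be short and must not sleep;
             * the irqsave variant makes it safe against local IRQs. */
            spin_lock_irqsave(&example_lock, flags);
            /* ... touch data shared with IRQ context ... */
            spin_unlock_irqrestore(&example_lock, flags);
    }
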
qspinlock_paravirt.h
  in pv_hybrid_queued_unfair_trylock():
    81   static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
    88       int val = atomic_read(&lock->val);
    91       (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
  in set_pending():
    109  static __always_inline void set_pending(struct qspinlock *lock)
    111      WRITE_ONCE(lock->pending, 1);
  in trylock_clear_pending():
    119  static __always_inline int trylock_clear_pending(struct qspinlock *lock)
    121      return !READ_ONCE(lock->locked) &&
    122          (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
  in set_pending():
    126  static __always_inline void set_pending(struct qspinlock *lock)
    128      atomic_or(_Q_PENDING_VAL, &lock->val);
  [all …]

qspinlock.c
  in clear_pending():
    147  static __always_inline void clear_pending(struct qspinlock *lock)
    149      WRITE_ONCE(lock->pending, 0);
  in clear_pending_set_locked():
    160  static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
    162      WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
  in xchg_tail():
    175  static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
    181      return (u32)xchg_relaxed(&lock->tail,
  in clear_pending():
    193  static __always_inline void clear_pending(struct qspinlock *lock)
    195      atomic_andnot(_Q_PENDING_VAL, &lock->val);
  in clear_pending_set_locked():
    204  static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
    206      atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
  [all …]

rtmutex-debug.c
  in printk_lock():
    43   static void printk_lock(struct rt_mutex *lock, int print_owner)
    45       if (lock->name)
    47           lock, lock->name);
    50           lock, lock->file, lock->line);
    52       if (print_owner && rt_mutex_owner(lock)) {
    53           printk(".. ->owner: %p\n", lock->owner);
    55           printk_task(rt_mutex_owner(lock));
  in debug_rt_mutex_deadlock():
    73       struct rt_mutex *lock)
    80       task = rt_mutex_owner(act_waiter->lock);
    83       act_waiter->deadlock_lock = lock;
  [all …]

mutex-debug.c
  in debug_mutex_lock_common():
    30   void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
  in debug_mutex_wake_waiter():
    37   void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
    39       lockdep_assert_held(&lock->wait_lock);
    40       DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
  in debug_mutex_add_waiter():
    51   void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
    54       lockdep_assert_held(&lock->wait_lock);
  in mutex_remove_waiter():
    60   void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
  in debug_mutex_unlock():
    72   void debug_mutex_unlock(struct mutex *lock)
    75       DEBUG_LOCKS_WARN_ON(lock->magic != lock);
    76       DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
  [all …]

rtmutex_common.h
    31   struct rt_mutex *lock;        (struct member)
  in rt_mutex_has_waiters():
    47   static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
    49       return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
  in rt_mutex_top_waiter():
    53   rt_mutex_top_waiter(struct rt_mutex *lock)
    55       struct rb_node *leftmost = rb_first_cached(&lock->waiters);
    60       BUG_ON(w->lock != lock);
  in rt_mutex_has_waiters():
    79   static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
  in rt_mutex_top_waiter():
    85   rt_mutex_top_waiter(struct rt_mutex *lock)
  in rt_mutex_owner():
    108  static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
    110      unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
  [all …]

qrwlock.c
  in queued_read_lock_slowpath():
    21   void queued_read_lock_slowpath(struct qrwlock *lock)
    33       atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
    36       atomic_sub(_QR_BIAS, &lock->cnts);
    41       arch_spin_lock(&lock->wait_lock);
    42       atomic_add(_QR_BIAS, &lock->cnts);
    49       atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
    54       arch_spin_unlock(&lock->wait_lock);
  in queued_write_lock_slowpath():
    62   void queued_write_lock_slowpath(struct qrwlock *lock)
    65       arch_spin_lock(&lock->wait_lock);
    68       if (!atomic_read(&lock->cnts) &&
  [all …]

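These slowpaths back the ordinary rwlock_t API on architectures that select ARCH_USE_QUEUED_RWLOCKS. A minimal sketch of that caller side, with hypothetical example_* names:

    #include <linux/spinlock.h>     /* provides the rwlock_t API */

    static DEFINE_RWLOCK(example_rwlock);
    static int example_table[16];

    static int example_read(unsigned int i)
    {
            int v;

            read_lock(&example_rwlock);   /* contended readers fall into
                                             queued_read_lock_slowpath() */
            v = example_table[i & 15];
            read_unlock(&example_rwlock);
            return v;
    }

    static void example_write(unsigned int i, int v)
    {
            write_lock(&example_rwlock);  /* writers queue on lock->wait_lock */
            example_table[i & 15] = v;
            write_unlock(&example_rwlock);
    }
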
semaphore.c
  in down():
    57   raw_spin_lock_irqsave(&sem->lock, flags);
    62   raw_spin_unlock_irqrestore(&sem->lock, flags);
  in down_interruptible():
    80   raw_spin_lock_irqsave(&sem->lock, flags);
    85   raw_spin_unlock_irqrestore(&sem->lock, flags);
  in down_killable():
    106  raw_spin_lock_irqsave(&sem->lock, flags);
    111  raw_spin_unlock_irqrestore(&sem->lock, flags);
  in down_trylock():
    135  raw_spin_lock_irqsave(&sem->lock, flags);
    139  raw_spin_unlock_irqrestore(&sem->lock, flags);
  in down_timeout():
    160  raw_spin_lock_irqsave(&sem->lock, flags);
    165  raw_spin_unlock_irqrestore(&sem->lock, flags);
  [all …]

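Every down_*() variant above serializes on the internal sem->lock. A minimal sketch of typical semaphore use (illustrative; example_* names are made up):

    #include <linux/semaphore.h>

    static struct semaphore example_sem;

    static void example_setup(void)
    {
            sema_init(&example_sem, 1);     /* count of 1: mutex-like behaviour */
    }

    static int example_use(void)
    {
            if (down_interruptible(&example_sem))   /* interrupted by a signal */
                    return -EINTR;
            /* ... */
            up(&example_sem);
            return 0;
    }
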
lockdep.c
  in print_lockdep_cache():
    659  static void print_lockdep_cache(struct lockdep_map *lock)
    664      name = lock->name;
    666      name = __get_key_name(lock->key->subkeys, str);
  in print_lock():
    683  struct lock_class *lock = hlock_class(hlock);        (local)
    685  if (!lock) {
    691  print_lock_name(lock);
  in look_up_lock_class():
    791  look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
    811      if (unlikely(!lock->key))
    823      key = lock->key->subkeys + subclass;
    839      WARN_ON_ONCE(class->name != lock->name &&
  [all …]

osq_lock.c
  in osq_wait_next():
    42   osq_wait_next(struct optimistic_spin_queue *lock,
    58       if (atomic_read(&lock->tail) == curr &&
    59           atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
  in osq_lock():
    90   bool osq_lock(struct optimistic_spin_queue *lock)
    107      old = atomic_xchg(&lock->tail, curr);
    188      next = osq_wait_next(lock, node, prev);
  in osq_unlock():
    206  void osq_unlock(struct optimistic_spin_queue *lock)
    214      if (likely(atomic_cmpxchg_release(&lock->tail, curr,
    228      next = osq_wait_next(lock, node, NULL);
mutex.h
    13   #define mutex_remove_waiter(lock, waiter, task) \
    16   #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
    18   #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
    19   #define debug_mutex_unlock(lock) do { } while (0)
    20   #define debug_mutex_init(lock, name, key) do { } while (0)
  in debug_mutex_lock_common():
    23   debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)

mutex-debug.h
    17   extern void debug_mutex_lock_common(struct mutex *lock,
    19   extern void debug_mutex_wake_waiter(struct mutex *lock,
    22   extern void debug_mutex_add_waiter(struct mutex *lock,
    25   extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
    27   extern void debug_mutex_unlock(struct mutex *lock);
    28   extern void debug_mutex_init(struct mutex *lock, const char *name,

rtmutex-debug.h
    15   extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key…
    16   extern void debug_rt_mutex_lock(struct rt_mutex *lock);
    17   extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
    18   extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
    20   extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
    23       struct rt_mutex *lock);

/kernel/sched/
wait.c
  in __init_waitqueue_head():
    11   spin_lock_init(&wq_head->lock);
    12   lockdep_set_class_and_name(&wq_head->lock, key, name);
  in add_wait_queue():
    23   spin_lock_irqsave(&wq_head->lock, flags);
    25   spin_unlock_irqrestore(&wq_head->lock, flags);
  in add_wait_queue_exclusive():
    34   spin_lock_irqsave(&wq_head->lock, flags);
    36   spin_unlock_irqrestore(&wq_head->lock, flags);
  in remove_wait_queue():
    44   spin_lock_irqsave(&wq_head->lock, flags);
    46   spin_unlock_irqrestore(&wq_head->lock, flags);
  in __wake_up_common():
    73   lockdep_assert_held(&wq_head->lock);
  in __wake_up_common_lock():
    122  spin_lock_irqsave(&wq_head->lock, flags);
  [all …]

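A minimal sketch of the waitqueue API whose internals appear above (illustrative; example_* names are hypothetical):

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(example_wq);  /* wq_head->lock set up as in
                                                    __init_waitqueue_head() */
    static bool example_ready;

    static int example_wait(void)
    {
            /* Sleeps until the condition holds; re-checked on each wakeup. */
            return wait_event_interruptible(example_wq, example_ready);
    }

    static void example_signal(void)
    {
            example_ready = true;
            wake_up(&example_wq);   /* __wake_up_common_lock() takes wq_head->lock */
    }
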
swait.c
  in __init_swait_queue_head():
    10   raw_spin_lock_init(&q->lock);
    11   lockdep_set_class_and_name(&q->lock, key, name);
  in swake_up_one():
    39   raw_spin_lock_irqsave(&q->lock, flags);
    41   raw_spin_unlock_irqrestore(&q->lock, flags);
  in swake_up_all():
    54   raw_spin_lock_irq(&q->lock);
    65   raw_spin_unlock_irq(&q->lock);
    66   raw_spin_lock_irq(&q->lock);
    68   raw_spin_unlock_irq(&q->lock);
  in prepare_to_swait_exclusive():
    83   raw_spin_lock_irqsave(&q->lock, flags);
    86   raw_spin_unlock_irqrestore(&q->lock, flags);
  [all …]

sched.h
    280  raw_spinlock_t lock;        (struct member)
    333  raw_spinlock_t lock;        (struct member)
    529  raw_spinlock_t lock ____cacheline_aligned;
    723  raw_spinlock_t lock;        (struct member)
    859  raw_spinlock_t lock;        (struct member)
  in rq_clock():
    1106 lockdep_assert_held(&rq->lock);
  in rq_clock_task():
    1114 lockdep_assert_held(&rq->lock);
  in rq_clock_skip_update():
    1122 lockdep_assert_held(&rq->lock);
  in rq_clock_cancel_skipupdate():
    1132 lockdep_assert_held(&rq->lock);
  in rq_pin_lock():
    1151 rf->cookie = lockdep_pin_lock(&rq->lock);
  [all …]

completion.c
  in complete():
    32   spin_lock_irqsave(&x->wait.lock, flags);
    37   spin_unlock_irqrestore(&x->wait.lock, flags);
  in complete_all():
    61   spin_lock_irqsave(&x->wait.lock, flags);
    64   spin_unlock_irqrestore(&x->wait.lock, flags);
  in do_wait_for_common():
    82   spin_unlock_irq(&x->wait.lock);
    84   spin_lock_irq(&x->wait.lock);
  in __wait_for_common():
    103  spin_lock_irq(&x->wait.lock);
    105  spin_unlock_irq(&x->wait.lock);
  in try_wait_for_completion():
    294  spin_lock_irqsave(&x->wait.lock, flags);
    299  spin_unlock_irqrestore(&x->wait.lock, flags);
  [all …]

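All completion state transitions above are serialized on x->wait.lock. A minimal sketch of typical use (illustrative; example_* names are made up):

    #include <linux/completion.h>

    static DECLARE_COMPLETION(example_done);

    static void example_wait_side(void)
    {
            /* Blocks until example_finish() runs. */
            wait_for_completion(&example_done);
    }

    static void example_finish(void)    /* e.g. from a worker or IRQ thread */
    {
            complete(&example_done);
    }
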
/kernel/bpf/
helpers.c
  in __bpf_spin_lock():
    221  static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
    223      arch_spinlock_t *l = (void *)lock;
    226          arch_spinlock_t lock;        (member)
    227      } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
    231      BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
  in __bpf_spin_unlock():
    235  static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
    237      arch_spinlock_t *l = (void *)lock;
  in __bpf_spin_lock():
    244  static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
    246      atomic_t *l = (void *)lock;
    248      BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
  [all …]

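These helpers back the bpf_spin_lock()/bpf_spin_unlock() calls made from BPF programs. A minimal sketch of the program side, assuming a libbpf BTF-style map definition (requires a BTF-enabled toolchain); the map and program names are hypothetical:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct example_val {
            struct bpf_spin_lock lock;
            long counter;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, __u32);
            __type(value, struct example_val);
    } example_map SEC(".maps");

    SEC("tracepoint/sched/sched_switch")
    int count_switches(void *ctx)
    {
            __u32 key = 0;
            struct example_val *v = bpf_map_lookup_elem(&example_map, &key);

            if (v) {
                    bpf_spin_lock(&v->lock);    /* lands in __bpf_spin_lock() above */
                    v->counter++;
                    bpf_spin_unlock(&v->lock);
            }
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
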
/kernel/irq/
autoprobe.c
  in probe_irq_on():
    46   raw_spin_lock_irq(&desc->lock);
    57   raw_spin_unlock_irq(&desc->lock);
    69   raw_spin_lock_irq(&desc->lock);
    75   raw_spin_unlock_irq(&desc->lock);
    87   raw_spin_lock_irq(&desc->lock);
    98   raw_spin_unlock_irq(&desc->lock);
  in probe_irq_mask():
    124  raw_spin_lock_irq(&desc->lock);
    132  raw_spin_unlock_irq(&desc->lock);
  in probe_irq_off():
    163  raw_spin_lock_irq(&desc->lock);
    174  raw_spin_unlock_irq(&desc->lock);

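A sketch of the legacy IRQ autoprobe sequence these functions implement, hedged as illustrative driver code (example_* names are made up):

    #include <linux/interrupt.h>

    static int example_find_irq(void)
    {
            unsigned long mask;
            int irq;

            mask = probe_irq_on();      /* walks IRQ descriptors under desc->lock */
            /* ... program the device so it raises its interrupt ... */
            irq = probe_irq_off(mask);  /* 0: none seen; negative: multiple seen */
            return irq;
    }
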
/kernel/
padata.c
  in padata_parallel_worker():
    69   spin_lock(&pqueue->parallel.lock);
    71   spin_unlock(&pqueue->parallel.lock);
  in padata_do_parallel():
    147  spin_lock(&queue->parallel.lock);
    149  spin_unlock(&queue->parallel.lock);
  in padata_find_next():
    183  spin_lock(&reorder->lock);
    185  spin_unlock(&reorder->lock);
    196  spin_unlock(&reorder->lock);
    207  spin_unlock(&reorder->lock);
  in padata_reorder():
    229  if (!spin_trylock_bh(&pd->lock))
    246  spin_lock(&squeue->serial.lock);
  [all …]

delayacct.c
  in __delayacct_tsk_init():
    38   raw_spin_lock_init(&tsk->delays->lock);
  in delayacct_end():
    45   static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,
    52       raw_spin_lock_irqsave(lock, flags);
    55       raw_spin_unlock_irqrestore(lock, flags);
  in __delayacct_blkio_end():
    82   delayacct_end(&delays->lock, &delays->blkio_start, total, count);
  in __delayacct_add_tsk():
    122  raw_spin_lock_irqsave(&tsk->delays->lock, flags);
    135  raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
  in __delayacct_blkio_ticks():
    145  raw_spin_lock_irqsave(&tsk->delays->lock, flags);
    148  raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
  in __delayacct_freepages_end():
    160  &current->delays->lock,
  [all …]

kthread.c
  in __kthread_init_worker():
    611  raw_spin_lock_init(&worker->lock);
    612  lockdep_set_class_and_name(&worker->lock, key, name);
  in kthread_worker_fn():
    653  raw_spin_lock_irq(&worker->lock);
    655  raw_spin_unlock_irq(&worker->lock);
    660  raw_spin_lock_irq(&worker->lock);
    667  raw_spin_unlock_irq(&worker->lock);
  in queuing_blocked():
    779  lockdep_assert_held(&worker->lock);
  in kthread_insert_work_sanity_check():
    787  lockdep_assert_held(&worker->lock);
  in kthread_queue_work():
    824  raw_spin_lock_irqsave(&worker->lock, flags);
    829  raw_spin_unlock_irqrestore(&worker->lock, flags);
  [all …]

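worker->lock serializes queueing against the worker thread's main loop above. A minimal sketch of the kthread_worker API from the caller's side (illustrative; example_* names are hypothetical):

    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct kthread_worker *example_worker;
    static struct kthread_work example_work;

    static void example_work_fn(struct kthread_work *work)
    {
            /* Runs in the dedicated worker thread. */
    }

    static int example_start(void)
    {
            example_worker = kthread_create_worker(0, "example");
            if (IS_ERR(example_worker))
                    return PTR_ERR(example_worker);

            kthread_init_work(&example_work, example_work_fn);
            /* kthread_queue_work() takes worker->lock, as in the matches above. */
            kthread_queue_work(example_worker, &example_work);
            return 0;
    }
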
