/kernel/locking/
D | qspinlock_paravirt.h
      91  (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {   in pv_hybrid_queued_unfair_trylock()
     121  return !READ_ONCE(lock->locked) &&   in trylock_clear_pending()
     302  if (READ_ONCE(node->locked))   in pv_wait_node()
     322  if (!READ_ONCE(node->locked)) {   in pv_wait_node()
     343  !READ_ONCE(node->locked));   in pv_wait_node()
     391  WRITE_ONCE(lock->locked, _Q_SLOW_VAL);   in pv_kick_node()
     456  if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {   in pv_wait_head_or_lock()
     462  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);   in pv_wait_head_or_lock()
     470  pv_wait(&lock->locked, _Q_SLOW_VAL);   in pv_wait_head_or_lock()
     503  __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)   in __pv_queued_spin_unlock_slowpath() argument
     [all …]
D | osq_lock.c
      97  node->locked = 0;   in osq_lock()
     143  if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||   in osq_lock()
     170  if (smp_load_acquire(&node->locked))   in osq_lock()
     226  WRITE_ONCE(next->locked, 1);   in osq_unlock()
     232  WRITE_ONCE(next->locked, 1);   in osq_unlock()
D | qspinlock.c
     264  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);   in set_locked()
     383  smp_cond_load_acquire(&lock->locked, !VAL);   in queued_spin_lock_slowpath()
     437  node->locked = 0;   in queued_spin_lock_slowpath()
     477  arch_mcs_spin_lock_contended(&node->locked);   in queued_spin_lock_slowpath()
     512  goto locked;   in queued_spin_lock_slowpath()
     516  locked:   in queued_spin_lock_slowpath()
     556  arch_mcs_spin_unlock_contended(&next->locked);   in queued_spin_lock_slowpath()
D | mcs_spinlock.h
      20  int locked; /* 1 if lock acquired */   member
      70  node->locked = 0;   in mcs_spin_lock()
      94  arch_mcs_spin_lock_contended(&node->locked);   in mcs_spin_lock()
     118  arch_mcs_spin_unlock_contended(&next->locked);   in mcs_spin_unlock()
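The mcs_spinlock.h hits above are the heart of the MCS handoff: each waiter owns a queue node, spins only on its own node's locked flag, and the unlocker passes the lock on by setting the successor's flag. Below is a minimal userspace sketch of that idea using C11 atomics; mcs_node, mcs_lock() and mcs_unlock() are invented names, and the kernel version differs in detail (arch_mcs_spin_lock_contended()/arch_mcs_spin_unlock_contended() helpers, per-CPU node storage, and the cancellable OSQ variant visible in osq_lock.c).

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
    _Atomic(struct mcs_node *) next;
    atomic_int locked;              /* becomes 1 when the lock is handed to us */
};

static void mcs_lock(_Atomic(struct mcs_node *) *tail, struct mcs_node *self)
{
    struct mcs_node *prev;

    atomic_store_explicit(&self->next, (struct mcs_node *)NULL, memory_order_relaxed);
    atomic_store_explicit(&self->locked, 0, memory_order_relaxed);

    /* Swap ourselves in as the new tail; the old tail is our predecessor. */
    prev = atomic_exchange_explicit(tail, self, memory_order_acq_rel);
    if (!prev)
        return;                     /* queue was empty: lock acquired */

    /* Publish our node to the predecessor, then spin on our own flag only. */
    atomic_store_explicit(&prev->next, self, memory_order_release);
    while (!atomic_load_explicit(&self->locked, memory_order_acquire))
        ;                           /* each waiter spins on its own cache line */
}

static void mcs_unlock(_Atomic(struct mcs_node *) *tail, struct mcs_node *self)
{
    struct mcs_node *next = atomic_load_explicit(&self->next, memory_order_acquire);

    if (!next) {
        struct mcs_node *expected = self;

        /* No successor visible: if we are still the tail, the queue is empty. */
        if (atomic_compare_exchange_strong_explicit(tail, &expected,
                                                    (struct mcs_node *)NULL,
                                                    memory_order_release,
                                                    memory_order_relaxed))
            return;

        /* A successor raced with us between its exchange of the tail and its
         * ->next store; wait for the link to appear. */
        while (!(next = atomic_load_explicit(&self->next, memory_order_acquire)))
            ;
    }

    /* Hand the lock over: the successor's spin in mcs_lock() sees this store. */
    atomic_store_explicit(&next->locked, 1, memory_order_release);
}

The per-node flag is what makes the queue scale: every waiter busy-waits on memory it owns, so releasing the lock touches exactly one remote cache line instead of invalidating a shared lock word for all waiters.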
D | mutex.c
    1112  bool locked;   in mutex_trylock() local
    1116  locked = __mutex_trylock(lock);   in mutex_trylock()
    1117  if (locked) {   in mutex_trylock()
    1122  return locked;   in mutex_trylock()
D | lockdep.c
    6401  int locked;   in lockdep_reset_lock_reg() local
    6405  locked = graph_lock();   in lockdep_reset_lock_reg()
    6406  if (!locked)   in lockdep_reset_lock_reg()
/kernel/bpf/
D | local_storage.c
      68  void *key, bool locked)   in cgroup_storage_lookup() argument
      73  if (!locked)   in cgroup_storage_lookup()
      90  if (!locked)   in cgroup_storage_lookup()
      96  if (!locked)   in cgroup_storage_lookup()
D | hashtab.c
    1697  bool locked = false;   in __htab_map_lookup_and_delete_batch() local
    1757  if (locked) {   in __htab_map_lookup_and_delete_batch()
    1770  if (bucket_cnt && !locked) {   in __htab_map_lookup_and_delete_batch()
    1771  locked = true;   in __htab_map_lookup_and_delete_batch()
    1801  if (!locked)   in __htab_map_lookup_and_delete_batch()
    1855  locked = false;   in __htab_map_lookup_and_delete_batch()
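The hashtab.c hits show __htab_map_lookup_and_delete_batch() tracking with a local bool whether it currently holds the bucket lock, so it can drop the lock to enlarge its snapshot buffer and retry. A rough userspace sketch of that pattern, with a pthread mutex standing in for the kernel's bucket lock; drain_bucket(), count_entries() and grow_buffer() are invented for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static int drain_bucket(pthread_mutex_t *bucket_lock,
                        size_t (*count_entries)(void),
                        int (*grow_buffer)(size_t), size_t buf_len)
{
    bool locked = false;
    size_t bucket_cnt;

again:
    if (!locked) {
        pthread_mutex_lock(bucket_lock);
        locked = true;
    }

    bucket_cnt = count_entries();
    if (bucket_cnt > buf_len) {
        /* Can't allocate while holding the lock: drop it, grow, retry. */
        pthread_mutex_unlock(bucket_lock);
        locked = false;
        if (grow_buffer(bucket_cnt))
            return -1;              /* allocation failed */
        buf_len = bucket_cnt;
        goto again;
    }

    /* ... copy out and delete bucket_cnt entries under the lock ... */

    if (locked)
        pthread_mutex_unlock(bucket_lock);
    return (int)bucket_cnt;
}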
/kernel/futex/
D | requeue.c
     125  static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)   in futex_requeue_pi_complete() argument
     134  if (locked >= 0) {   in futex_requeue_pi_complete()
     138  new = Q_REQUEUE_PI_DONE + locked;   in futex_requeue_pi_complete()
D | pi.c
     884  int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked)   in fixup_pi_owner() argument
     886  if (locked) {   in fixup_pi_owner()
D | futex.h
     233  extern int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked);
/kernel/cgroup/
D | cgroup-internal.h
     252  bool *locked,
     255  void cgroup_procs_write_finish(struct task_struct *task, bool locked)
D | cgroup-v1.c
     497  bool locked;   in __cgroup1_procs_write() local
     503  task = cgroup_procs_write_start(buf, threadgroup, &locked, cgrp);   in __cgroup1_procs_write()
     528  cgroup_procs_write_finish(task, locked);   in __cgroup1_procs_write()
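The cgroup-v1.c and cgroup-internal.h hits show a companion pattern: cgroup_procs_write_start() reports through a bool * out-parameter whether it took an extra lock, and the caller hands that flag straight to cgroup_procs_write_finish(). A toy sketch of the shape, with invented names (attach_start(), attach_finish(), move_task(), group_lock) and a pthread mutex instead of the cgroup locks.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

/* The start helper decides whether the heavier lock is needed and
 * reports that decision back through the bool out-parameter. */
static int attach_start(bool whole_group, bool *locked)
{
    *locked = whole_group;
    if (*locked)
        pthread_mutex_lock(&group_lock);
    return 0;
}

/* The finish helper is told whether start took the lock, so the
 * caller never has to re-derive which case it is in. */
static void attach_finish(bool locked)
{
    if (locked)
        pthread_mutex_unlock(&group_lock);
}

/* Caller mirrors the __cgroup1_procs_write() shape: start, do the move, finish. */
static int move_task(bool whole_group)
{
    bool locked;

    if (attach_start(whole_group, &locked))
        return -1;
    /* ... attach the task or the whole thread group ... */
    attach_finish(locked);
    return 0;
}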
/kernel/trace/
D | trace_events_user.c
     255  static void user_event_put(struct user_event *user, bool locked)   in user_event_put() argument
     273  if (!locked) {   in user_event_put()
     326  if (!locked)   in user_event_put()
     382  bool locked)   in user_event_enabler_destroy() argument
     387  user_event_put(enabler->event, locked);   in user_event_enabler_destroy()
D | ring_buffer.c
    5020  rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)   in rb_reader_unlock() argument
    5022  if (likely(locked))   in rb_reader_unlock()
/kernel/power/
D | Kconfig
     232  locked up attempting to suspend/resume a device.
/kernel/events/
D | core.c
    6444  unsigned long locked, lock_limit;   in perf_mmap() local
    6592  locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;   in perf_mmap()
    6594  if ((locked > lock_limit) && perf_is_paranoid() &&   in perf_mmap()
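The core.c hits show perf_mmap() summing the pages already pinned by the mm with the new request and comparing the total against a lock limit (with CAP_IPC_LOCK and the perf paranoia setting able to relax the check). A userspace-flavoured sketch of just that arithmetic against RLIMIT_MEMLOCK; may_pin() and its parameters are invented for illustration and ignore the kernel's extra per-user budget.

#include <stdbool.h>
#include <sys/resource.h>
#include <unistd.h>

static bool may_pin(unsigned long pinned_pages, unsigned long extra_pages)
{
    struct rlimit rlim;
    unsigned long lock_limit, locked;
    long page_size = sysconf(_SC_PAGESIZE);

    if (getrlimit(RLIMIT_MEMLOCK, &rlim) || page_size <= 0)
        return false;

    lock_limit = rlim.rlim_cur / (unsigned long)page_size;  /* limit in pages */
    locked = pinned_pages + extra_pages;                     /* what we would hold */

    return locked <= lock_limit;
}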