| /include/linux/ |
| D | spinlock_api_up.h |
    #define assert_raw_spin_locked(lock)    do { (void)(lock); } while (0)

    #define ___LOCK(lock) \
            do { __acquire(lock); (void)(lock); } while (0)

    #define __LOCK(lock) \
            do { preempt_disable(); ___LOCK(lock); } while (0)

    #define __LOCK_BH(lock) \
            do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)

    #define __LOCK_IRQ(lock) \
            do { local_irq_disable(); __LOCK(lock); } while (0)

    #define __LOCK_IRQSAVE(lock, flags) \
    [all …]
|
| D | rwlock.h |
    #ifdef CONFIG_DEBUG_SPINLOCK
    extern void __rwlock_init(rwlock_t *lock, const char *name,
                              struct lock_class_key *key);
    # define rwlock_init(lock)                                  \
    do {                                                        \
            static struct lock_class_key __key;                 \
                                                                \
            __rwlock_init((lock), #lock, &__key);               \
    } while (0)
    #else
    # define rwlock_init(lock) \
            do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
    #endif

    extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
    extern int do_raw_read_trylock(rwlock_t *lock);
    extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
    extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
    extern int do_raw_write_trylock(rwlock_t *lock);
    [all …]
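
These do_raw_* entry points sit underneath the read_lock()/write_lock() family that callers actually use. As orientation, a minimal usage sketch; the statistics variables and helper names are invented for illustration, and rwlock.h itself is pulled in via <linux/spinlock.h> rather than included directly:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(stats_rwlock);     /* hypothetical example state */
    static unsigned long hits;

    static unsigned long read_hits(void)
    {
            unsigned long v;

            read_lock(&stats_rwlock);       /* many readers may enter at once */
            v = hits;
            read_unlock(&stats_rwlock);
            return v;
    }

    static void record_hit(void)
    {
            write_lock(&stats_rwlock);      /* writers are exclusive */
            hits++;
            write_unlock(&stats_rwlock);
    }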
|
| D | rwlock_api_smp.h |
    void __lockfunc _raw_read_lock(rwlock_t *lock)          __acquires(lock);
    void __lockfunc _raw_write_lock(rwlock_t *lock)         __acquires(lock);
    void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
    void __lockfunc _raw_read_lock_bh(rwlock_t *lock)       __acquires(lock);
    void __lockfunc _raw_write_lock_bh(rwlock_t *lock)      __acquires(lock);
    void __lockfunc _raw_read_lock_irq(rwlock_t *lock)      __acquires(lock);
    void __lockfunc _raw_write_lock_irq(rwlock_t *lock)     __acquires(lock);
    unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
                                                            __acquires(lock);
    unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
    [all …]
|
| D | spinlock.h |
    #ifdef CONFIG_DEBUG_SPINLOCK
    extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                     struct lock_class_key *key, short inner);

    # define raw_spin_lock_init(lock)                           \
    do {                                                        \
            static struct lock_class_key __key;                 \
                                                                \
            __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
    } while (0)
    #else
    # define raw_spin_lock_init(lock) \
            do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
    #endif

    #define raw_spin_is_locked(lock)    arch_spin_is_locked(&(lock)->raw_lock)

    #ifdef arch_spin_is_contended
    #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
    #else
    #define raw_spin_is_contended(lock) (((void)(lock), 0))
    #endif

    extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
    extern int do_raw_spin_trylock(raw_spinlock_t *lock);
    [all …]
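
spinlock.h is the header most code includes; the raw_spin_* layer above is wrapped by spin_lock()/spin_unlock() and friends further down the file. A minimal usage sketch follows; the device structure and function names are hypothetical:

    #include <linux/spinlock.h>

    struct mydev {                          /* hypothetical device state */
            spinlock_t lock;
            unsigned int pending;
    };

    static void mydev_init(struct mydev *dev)
    {
            spin_lock_init(&dev->lock);
            dev->pending = 0;
    }

    /* Safe against the device's own interrupt handler. */
    static void mydev_queue(struct mydev *dev)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            dev->pending++;
            spin_unlock_irqrestore(&dev->lock, flags);
    }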
|
| D | spinlock_api_smp.h |
    void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)        __acquires(lock);
    void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                                __acquires(lock);
    void __lockfunc
    _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                                __acquires(lock);
    void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)     __acquires(lock);
    void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
                                                                __acquires(lock);
    unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
                                                                __acquires(lock);
    [all …]
|
| D | spinlock_rt.h |
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
    extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
                                    struct lock_class_key *key, bool percpu);
    #else
    static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
                                           struct lock_class_key *key, bool percpu)
    {
    }
    #endif

    #define spin_lock_init(slock)                               \
    do {                                                        \
            static struct lock_class_key __key;                 \
                                                                \
            rt_mutex_base_init(&(slock)->lock);                 \
            __rt_spin_lock_init(slock, #slock, &__key, false);  \
    } while (0)

    extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
    extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock);
    extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
    extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
    extern void rt_spin_lock_unlock(spinlock_t *lock);
    extern int rt_spin_trylock_bh(spinlock_t *lock);
    extern int rt_spin_trylock(spinlock_t *lock);
    [all …]
|
| D | spinlock_up.h |
    static inline void arch_spin_lock(arch_spinlock_t *lock)
    {
            lock->slock = 0;
            barrier();
    }

    static inline int arch_spin_trylock(arch_spinlock_t *lock)
    {
            char oldval = lock->slock;

            lock->slock = 0;
            barrier();

            return oldval > 0;
    }

    static inline void arch_spin_unlock(arch_spinlock_t *lock)
    {
            barrier();
            lock->slock = 1;
    }

    #define arch_read_lock(lock)        do { barrier(); (void)(lock); } while (0)
    #define arch_write_lock(lock)       do { barrier(); (void)(lock); } while (0)
    #define arch_read_trylock(lock)     ({ barrier(); (void)(lock); 1; })
    [all …]
|
| D | local_lock_internal.h |
    #define __local_lock_init(lock)                                 \
    do {                                                            \
            static struct lock_class_key __key;                     \
                                                                    \
            debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
            lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
                                  0, LD_WAIT_CONFIG, LD_WAIT_INV,   \
                                  LD_LOCK_PERCPU);                  \
            local_lock_debug_init(lock);                            \
    } while (0)

    #define __spinlock_nested_bh_init(lock)                         \
    do {                                                            \
            static struct lock_class_key __key;                     \
                                                                    \
            debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
            lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
                                  0, LD_WAIT_CONFIG, LD_WAIT_INV,   \
                                  LD_LOCK_NORMAL);                  \
            local_lock_debug_init(lock);                            \
    } while (0)

    #define __local_lock(lock)                                      \
            do {                                                    \
                    preempt_disable();                              \
                    local_lock_acquire(this_cpu_ptr(lock));         \
    [all …]
|
| D | mutex.h |
    extern void mutex_destroy(struct mutex *lock);

    static inline void mutex_destroy(struct mutex *lock) {}

    extern void __mutex_init(struct mutex *lock, const char *name,
                             struct lock_class_key *key);

    extern bool mutex_is_locked(struct mutex *lock);

    extern void __mutex_rt_init(struct mutex *lock, const char *name,
                                struct lock_class_key *key);

    int __devm_mutex_init(struct device *dev, struct mutex *lock);

    static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)

    extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
    extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
    extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
    [all …]
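
The paired extern/empty-inline variants above come from the debug and non-debug config branches; the locking calls themselves are config-independent. A minimal usage sketch, with invented names for the protected state:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(cfg_mutex);         /* hypothetical example state */
    static int cfg_value;

    static int cfg_set(int v)
    {
            int ret;

            /* Sleeping lock: process context only; returns -EINTR if a
             * signal arrives while waiting. */
            ret = mutex_lock_interruptible(&cfg_mutex);
            if (ret)
                    return ret;
            cfg_value = v;
            mutex_unlock(&cfg_mutex);
            return 0;
    }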
|
| D | local_lock.h |
    #define local_lock_init(lock)           __local_lock_init(lock)

    #define local_lock(lock)                __local_lock(lock)

    #define local_lock_irq(lock)            __local_lock_irq(lock)

    #define local_lock_irqsave(lock, flags)                         \
            __local_lock_irqsave(lock, flags)

    #define local_unlock(lock)              __local_unlock(lock)

    #define local_unlock_irq(lock)          __local_unlock_irq(lock)

    #define local_unlock_irqrestore(lock, flags)                    \
            __local_unlock_irqrestore(lock, flags)

    DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
                        local_lock_irqsave(_T->lock, _T->flags),
    [all …]
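
local_lock_t protects per-CPU data that historically relied on bare preempt_disable()/local_irq_save(); on PREEMPT_RT it becomes a real per-CPU lock, and lockdep can track it either way. A minimal sketch modeled on the pattern used in mm/swap.c; the structure and names here are invented:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct cpu_stats {                      /* hypothetical per-CPU state */
            local_lock_t lock;
            unsigned long events;
    };

    static DEFINE_PER_CPU(struct cpu_stats, cpu_stats) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void count_event(void)
    {
            /* Serializes only against this CPU: preemption is disabled
             * on !PREEMPT_RT, a per-CPU rt lock is taken on PREEMPT_RT. */
            local_lock(&cpu_stats.lock);
            this_cpu_inc(cpu_stats.events);
            local_unlock(&cpu_stats.lock);
    }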
|
| D | rtmutex.h |
    static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
    {
            return READ_ONCE(lock->owner) != NULL;
    }

    extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);

    #ifdef CONFIG_DEBUG_LOCK_ALLOC
    extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
    extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
    #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
    #define rt_mutex_lock_nest_lock(lock, nest_lock)                        \
            do {                                                            \
                    typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
                    _rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);  \
            } while (0)
    #else
    extern void rt_mutex_lock(struct rt_mutex *lock);
    #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
    [all …]
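
rt_mutex is the priority-inheriting mutex: a high-priority task blocking on the lock lends its priority to the current owner until the lock is released. A minimal usage sketch with an invented lock name:

    #include <linux/rtmutex.h>

    static DEFINE_RT_MUTEX(pi_lock_example);    /* hypothetical */

    static void touch_shared_state(void)
    {
            rt_mutex_lock(&pi_lock_example);
            /* Critical section; the owner runs at the priority of the
             * highest-priority waiter if one appears. */
            rt_mutex_unlock(&pi_lock_example);
    }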
|
| D | lockdep.h |
    extern void lockdep_reset_lock(struct lockdep_map *lock);

    extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
            struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

    static inline void
    lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, int subclass, u8 inner, u8 outer)
    {
            lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
    }

    static inline void
    lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
                          struct lock_class_key *key, int subclass, u8 inner)
    {
            lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
    }

    static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
                                        struct lock_class_key *key, int subclass)
    {
            lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
    }

    #define lockdep_set_class(lock, key)                            \
            lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,   \
    [all …]
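
lockdep groups locks into classes by the lock_class_key used at init time; lockdep_set_class() reassigns the class afterwards, which is how code gives related locks a shared (or deliberately distinct) class for the validator. A minimal sketch; the bucket structure and key name are invented:

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    static struct lock_class_key bucket_key;    /* one class for all buckets */

    struct bucket {                             /* hypothetical */
            spinlock_t lock;
    };

    static void bucket_init(struct bucket *b)
    {
            spin_lock_init(&b->lock);
            /* All bucket locks now share one explicit lockdep class
             * instead of the class derived from the init call site. */
            lockdep_set_class(&b->lock, &bucket_key);
    }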
|
| D | seqlock.h |
            __SEQ_LOCK(____s->lock = (_lock));                      \

    #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
    #define seqcount_spinlock_init(s, lock)     seqcount_LOCKNAME_init(s, lock, spinlock)
    #define seqcount_rwlock_init(s, lock)       seqcount_LOCKNAME_init(s, lock, rwlock)
    #define seqcount_mutex_init(s, lock)        seqcount_LOCKNAME_init(s, lock, mutex)

            __SEQ_LOCK(lockbase##_lock(s->lock));                   \
            __SEQ_LOCK(lockbase##_unlock(s->lock));                 \

            __SEQ_LOCK(lockdep_assert_held(s->lock));               \

            __SEQ_LOCK(.lock = (assoc_lock))                        \

    #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
    [all …]
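
These fragments belong to the seqcount_LOCKNAME_t machinery: a seqcount with an associated lock, so lockdep can assert the lock is held on the write side and PREEMPT_RT readers can take it to avoid livelock. A minimal sketch of the spinlock-associated variant; all the variable names are invented:

    #include <linux/seqlock.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(ts_lock);            /* hypothetical state */
    static seqcount_spinlock_t ts_seq = SEQCNT_SPINLOCK_ZERO(ts_seq, &ts_lock);
    static u64 ts_sec, ts_nsec;

    static void ts_update(u64 sec, u64 nsec)
    {
            spin_lock(&ts_lock);                /* required on the write side */
            write_seqcount_begin(&ts_seq);
            ts_sec = sec;
            ts_nsec = nsec;
            write_seqcount_end(&ts_seq);
            spin_unlock(&ts_lock);
    }

    static void ts_read(u64 *sec, u64 *nsec)
    {
            unsigned int seq;

            do {                                /* lockless reader retry loop */
                    seq = read_seqcount_begin(&ts_seq);
                    *sec = ts_sec;
                    *nsec = ts_nsec;
            } while (read_seqcount_retry(&ts_seq, seq));
    }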
|
| D | osq_lock.h |
    static inline void osq_lock_init(struct optimistic_spin_queue *lock)
    {
            atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
    }

    extern bool osq_lock(struct optimistic_spin_queue *lock);
    extern void osq_unlock(struct optimistic_spin_queue *lock);

    static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
    {
            return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
    }
|
| D | rwlock_rt.h |
    #define read_lock_irqsave(lock, flags)                  \
            do {                                            \
                    typecheck(unsigned long, flags);        \
                    rt_read_lock(lock);                     \
                    flags = 0;                              \
            } while (0)

    #define read_trylock(lock)  __cond_lock(lock, rt_read_trylock(lock))

    #define write_lock_nested(lock, subclass)   rt_write_lock(((void)(subclass), (lock)))

    #define write_lock_irqsave(lock, flags)                 \
            do {                                            \
                    typecheck(unsigned long, flags);        \
                    rt_write_lock(lock);                    \
                    flags = 0;                              \
            } while (0)

    #define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))

    #define write_trylock_irqsave(lock, flags)              \
            ({                                              \
                    int __locked;                           \
                                                            \
                    typecheck(unsigned long, flags);        \
                    flags = 0;                              \
                    __locked = write_trylock(lock);         \
                    __locked;                               \
            })

    #define rwlock_is_contended(lock)   (((void)(lock), 0))
|
| D | ww_mutex.h |
    static inline void ww_mutex_init(struct ww_mutex *lock,
                                     struct ww_class *ww_class)
    {
            ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
            lock->ctx = NULL;
    #ifdef DEBUG_WW_MUTEXES
            lock->ww_class = ww_class;
    #endif
    }

    extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);

    extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
                                                        struct ww_acquire_ctx *ctx);

    static inline void
    ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    {
            int ret;
            …
            ret = ww_mutex_lock(lock, ctx);
            (void)ret;
    }

    static inline int __must_check
    ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
                                     struct ww_acquire_ctx *ctx)
    {
            …
            return ww_mutex_lock_interruptible(lock, ctx);
    }
    [all …]
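
ww_mutex (the "wound/wait" mutex) lets sets of locks be taken in arbitrary order: when two acquire contexts would deadlock, the loser gets -EDEADLK and must back off via ww_mutex_lock_slow(). A minimal two-lock sketch of the standard backoff loop; the buf structure, class name, and helper are invented, and the canonical multi-lock version lives in Documentation/locking/ww-mutex-design.rst:

    #include <linux/kernel.h>           /* swap() */
    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(buf_ww_class);   /* hypothetical */

    struct buf {
            struct ww_mutex lock;
    };

    static int update_pair(struct buf *a, struct buf *b)
    {
            struct ww_acquire_ctx ctx;
            int ret;

            ww_acquire_init(&ctx, &buf_ww_class);

            ret = ww_mutex_lock(&a->lock, &ctx);
            if (ret)
                    goto fini;

            while ((ret = ww_mutex_lock(&b->lock, &ctx)) == -EDEADLK) {
                    /* Lost the ordering race: drop what we hold, sleep
                     * on the contended lock, retake the other one. */
                    ww_mutex_unlock(&a->lock);
                    ww_mutex_lock_slow(&b->lock, &ctx);
                    swap(a, b);
            }
            if (ret)        /* only reachable with _interruptible variants */
                    goto unlock_a;
            ww_acquire_done(&ctx);

            /* ... both buffers are locked; modify them here ... */

            ww_mutex_unlock(&b->lock);
    unlock_a:
            ww_mutex_unlock(&a->lock);
    fini:
            ww_acquire_fini(&ctx);
            return ret;
    }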
|
| D | fs_struct.h |
    struct fs_struct {
            …
            spinlock_t lock;
            …
    };

    static inline void get_fs_root(struct fs_struct *fs, struct path *root)
    {
            spin_lock(&fs->lock);
            *root = fs->root;
            path_get(root);
            spin_unlock(&fs->lock);
    }

    static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
    {
            spin_lock(&fs->lock);
            *pwd = fs->pwd;
            path_get(pwd);
            spin_unlock(&fs->lock);
    }
|
| /include/asm-generic/ |
| D | qrwlock.h |
    extern void queued_read_lock_slowpath(struct qrwlock *lock);
    extern void queued_write_lock_slowpath(struct qrwlock *lock);

    static inline int queued_read_trylock(struct qrwlock *lock)
    {
            int cnts;

            cnts = atomic_read(&lock->cnts);
            if (likely(!(cnts & _QW_WMASK))) {
                    cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
                    if (likely(!(cnts & _QW_WMASK)))
                            return 1;
                    atomic_sub(_QR_BIAS, &lock->cnts);
            }
            return 0;
    }

    static inline int queued_write_trylock(struct qrwlock *lock)
    {
            int cnts;

            cnts = atomic_read(&lock->cnts);
            if (unlikely(cnts))
                    return 0;

            return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
                                                     _QW_LOCKED));
    }

    static inline void queued_read_lock(struct qrwlock *lock)
    [all …]
|
| D | spinlock.h |
    static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
    {
            u32 val = atomic_fetch_add(1<<16, lock);    /* take the next ticket */
            u16 ticket = val >> 16;

            if (ticket == (u16)val)                     /* uncontended: we own it */
                    return;

            /*
             * atomic_cond_read_acquire() is RCpc; the smp_mb() upgrades
             * the ordering to what the lock requires.
             */
            atomic_cond_read_acquire(lock, ticket == (u16)VAL);
            smp_mb();
    }

    static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
    {
            u32 old = atomic_read(lock);

            if ((old >> 16) != (old & 0xffff))
                    return false;                       /* contended */

            return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
    }

    static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
    {
            u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
            u32 val = atomic_read(lock);

            smp_store_release(ptr, (u16)val + 1);       /* hand off to next ticket */
    }

    static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
    [all …]
|
| D | qspinlock.h |
    static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
    {
            /*
             * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
             * is not yet set.
             */
            return atomic_read(&lock->val);
    }

    static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
    {
            return !lock.val.counter;
    }

    static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
    {
            return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
    }

    static __always_inline int queued_spin_trylock(struct qspinlock *lock)
    {
            int val = atomic_read(&lock->val);

            if (unlikely(val))
                    return 0;

            return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
    }

    extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
    [all …]
|
| /include/trace/events/ |
| D | lock.h |
    #define TRACE_SYSTEM lock

    TRACE_EVENT(lock_acquire,

            TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
                     int trylock, int read, int check,
                     struct lockdep_map *next_lock, unsigned long ip),

            TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
            …
                    __string(name, lock->name)
            …
                    __entry->lockdep_addr = lock;
            …

    DECLARE_EVENT_CLASS(lock,

            TP_PROTO(struct lockdep_map *lock, unsigned long ip),

            TP_ARGS(lock, ip),

            TP_STRUCT__entry(
                    __string(name, lock->name)
            …
                    __entry->lockdep_addr = lock;
    [all …]
|
| /include/trace/hooks/ |
| D | dtask.h |
    DECLARE_HOOK(…,
            TP_PROTO(struct mutex *lock),
            TP_ARGS(lock));
    DECLARE_HOOK(…,
            TP_PROTO(struct mutex *lock),
            TP_ARGS(lock));
    DECLARE_HOOK(…,
            TP_PROTO(struct mutex *lock),
            TP_ARGS(lock));
    DECLARE_HOOK(…,
            TP_PROTO(struct mutex *lock, bool *time_out, int *cnt),
            TP_ARGS(lock, time_out, cnt));
    DECLARE_HOOK(…,
            TP_PROTO(struct mutex *lock, bool taken),
            TP_ARGS(lock, taken));
    [all …]
|
| /include/asm-generic/bitops/ |
| D | ext2-atomic.h |
    #define ext2_set_bit_atomic(lock, nr, addr)             \
            ({                                              \
                    int ret;                                \
                    spin_lock(lock);                        \
                    ret = __test_and_set_bit_le(nr, addr);  \
                    spin_unlock(lock);                      \
                    ret;                                    \
            })

    #define ext2_clear_bit_atomic(lock, nr, addr)           \
            ({                                              \
                    int ret;                                \
                    spin_lock(lock);                        \
                    ret = __test_and_clear_bit_le(nr, addr);\
                    spin_unlock(lock);                      \
                    ret;                                    \
            })
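
These helpers make the non-atomic little-endian bitops atomic by bracketing them with a caller-supplied spinlock, for architectures without cheap atomic bitops. A usage sketch, assuming the architecture maps ext2_set_bit_atomic()/ext2_clear_bit_atomic() to this asm-generic version; the bitmap and names are invented:

    #include <linux/bitops.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(bitmap_lock);        /* hypothetical */
    static unsigned long slots[4];              /* 256 bits on 64-bit */

    /* Returns the previous bit value: 0 means the slot was free. */
    static int claim_slot(int nr)
    {
            return ext2_set_bit_atomic(&bitmap_lock, nr, slots);
    }

    static void release_slot(int nr)
    {
            ext2_clear_bit_atomic(&bitmap_lock, nr, slots);
    }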
|
| /include/acpi/platform/ |
| D | aclinuxex.h |
    #define acpi_os_create_lock(__handle)                            \
            ({                                                       \
                    spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
                    if (lock) {                                      \
                            *(__handle) = lock;                      \
                            spin_lock_init(lock);                    \
                    }                                                \
                    lock ? AE_OK : AE_NO_MEMORY;                     \
            })

    #define acpi_os_create_raw_lock(__handle)                            \
            ({                                                           \
                    raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
                    if (lock) {                                          \
                            *(__handle) = lock;                          \
                            raw_spin_lock_init(lock);                    \
                    }                                                    \
                    lock ? AE_OK : AE_NO_MEMORY;                         \
            })
|
| /include/drm/ |
| D | drm_modeset_lock.h |
    void drm_modeset_lock_init(struct drm_modeset_lock *lock);

    static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
    {
            WARN_ON(!list_empty(&lock->head));
    }

    static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
    {
            return ww_mutex_is_locked(&lock->mutex);
    }

    static inline void drm_modeset_lock_assert_held(struct drm_modeset_lock *lock)
    {
            lockdep_assert_held(&lock->mutex.base);
    }

    int drm_modeset_lock(struct drm_modeset_lock *lock,
                         struct drm_modeset_acquire_ctx *ctx);
    int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock);
    void drm_modeset_unlock(struct drm_modeset_lock *lock);
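
drm_modeset_lock wraps a ww_mutex, with drm_modeset_acquire_ctx playing the role of the ww_acquire_ctx: on -EDEADLK the caller backs off and retries. A minimal sketch of the standard loop using the acquire-context API from this header; the helper name is invented:

    #include <drm/drm_modeset_lock.h>

    static int with_crtc_lock(struct drm_modeset_lock *lock)
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);
    retry:
            ret = drm_modeset_lock(lock, &ctx);
            if (ret == -EDEADLK) {
                    drm_modeset_backoff(&ctx);  /* drop held locks, wait */
                    goto retry;
            }
            if (!ret) {
                    /* ... inspect or update modeset state ... */
                    drm_modeset_drop_locks(&ctx);
            }

            drm_modeset_acquire_fini(&ctx);
            return ret;
    }

In practice most drivers use the DRM_MODESET_LOCK_ALL_BEGIN()/DRM_MODESET_LOCK_ALL_END() helpers, which expand to exactly this retry pattern.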
|