| /kernel/linux/linux-6.6/kernel/locking/ |
| D | spinlock_debug.c |
    void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, …)
    {
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
    }

    void __rwlock_init(rwlock_t *lock, const char *name, …)
    {
        /*
         * Make sure we are not reinitializing a held lock:
         */
    [all …]
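The debug variant stamps each lock with a magic value at init time and records the owner on acquire, so later operations can detect use of an uninitialized or corrupted lock. Below is a minimal userspace sketch of the same pattern, using C11 atomics instead of arch_spinlock_t; the dbg_* names are hypothetical, though the magic constant matches the kernel's SPINLOCK_MAGIC:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DBG_SPINLOCK_MAGIC 0xdead4eadu   /* same stamp value the kernel uses */
    #define DBG_OWNER_NONE     (-1)

    struct dbg_spinlock {
        atomic_flag locked;
        unsigned int magic;    /* validated before every operation */
        int owner_cpu;         /* -1 while the lock is not held */
    };

    static void dbg_spin_lock_init(struct dbg_spinlock *l)
    {
        atomic_flag_clear(&l->locked);
        l->magic = DBG_SPINLOCK_MAGIC;
        l->owner_cpu = DBG_OWNER_NONE;
    }

    static void dbg_spin_lock(struct dbg_spinlock *l, int cpu)
    {
        if (l->magic != DBG_SPINLOCK_MAGIC) {    /* catches uninitialized locks */
            fprintf(stderr, "spinlock bad magic\n");
            abort();
        }
        while (atomic_flag_test_and_set_explicit(&l->locked, memory_order_acquire))
            ;                                    /* spin until acquired */
        l->owner_cpu = cpu;                      /* record the owner for debugging */
    }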
|
| D | rtmutex.c |
    // SPDX-License-Identifier: GPL-2.0-only
    /*
     * RT-Mutexes: simple blocking mutual exclusion locks with PI support
     *
     * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
     * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
     *
     * See Documentation/locking/rt-mutex-design.rst for details.
     */
    #include <trace/events/lock.h>

    static … __ww_mutex_add_waiter(…, struct rt_mutex *lock, …)
    static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, …)
    static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, …)
    static inline int __ww_mutex_check_kill(struct rt_mutex *lock, …)
    [all …]
|
| D | rtmutex_api.c |
    // SPDX-License-Identifier: GPL-2.0-only
    /*
     * Debug aware fast / slowpath lock, trylock, unlock
     */
    static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock, …)
    {
        …
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
        ret = __rt_mutex_lock(&lock->rtmutex, state);
        if (ret)
            mutex_release(&lock->dep_map, _RET_IP_);
        …
    }

    /**
     * rt_mutex_lock_nested - lock a rt_mutex
     *
     * @lock: the rt_mutex to be locked
     * …
     */
    void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
    {
        __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
    }
    [all …]
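The ordering in __rt_mutex_lock_common() is the standard lockdep wrapper pattern: annotate the acquisition first, attempt the real lock, and retract the annotation if the attempt fails. A sketch of that pattern, with hypothetical validator_* hooks and a real_lock() stand-in:

    struct mylock;                                 /* opaque lock type (hypothetical) */

    void validator_acquire(struct mylock *lock);   /* lockdep-style hooks */
    void validator_release(struct mylock *lock);
    int  real_lock(struct mylock *lock);           /* may fail, e.g. interrupted sleep */

    int lock_common(struct mylock *lock)
    {
        int ret;

        validator_acquire(lock);       /* record "we are about to hold it" */
        ret = real_lock(lock);
        if (ret)
            validator_release(lock);   /* attempt failed: undo the record */
        return ret;
    }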
|
| D | mutex.c |
    // SPDX-License-Identifier: GPL-2.0-only
    /*
     * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
     *   from the -rt tree, where it was originally implemented for rtmutexes
     *   …)
     *
     * Also see Documentation/locking/mutex-design.rst.
     */
    #include <trace/events/lock.h>

    void
    __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
    {
        atomic_long_set(&lock->owner, 0);
        raw_spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        …
        osq_lock_init(&lock->osq);
        …
    }
    [all …]
|
| D | spinlock.c |
    // SPDX-License-Identifier: GPL-2.0
    /*
     * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
     */

    /*
     * If lockdep is enabled then we use the non-preemption spin-ops
     * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
     * not re-enabled during lock-acquire (which the preempt-spin-ops do):
     */

    /*
     * Some architectures can relax in favour of the CPU owning the lock.
     */

    /*
     * This could be a long-held lock. We both prepare to spin for a long
     * time (making _this_ CPU preemptible if possible), and we also signal
     * towards that other CPU that it should break the lock ASAP.
     */
    void __lockfunc __raw_##op##_lock(locktype##_t *lock)        \
    {                                                            \
        for (;;) {                                               \
            preempt_disable();                                   \
            if (likely(do_raw_##op##_trylock(lock)))             \
                break;                                           \
            preempt_enable();                                    \
            arch_##op##_relax(&lock->raw_lock);                  \
        }                                                        \
    }                                                            \
    [all …]
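The generated slowpath is a classic trylock-then-relax loop: make one atomic acquisition attempt, and on failure back off with an architecture hint so the owning CPU can make progress. A compact userspace rendering of the same loop, with C11 atomics and the x86 PAUSE instruction standing in for arch_*_relax() (names are illustrative):

    #include <stdatomic.h>

    static inline void cpu_relax(void)
    {
    #if defined(__x86_64__) || defined(__i386__)
        __builtin_ia32_pause();    /* x86 PAUSE: be polite while spinning */
    #endif
    }

    static void spin_lock_slow(atomic_flag *lock)
    {
        for (;;) {
            /* one trylock attempt; previous value 'clear' means we got it */
            if (!atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
                break;
            cpu_relax();           /* relax in favour of the lock owner */
        }
    }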
|
| /kernel/linux/linux-5.10/kernel/locking/ |
| D | spinlock_debug.c |
    void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, …)
    {
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
    }

    void __rwlock_init(rwlock_t *lock, const char *name, …)
    {
        /*
         * Make sure we are not reinitializing a held lock:
         */
    [all …]
|
| D | rtmutex.c |
    // SPDX-License-Identifier: GPL-2.0-only
    /*
     * RT-Mutexes: simple blocking mutual exclusion locks with PI support
     *
     * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
     * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
     *
     * See Documentation/locking/rt-mutex-design.rst for details.
     */

    /*
     * lock->owner state tracking:
     *
     * lock->owner holds the task_struct pointer of the owner. Bit 0
     * is used to keep track of the "lock has waiters" state:
     *
     *  owner        bit0
     *  NULL         0     lock is free (fast acquire possible)
     *  NULL         1     lock is free and has waiters and the top waiter
     *                     …
     */
    [all …]
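The encoding described in that comment packs a boolean into bit 0 of the owner pointer, which is safe because task_struct pointers are word-aligned and so always have a zero low bit. A minimal sketch of the same trick (types and helper names are illustrative, not the kernel's):

    #include <stdint.h>

    #define HAS_WAITERS 1UL            /* bit 0 of the owner word */

    struct task;                       /* stand-in for task_struct */

    /* owner word == 0: lock free, fast acquire possible;
     * owner word == 1: lock free, but waiters are queued (slow path). */

    static struct task *owner_task(uintptr_t owner_word)
    {
        return (struct task *)(owner_word & ~HAS_WAITERS);
    }

    static int lock_has_waiters(uintptr_t owner_word)
    {
        return (int)(owner_word & HAS_WAITERS);
    }

    static uintptr_t make_owner(struct task *t, int has_waiters)
    {
        return (uintptr_t)t | (has_waiters ? HAS_WAITERS : 0);
    }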
|
| D | mutex.c |
    // SPDX-License-Identifier: GPL-2.0-only
    /*
     * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
     *   from the -rt tree, where it was originally implemented for rtmutexes
     *   …)
     *
     * Also see Documentation/locking/mutex-design.rst.
     */
    # include "mutex-debug.h"

    void
    __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
    {
        atomic_long_set(&lock->owner, 0);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        …
        osq_lock_init(&lock->osq);
        …
    }
    [all …]
|
| D | spinlock.c |
    // SPDX-License-Identifier: GPL-2.0
    /*
     * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
     */

    /*
     * If lockdep is enabled then we use the non-preemption spin-ops
     * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
     * not re-enabled during lock-acquire (which the preempt-spin-ops do):
     */

    /*
     * Some architectures can relax in favour of the CPU owning the lock.
     */

    /*
     * This could be a long-held lock. We both prepare to spin for a long
     * time (making _this_ CPU preemptible if possible), and we also signal
     * towards that other CPU that it should break the lock ASAP.
     */
    void __lockfunc __raw_##op##_lock(locktype##_t *lock)        \
    {                                                            \
        for (;;) {                                               \
            preempt_disable();                                   \
            if (likely(do_raw_##op##_trylock(lock)))             \
                break;                                           \
            preempt_enable();                                    \
            arch_##op##_relax(&lock->raw_lock);                  \
        }                                                        \
    }                                                            \
    [all …]
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | rwlock_api_smp.h |
    void __lockfunc _raw_read_lock(rwlock_t *lock)        __acquires(lock);
    void __lockfunc _raw_write_lock(rwlock_t *lock)       __acquires(lock);
    void __lockfunc _raw_read_lock_bh(rwlock_t *lock)     __acquires(lock);
    void __lockfunc _raw_write_lock_bh(rwlock_t *lock)    __acquires(lock);
    void __lockfunc _raw_read_lock_irq(rwlock_t *lock)    __acquires(lock);
    void __lockfunc _raw_write_lock_irq(rwlock_t *lock)   __acquires(lock);
    unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
                                                          __acquires(lock);
    unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
                                                          __acquires(lock);
    [all …]
|
| D | spinlock_api_smp.h |
    void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)  __acquires(lock);
    void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                          __acquires(lock);
    void __lockfunc
    _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                          __acquires(lock);
    void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
    void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
                                                          __acquires(lock);
    unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
                                                          __acquires(lock);
    [all …]
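Callers normally reach these through the spin_lock_*() wrapper macros; the _irqsave variant additionally returns the previous interrupt state through a flags value so it can be restored on unlock. A typical kernel-style usage sketch (struct my_dev and my_dev_bump() are hypothetical):

    #include <linux/spinlock.h>

    struct my_dev {
        spinlock_t lock;        /* protects @count, also taken from IRQ context */
        unsigned long count;
    };

    static void my_dev_bump(struct my_dev *dev)
    {
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);      /* save state, disable local IRQs */
        dev->count++;
        spin_unlock_irqrestore(&dev->lock, flags); /* restore the saved IRQ state */
    }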
|
| D | spinlock.h |
    /* SPDX-License-Identifier: GPL-2.0 */
    /*
     * include/linux/spinlock.h - generic spinlock/rwlock declarations
     * …
     * (also included on UP-debug builds:)
     * …
     *   (which is an empty structure on non-debug builds)
     * …
     *   builds. (which are NOPs on non-debug, non-preempt
     *   …)
     * (included on UP-non-debug builds:)
     * …
     */

    #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

    /*
     * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
     */

    extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                     …);
    # define raw_spin_lock_init(lock)                         \
    [all …]
|
| D | rwlock.h |
    extern void __rwlock_init(rwlock_t *lock, const char *name,
                              …);
    # define rwlock_init(lock)                                \
    do {                                                      \
        static struct lock_class_key __key;                   \
                                                              \
        __rwlock_init((lock), #lock, &__key);                 \
    } while (0)
    #else
    # define rwlock_init(lock)                                \
        do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
    #endif

    extern void do_raw_read_lock(rwlock_t *lock)     __acquires(lock);
    #define do_raw_read_lock_flags(lock, flags)      do_raw_read_lock(lock)
    extern int do_raw_read_trylock(rwlock_t *lock);
    extern void do_raw_read_unlock(rwlock_t *lock)   __releases(lock);
    extern void do_raw_write_lock(rwlock_t *lock)    __acquires(lock);
    [all …]
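Under lockdep, the debug flavour of rwlock_init() above creates one static lock_class_key per init site, which is what gives each lock its lockdep class. Typical usage is the same either way; a short sketch (my_table_lock and the functions are hypothetical):

    #include <linux/spinlock.h>    /* pulls in rwlock.h, which must not be included directly */

    static DEFINE_RWLOCK(my_table_lock);   /* statically initialized rwlock */

    static void my_reader(void)
    {
        read_lock(&my_table_lock);
        /* any number of readers may hold the lock concurrently */
        read_unlock(&my_table_lock);
    }

    static void my_writer(void)
    {
        write_lock(&my_table_lock);
        /* exclusive: excludes all readers and other writers */
        write_unlock(&my_table_lock);
    }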
|
| /kernel/linux/linux-6.6/include/linux/ |
| D | rwlock_api_smp.h |
    void __lockfunc _raw_read_lock(rwlock_t *lock)        __acquires(lock);
    void __lockfunc _raw_write_lock(rwlock_t *lock)       __acquires(lock);
    void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
    void __lockfunc _raw_read_lock_bh(rwlock_t *lock)     __acquires(lock);
    void __lockfunc _raw_write_lock_bh(rwlock_t *lock)    __acquires(lock);
    void __lockfunc _raw_read_lock_irq(rwlock_t *lock)    __acquires(lock);
    void __lockfunc _raw_write_lock_irq(rwlock_t *lock)   __acquires(lock);
    unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
                                                          __acquires(lock);
    unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
    [all …]
|
| D | spinlock.h |
    /* SPDX-License-Identifier: GPL-2.0 */
    /*
     * include/linux/spinlock.h - generic spinlock/rwlock declarations
     * …
     * (also included on UP-debug builds:)
     * …
     *   (which is an empty structure on non-debug builds)
     * …
     *   builds. (which are NOPs on non-debug, non-preempt
     *   …)
     * (included on UP-non-debug builds:)
     * …
     */

    #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

    /*
     * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
     */

    extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                     …);
    # define raw_spin_lock_init(lock)                         \
    [all …]
|
| D | spinlock_api_smp.h |
    void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)  __acquires(lock);
    void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                          __acquires(lock);
    void __lockfunc
    _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                          __acquires(lock);
    void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
    void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
                                                          __acquires(lock);
    unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
                                                          __acquires(lock);
    [all …]
|
| D | spinlock_rt.h |
    // SPDX-License-Identifier: GPL-2.0-only

    extern void __rt_spin_lock_init(spinlock_t *lock, const char *name, …);
    …
    static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name, …)
    {
    }
    …
        rt_mutex_base_init(&(slock)->lock);               \
    …
        rt_mutex_base_init(&(slock)->lock);               \
    …

    extern void rt_spin_lock(spinlock_t *lock);
    extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
    extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
    extern void rt_spin_unlock(spinlock_t *lock);
    extern void rt_spin_lock_unlock(spinlock_t *lock);
    [all …]
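On PREEMPT_RT, spinlock_t is backed by an rt_mutex (hence the rt_mutex_base_init() calls in the init macros above), so a spinlock_t critical section stays preemptible and may sleep, while the calling source is unchanged. A short usage sketch (the lock name is illustrative):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_rt_lock);   /* rtmutex-backed when PREEMPT_RT=y */

    static void my_work(void)
    {
        spin_lock(&my_rt_lock);    /* may block/sleep on RT, spins otherwise */
        /* ... critical section ... */
        spin_unlock(&my_rt_lock);
    }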
|
| D | rwlock.h |
    extern void __rwlock_init(rwlock_t *lock, const char *name,
                              …);
    # define rwlock_init(lock)                                \
    do {                                                      \
        static struct lock_class_key __key;                   \
                                                              \
        __rwlock_init((lock), #lock, &__key);                 \
    } while (0)
    #else
    # define rwlock_init(lock)                                \
        do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
    #endif

    extern void do_raw_read_lock(rwlock_t *lock)     __acquires(lock);
    extern int do_raw_read_trylock(rwlock_t *lock);
    extern void do_raw_read_unlock(rwlock_t *lock)   __releases(lock);
    extern void do_raw_write_lock(rwlock_t *lock)    __acquires(lock);
    extern int do_raw_write_trylock(rwlock_t *lock);
    [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/vmwgfx/ |
| D | ttm_lock.c |
    /* SPDX-License-Identifier: GPL-2.0 OR MIT */
    /*
     * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
     * …
     * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     * …
     * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
     */

    void ttm_lock_init(struct ttm_lock *lock)
    {
        spin_lock_init(&lock->lock);
        init_waitqueue_head(&lock->queue);
        lock->rw = 0;
        lock->flags = 0;
    }

    void ttm_read_unlock(struct ttm_lock *lock)
    [all …]
|
| D | ttm_lock.h |
    /*
     * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
     * …
     * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     * …
     * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
     */

    /*
     * … of the DRM heavyweight hardware lock.
     * The lock is a read-write lock. Taking it in read mode and write mode
     * is relatively fast, and intended for in-kernel use only.
     * …
     * … user-space processes from validating buffers.
     * It's allowed to leave kernel space with the vt lock held.
     * If a user-space process dies while having the vt-lock,
     * it will be released during the file descriptor release. The vt lock
    [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/ |
| D | drm_lock.c |
    /**
     * Take the heavyweight lock.
     *
     * \param lock lock pointer.
     * …
     * \return one if the lock is held, or zero otherwise.
     *
     * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
     */
    static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
    {
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock_bh(&lock_data->spinlock);
        do {
            old = *lock;
            …
                ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
                 …);
            prev = cmpxchg(lock, old, new);
        } while (prev != old);
        spin_unlock_bh(&lock_data->spinlock);
    [all …]
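The loop above is the standard compare-and-swap pattern: read the current lock word, derive the desired new value (take the lock, or merely flag it as contended), and retry if another thread changed the word in between. A userspace sketch with C11 atomics; the constants only mirror the roles of _DRM_LOCK_HELD and _DRM_LOCK_CONT, and the values are illustrative:

    #include <stdatomic.h>

    #define LOCK_HELD 0x80000000u   /* plays the role of _DRM_LOCK_HELD */
    #define LOCK_CONT 0x40000000u   /* plays the role of _DRM_LOCK_CONT */

    /* Returns 1 if we took the lock, 0 if it was already held and we
     * only marked it as contended. */
    static int lock_take(atomic_uint *lock, unsigned int context)
    {
        unsigned int old, new;

        do {
            old = atomic_load(lock);
            if (old & LOCK_HELD)
                new = old | LOCK_CONT;         /* ask the holder to yield */
            else
                new = context | LOCK_HELD;     /* claim it for 'context' */
        } while (!atomic_compare_exchange_weak(lock, &old, new));

        return !(old & LOCK_HELD);
    }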
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/ |
| D | drm_lock.c |
    /**
     * Take the heavyweight lock.
     *
     * \param lock lock pointer.
     * …
     * \return one if the lock is held, or zero otherwise.
     *
     * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
     */
    static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
    {
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock_bh(&lock_data->spinlock);
        do {
            old = *lock;
            …
                ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
                 …);
            prev = cmpxchg(lock, old, new);
        } while (prev != old);
        spin_unlock_bh(&lock_data->spinlock);
    [all …]
|
| /kernel/linux/linux-5.10/fs/ocfs2/dlm/ |
| D | dlmast.c |
    // SPDX-License-Identifier: GPL-2.0-or-later
    /* -*- mode: c; c-basic-offset: 8; -*- */

    static … (…, struct dlm_lock *lock);
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

    /* …
     * lock level will obsolete a pending bast.
     * For example, if dlm_thread queued a bast for an EX lock that
     * …
     * lock owner downconverted to NL, the bast is now obsolete.
     * …
     * This is needed because the lock and convert paths can queue
     * asts out-of-band (not waiting for dlm_thread) in order to
     * … */
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
    [all …]
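The cancellation test boils down to lock-mode compatibility: a bast was queued because the held mode blocked another request, and it becomes obsolete once the holder downconverts to a mode that no longer conflicts. A simplified sketch over three of the DLM's modes (the real code handles the full ocfs2 DLM mode set and runs with dlm->ast_lock held):

    /* Simplified DLM modes: NL (null) < PR (protected read) < EX (exclusive). */
    enum dlm_mode { MODE_NL, MODE_PR, MODE_EX };

    /* NL is compatible with everything; PR with PR; EX with nothing else. */
    static int modes_compatible(enum dlm_mode a, enum dlm_mode b)
    {
        if (a == MODE_NL || b == MODE_NL)
            return 1;
        return a == MODE_PR && b == MODE_PR;
    }

    /* A bast was queued because the holder blocked a request for 'wanted'.
     * After the holder downconverts to 'new_held', the bast is obsolete
     * iff the new mode no longer conflicts with the waiter. */
    static int bast_obsolete(enum dlm_mode new_held, enum dlm_mode wanted)
    {
        return modes_compatible(new_held, wanted);
    }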
|
| D | dlmlock.c |
    // SPDX-License-Identifier: GPL-2.0-or-later
    /* -*- mode: c; c-basic-offset: 8; -*- */
    /*
     * … underlying calls for lock creation
     */

    static … (…, struct dlm_lock *lock, int flags);
    …
    static void dlm_lock_detach_lockres(struct dlm_lock *lock);
    …
        return -ENOMEM;                    /* in dlm_init_lock_cache() */
    …

    /* Tell us whether we can grant a new lock request.
     * …
     * caller needs:  res->spinlock
     * …
     * returns: 1 if the lock can be granted, 0 otherwise.
     */
    static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
                                      struct dlm_lock *lock)
    [all …]
|
| /kernel/linux/linux-6.6/fs/ocfs2/dlm/ |
| D | dlmast.c |
    // SPDX-License-Identifier: GPL-2.0-or-later

    static … (…, struct dlm_lock *lock);
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

    /* …
     * lock level will obsolete a pending bast.
     * For example, if dlm_thread queued a bast for an EX lock that
     * …
     * lock owner downconverted to NL, the bast is now obsolete.
     * …
     * This is needed because the lock and convert paths can queue
     * asts out-of-band (not waiting for dlm_thread) in order to
     * … */
    static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
    {
        assert_spin_locked(&dlm->ast_lock);
    [all …]
|