
Searched full:lock (Results 1 – 25 of 7694) sorted by relevance


/kernel/linux/linux-4.19/include/linux/
rwlock_api_smp.h
18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
21 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
22 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
24 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
25 __acquires(lock);
26 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
27 __acquires(lock);
[all …]
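
These _raw_read_*/_raw_write_* declarations back the public rwlock wrappers. Below is a minimal usage sketch of those wrappers with a hypothetical lock and value (not taken from the kernel tree); on SMP builds read_lock()/write_lock() normally reach the _raw_read_lock()/_raw_write_lock() functions declared here.

static DEFINE_RWLOCK(example_rwlock);   /* hypothetical reader/writer lock */
static int example_value;

static int example_read(void)
{
        int v;

        read_lock(&example_rwlock);     /* many concurrent readers allowed */
        v = example_value;
        read_unlock(&example_rwlock);
        return v;
}

static void example_write(int v)
{
        write_lock(&example_rwlock);    /* exclusive against readers and writers */
        example_value = v;
        write_unlock(&example_rwlock);
}
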
spinlock_api_up.h
19 #define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
24 * flags straight, to suppress compiler warnings of unused lock
27 #define ___LOCK(lock) \
28 do { __acquire(lock); (void)(lock); } while (0)
30 #define __LOCK(lock) \
31 do { preempt_disable(); ___LOCK(lock); } while (0)
33 #define __LOCK_BH(lock) \
34 do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
36 #define __LOCK_IRQ(lock) \
37 do { local_irq_disable(); __LOCK(lock); } while (0)
[all …]
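
On uniprocessor builds there is no lock word to contend on, so these helpers only manage preemption, softirqs, or interrupts. As a rough, illustrative expansion (not the literal preprocessor output), a spin_lock_irqsave()/spin_unlock_irqrestore() pair on UP reduces to the following; the function is purely a sketch.

static void example_up_expansion(void)
{
        unsigned long flags;

        /* spin_lock_irqsave(&lock, flags) on UP roughly becomes: */
        local_irq_save(flags);
        preempt_disable();

        /* ... critical section ... */

        /* spin_unlock_irqrestore(&lock, flags) on UP roughly becomes: */
        local_irq_restore(flags);
        preempt_enable();
}
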
rwlock.h
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
20 # define rwlock_init(lock) \
24 __rwlock_init((lock), #lock, &__key); \
27 # define rwlock_init(lock) \
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
32 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
33 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
34 extern int do_raw_read_trylock(rwlock_t *lock);
35 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
36 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
[all …]
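
rwlock_init() is the runtime initializer declared here: in debug builds it registers a lock class via __rwlock_init(), otherwise it simply assigns __RW_LOCK_UNLOCKED. A small sketch with a hypothetical structure that embeds the lock:

struct example_table {                  /* hypothetical container, not from the kernel tree */
        rwlock_t lock;
        struct list_head entries;
};

static void example_table_init(struct example_table *t)
{
        rwlock_init(&t->lock);          /* needed for locks living in dynamically allocated memory */
        INIT_LIST_HEAD(&t->entries);
}
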
spinlock_api_smp.h
22 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock);
26 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27 __acquires(lock);
28 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30 __acquires(lock);
32 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33 __acquires(lock);
[all …]
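
The _irqsave variant declared above is the usual choice when the same data is touched from both process and interrupt context. A hedged usage sketch with hypothetical names:

static DEFINE_SPINLOCK(example_stats_lock);     /* hypothetical lock */
static u64 example_packets;

static void example_account_packet(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_stats_lock, flags);  /* disables local interrupts, saves state */
        example_packets++;
        spin_unlock_irqrestore(&example_stats_lock, flags);
}
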
spinlock.h
65 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
94 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
96 # define raw_spin_lock_init(lock) \
100 __raw_spin_lock_init((lock), #lock, &__key); \
104 # define raw_spin_lock_init(lock) \
105 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
108 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
111 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
113 #define raw_spin_is_contended(lock) (((void)(lock), 0))
118 * between program-order earlier lock acquisitions and program-order later
[all …]
/kernel/linux/linux-5.10/include/linux/
rwlock_api_smp.h
18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
21 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
22 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
24 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
25 __acquires(lock);
26 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
27 __acquires(lock);
[all …]
spinlock_api_up.h
19 #define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
24 * flags straight, to suppress compiler warnings of unused lock
27 #define ___LOCK(lock) \
28 do { __acquire(lock); (void)(lock); } while (0)
30 #define __LOCK(lock) \
31 do { preempt_disable(); ___LOCK(lock); } while (0)
33 #define __LOCK_BH(lock) \
34 do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
36 #define __LOCK_IRQ(lock) \
37 do { local_irq_disable(); __LOCK(lock); } while (0)
[all …]
rwlock.h
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
20 # define rwlock_init(lock) \
24 __rwlock_init((lock), #lock, &__key); \
27 # define rwlock_init(lock) \
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
32 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
33 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
34 extern int do_raw_read_trylock(rwlock_t *lock);
35 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
36 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
[all …]
spinlock_api_smp.h
22 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock);
26 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27 __acquires(lock);
28 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30 __acquires(lock);
32 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33 __acquires(lock);
[all …]
spinlock.h
67 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
96 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
99 # define raw_spin_lock_init(lock) \
103 __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
107 # define raw_spin_lock_init(lock) \
108 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
111 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
121 * between program-order earlier lock acquisitions and program-order later
[all …]
local_lock.h
8 * local_lock_init - Runtime initialize a lock instance
10 #define local_lock_init(lock) __local_lock_init(lock)
13 * local_lock - Acquire a per CPU local lock
14 * @lock: The lock variable
16 #define local_lock(lock) __local_lock(lock)
19 * local_lock_irq - Acquire a per CPU local lock and disable interrupts
20 * @lock: The lock variable
22 #define local_lock_irq(lock) __local_lock_irq(lock)
25 * local_lock_irqsave - Acquire a per CPU local lock, save and disable
27 * @lock: The lock variable
[all …]
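
Local locks protect per-CPU data and are taken only on the calling CPU's instance. A minimal sketch of the documented pattern; the per-CPU structure and names are hypothetical, and the INIT_LOCAL_LOCK() static initializer is assumed from the companion internal header.

struct example_pcpu {                   /* hypothetical per-CPU state */
        local_lock_t lock;
        unsigned int fill;
};

static DEFINE_PER_CPU(struct example_pcpu, example_buf) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

static void example_buf_add(void)
{
        local_lock(&example_buf.lock);  /* serializes access to this CPU's instance only */
        this_cpu_inc(example_buf.fill);
        local_unlock(&example_buf.lock);
}
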
/kernel/linux/linux-4.19/drivers/gpu/drm/ttm/
ttm_lock.c
46 void ttm_lock_init(struct ttm_lock *lock)
48 spin_lock_init(&lock->lock);
49 init_waitqueue_head(&lock->queue);
50 lock->rw = 0;
51 lock->flags = 0;
52 lock->kill_takers = false;
53 lock->signal = SIGKILL;
57 void ttm_read_unlock(struct ttm_lock *lock)
59 spin_lock(&lock->lock);
60 if (--lock->rw == 0)
[all …]
/kernel/linux/linux-5.10/kernel/locking/
spinlock_debug.c
16 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
21 * Make sure we are not reinitializing a held lock:
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
24 lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
27 lock->magic = SPINLOCK_MAGIC;
28 lock->owner = SPINLOCK_OWNER_INIT;
29 lock->owner_cpu = -1;
34 void __rwlock_init(rwlock_t *lock, const char *name,
39 * Make sure we are not reinitializing a held lock:
[all …]
rtmutex.c
26 * lock->owner state tracking:
28 * lock->owner holds the task_struct pointer of the owner. Bit 0
29 * is used to keep track of the "lock has waiters" state.
32 * NULL 0 lock is free (fast acquire possible)
33 * NULL 1 lock is free and has waiters and the top waiter
34 * is going to take the lock*
35 * taskpointer 0 lock is held (fast release possible)
36 * taskpointer 1 lock is held and has waiters**
39 * possible when bit 0 of lock->owner is 0.
41 * (*) It also can be a transitional state when grabbing the lock
[all …]
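
The state table above folds the "lock has waiters" flag into bit 0 of lock->owner, with the remaining bits holding the owner task pointer. A sketch of that decoding; the helper names are illustrative, although RT_MUTEX_HAS_WAITERS matches the constant the rt-mutex code uses.

#define RT_MUTEX_HAS_WAITERS    1UL

/* Illustrative helpers, not the kernel's exact accessors. */
static inline struct task_struct *example_owner_task(unsigned long owner_word)
{
        return (struct task_struct *)(owner_word & ~RT_MUTEX_HAS_WAITERS);
}

static inline bool example_owner_has_waiters(unsigned long owner_word)
{
        return owner_word & RT_MUTEX_HAS_WAITERS;
}
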
spinlock.c
35 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
46 * Some architectures can relax in favour of the CPU owning the lock.
63 * This could be a long-held lock. We both prepare to spin for a long
65 * towards that other CPU that it should break the lock ASAP.
68 void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
72 if (likely(do_raw_##op##_trylock(lock))) \
76 arch_##op##_relax(&lock->raw_lock); \
80 unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
87 if (likely(do_raw_##op##_trylock(lock))) \
92 arch_##op##_relax(&lock->raw_lock); \
[all …]
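
The macro above stamps out the out-of-line lock functions: disable preemption, try the lock, and on failure re-enable preemption and relax before retrying, so the waiting CPU stays preemptible. Roughly, for op = spin it generates something like the following (a sketch, not the exact preprocessor output):

void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
        for (;;) {
                preempt_disable();
                if (likely(do_raw_spin_trylock(lock)))
                        break;                          /* got the lock, leave preemption off */
                preempt_enable();                       /* stay preemptible while we wait */

                arch_spin_relax(&lock->raw_lock);       /* e.g. cpu_relax() on most architectures */
        }
}
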
mutex.c
40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
42 atomic_long_set(&lock->owner, 0);
43 spin_lock_init(&lock->wait_lock);
44 INIT_LIST_HEAD(&lock->wait_list);
46 osq_lock_init(&lock->osq);
49 debug_mutex_init(lock, name, key);
54 * @owner: contains: 'struct task_struct *' to the current lock owner,
59 * Bit1 indicates unlock needs to hand the lock to the top-waiter
73 static inline struct task_struct *__mutex_owner(struct mutex *lock)
75 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
[all …]
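
__mutex_init() is normally reached through the mutex_init() wrapper, which supplies a per-callsite lockdep key. A minimal usage sketch with a hypothetical structure:

struct example_cache {                  /* hypothetical container */
        struct mutex lock;
        unsigned long entries;
};

static void example_cache_init(struct example_cache *c)
{
        mutex_init(&c->lock);           /* wraps __mutex_init() with a static lock_class_key */
        c->entries = 0;
}

static void example_cache_add(struct example_cache *c)
{
        mutex_lock(&c->lock);           /* may sleep: process context only */
        c->entries++;
        mutex_unlock(&c->lock);
}
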
/kernel/linux/linux-4.19/kernel/locking/
spinlock_debug.c
16 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
21 * Make sure we are not reinitializing a held lock:
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
24 lockdep_init_map(&lock->dep_map, name, key, 0);
26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
27 lock->magic = SPINLOCK_MAGIC;
28 lock->owner = SPINLOCK_OWNER_INIT;
29 lock->owner_cpu = -1;
34 void __rwlock_init(rwlock_t *lock, const char *name,
39 * Make sure we are not reinitializing a held lock:
[all …]
rtmutex.c
25 * lock->owner state tracking:
27 * lock->owner holds the task_struct pointer of the owner. Bit 0
28 * is used to keep track of the "lock has waiters" state.
31 * NULL 0 lock is free (fast acquire possible)
32 * NULL 1 lock is free and has waiters and the top waiter
33 * is going to take the lock*
34 * taskpointer 0 lock is held (fast release possible)
35 * taskpointer 1 lock is held and has waiters**
38 * possible when bit 0 of lock->owner is 0.
40 * (*) It also can be a transitional state when grabbing the lock
[all …]
spinlock.c
28 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
39 * Some architectures can relax in favour of the CPU owning the lock.
56 * This could be a long-held lock. We both prepare to spin for a long
58 * towards that other CPU that it should break the lock ASAP.
61 void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
65 if (likely(do_raw_##op##_trylock(lock))) \
69 arch_##op##_relax(&lock->raw_lock); \
73 unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
80 if (likely(do_raw_##op##_trylock(lock))) \
85 arch_##op##_relax(&lock->raw_lock); \
[all …]
mutex.c
39 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
41 atomic_long_set(&lock->owner, 0);
42 spin_lock_init(&lock->wait_lock);
43 INIT_LIST_HEAD(&lock->wait_list);
45 osq_lock_init(&lock->osq);
48 debug_mutex_init(lock, name, key);
53 * @owner: contains: 'struct task_struct *' to the current lock owner,
58 * Bit1 indicates unlock needs to hand the lock to the top-waiter
80 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
84 owner = atomic_long_read(&lock->owner);
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/vmwgfx/
ttm_lock.c
45 void ttm_lock_init(struct ttm_lock *lock)
47 spin_lock_init(&lock->lock);
48 init_waitqueue_head(&lock->queue);
49 lock->rw = 0;
50 lock->flags = 0;
53 void ttm_read_unlock(struct ttm_lock *lock)
55 spin_lock(&lock->lock);
56 if (--lock->rw == 0)
57 wake_up_all(&lock->queue);
58 spin_unlock(&lock->lock);
[all …]
ttm_lock.h
33 * of the DRM heavyweight hardware lock.
34 * The lock is a read-write lock. Taking it in read mode and write mode
39 * It's allowed to leave kernel space with the vt lock held.
40 * If a user-space process dies while having the vt-lock,
41 * it will be released during the file descriptor release. The vt lock
42 * excludes write lock and read lock.
44 * The suspend mode is used to lock out all TTM users when preparing for
60 * @base: ttm base object used solely to release the lock if the client
61 * holding the lock dies.
62 * @queue: Queue for processes waiting for lock change-of-status.
[all …]
/kernel/linux/linux-4.19/include/drm/ttm/
ttm_lock.h
33 * of the DRM heavyweight hardware lock.
34 * The lock is a read-write lock. Taking it in read mode and write mode
39 * It's allowed to leave kernel space with the vt lock held.
40 * If a user-space process dies while having the vt-lock,
41 * it will be released during the file descriptor release. The vt lock
42 * excludes write lock and read lock.
44 * The suspend mode is used to lock out all TTM users when preparing for
60 * @base: ttm base object used solely to release the lock if the client
61 * holding the lock dies.
62 * @queue: Queue for processes waiting for lock change-of-status.
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_lock.c
50 * Take the heavyweight lock.
52 * \param lock lock pointer.
54 * \return one if the lock is held, or zero otherwise.
56 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
63 volatile unsigned int *lock = &lock_data->hw_lock->lock;
67 old = *lock;
75 prev = cmpxchg(lock, old, new);
82 DRM_ERROR("%d holds heavyweight lock\n",
90 /* Have lock */
97 * This takes a lock forcibly and hands it to context. Should ONLY be used
[all …]
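
drm_lock_take() claims the heavyweight lock with a cmpxchg() retry loop: read the lock word, build the new value for the given context, and retry if another CPU changed the word in between. A simplified sketch of that pattern follows; the helper name and the early return on an already-held lock are illustrative rather than the driver's exact logic, while _DRM_LOCK_HELD is the flag bit DRM uses.

static bool example_take_hw_lock(volatile unsigned int *lock, unsigned int context)
{
        unsigned int old, new, prev;

        do {
                old = *lock;
                if (old & _DRM_LOCK_HELD)
                        return false;                   /* someone else holds it */
                new = context | _DRM_LOCK_HELD;         /* mark it held by this context */
                prev = cmpxchg(lock, old, new);
        } while (prev != old);                          /* lost the race: re-read and retry */

        return true;
}
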
/kernel/linux/linux-4.19/drivers/gpu/drm/
drm_lock.c
46 * Take the heavyweight lock.
48 * \param lock lock pointer.
50 * \return one if the lock is held, or zero otherwise.
52 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
59 volatile unsigned int *lock = &lock_data->hw_lock->lock;
63 old = *lock;
71 prev = cmpxchg(lock, old, new);
78 DRM_ERROR("%d holds heavyweight lock\n",
86 /* Have lock */
93 * This takes a lock forcibly and hands it to context. Should ONLY be used
[all …]
