
Searched +full:lock +full:- +full:- +full:- +full:- (Results 1 – 25 of 1115) sorted by relevance


/kernel/linux/linux-4.19/drivers/gpu/drm/ttm/
ttm_lock.c
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
46 void ttm_lock_init(struct ttm_lock *lock) in ttm_lock_init() argument
48 spin_lock_init(&lock->lock); in ttm_lock_init()
49 init_waitqueue_head(&lock->queue); in ttm_lock_init()
50 lock->rw = 0; in ttm_lock_init()
51 lock->flags = 0; in ttm_lock_init()
52 lock->kill_takers = false; in ttm_lock_init()
[all …]
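
The ttm_lock_init() hits above show the classic "counter plus wait queue" read-write lock: a spinlock guards an rw counter and sleepers park on a waitqueue. A minimal userspace sketch of the same pattern, assuming pthreads; only the reader side is shown, and the ttmish_* names are illustrative, not the kernel's:

/* Userspace analogue (not the kernel code) of the ttm_lock fields:
 * rw > 0 counts readers, rw == -1 means write-locked, rw == 0 free. */
#include <pthread.h>

struct ttmish_lock {
	pthread_mutex_t lock;   /* plays the role of lock->lock  */
	pthread_cond_t  queue;  /* plays the role of lock->queue */
	int             rw;     /* plays the role of lock->rw    */
};

static void ttmish_lock_init(struct ttmish_lock *l)
{
	pthread_mutex_init(&l->lock, NULL);
	pthread_cond_init(&l->queue, NULL);
	l->rw = 0;
}

static void ttmish_read_lock(struct ttmish_lock *l)
{
	pthread_mutex_lock(&l->lock);
	while (l->rw < 0)                       /* writer active: sleep */
		pthread_cond_wait(&l->queue, &l->lock);
	l->rw++;
	pthread_mutex_unlock(&l->lock);
}

static void ttmish_read_unlock(struct ttmish_lock *l)
{
	pthread_mutex_lock(&l->lock);
	if (--l->rw == 0)
		pthread_cond_broadcast(&l->queue);  /* wake any writer */
	pthread_mutex_unlock(&l->lock);
}
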
/kernel/linux/linux-5.10/kernel/locking/
spinlock_debug.c
16 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, in __raw_spin_lock_init() argument
21 * Make sure we are not reinitializing a held lock: in __raw_spin_lock_init()
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); in __raw_spin_lock_init()
24 lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner); in __raw_spin_lock_init()
26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in __raw_spin_lock_init()
27 lock->magic = SPINLOCK_MAGIC; in __raw_spin_lock_init()
28 lock->owner = SPINLOCK_OWNER_INIT; in __raw_spin_lock_init()
29 lock->owner_cpu = -1; in __raw_spin_lock_init()
34 void __rwlock_init(rwlock_t *lock, const char *name, in __rwlock_init() argument
39 * Make sure we are not reinitializing a held lock: in __rwlock_init()
[all …]
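
The debug variant above stamps each lock with a magic value and records the owner so misuse can be caught at runtime. A hedged userspace sketch of that bookkeeping on pthreads; SPINLOCK_MAGIC is the kernel's constant, everything else here is illustrative:

#include <assert.h>
#include <pthread.h>

#define SPINLOCK_MAGIC 0xdead4ead   /* the kernel's value */

struct dbg_lock {
	pthread_mutex_t raw;    /* stands in for lock->raw_lock */
	unsigned int magic;     /* catches uninitialized or trampled locks */
	pthread_t owner;        /* who holds it, for diagnostics */
	int owned;
};

static void dbg_lock_init(struct dbg_lock *l)
{
	pthread_mutex_init(&l->raw, NULL);
	l->magic = SPINLOCK_MAGIC;
	l->owned = 0;
}

static void dbg_lock(struct dbg_lock *l)
{
	assert(l->magic == SPINLOCK_MAGIC);     /* ever initialized? */
	/* racy, best-effort self-deadlock check, like the kernel's */
	assert(!l->owned || !pthread_equal(l->owner, pthread_self()));
	pthread_mutex_lock(&l->raw);
	l->owner = pthread_self();
	l->owned = 1;
}

static void dbg_unlock(struct dbg_lock *l)
{
	l->owned = 0;
	pthread_mutex_unlock(&l->raw);
}
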
rtmutex.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
7 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
12 * See Documentation/locking/rt-mutex-design.rst for details.
26 * lock->owner state tracking:
28 * lock->owner holds the task_struct pointer of the owner. Bit 0
29 * is used to keep track of the "lock has waiters" state.
32 * NULL 0 lock is free (fast acquire possible)
33 * NULL 1 lock is free and has waiters and the top waiter
[all …]
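
The owner-tracking comment above describes pointer tagging: because task_struct pointers are word-aligned, bit 0 of lock->owner is free to carry the "has waiters" flag. A self-contained illustration in plain userspace C, with a hypothetical task struct:

#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL   /* bit 0 of the owner word */

struct task { int pid; };  /* stand-in for task_struct */

static struct task *owner_task(uintptr_t owner)
{
	return (struct task *)(owner & ~HAS_WAITERS);  /* strip the flag */
}

static int owner_has_waiters(uintptr_t owner)
{
	return owner & HAS_WAITERS;
}

int main(void)
{
	struct task t = { .pid = 42 };  /* alignment keeps bit 0 clear */
	uintptr_t owner = (uintptr_t)&t | HAS_WAITERS;

	printf("owner pid=%d, has_waiters=%d\n",
	       owner_task(owner)->pid, owner_has_waiters(owner));
	return 0;
}
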
mutex.c
1 // SPDX-License-Identifier: GPL-2.0-only
14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15 * from the -rt tree, where it was originally implemented for rtmutexes
19 * Also see Documentation/locking/mutex-design.rst.
34 # include "mutex-debug.h"
40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) in __mutex_init() argument
42 atomic_long_set(&lock->owner, 0); in __mutex_init()
43 spin_lock_init(&lock->wait_lock); in __mutex_init()
44 INIT_LIST_HEAD(&lock->wait_list); in __mutex_init()
46 osq_lock_init(&lock->osq); in __mutex_init()
[all …]
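
For context, the caller-facing side of the API that __mutex_init() backs. A sketch that builds only inside the kernel tree; the my_dev_* names are made up:

#include <linux/mutex.h>

static DEFINE_MUTEX(my_dev_lock);   /* static init; no __mutex_init call */

static int my_dev_value;

static int my_dev_update(int v)
{
	mutex_lock(&my_dev_lock);       /* may sleep, unlike a spinlock */
	my_dev_value = v;
	mutex_unlock(&my_dev_lock);
	return v;
}
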
/kernel/linux/linux-4.19/kernel/locking/
spinlock_debug.c
16 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, in __raw_spin_lock_init() argument
21 * Make sure we are not reinitializing a held lock: in __raw_spin_lock_init()
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); in __raw_spin_lock_init()
24 lockdep_init_map(&lock->dep_map, name, key, 0); in __raw_spin_lock_init()
26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in __raw_spin_lock_init()
27 lock->magic = SPINLOCK_MAGIC; in __raw_spin_lock_init()
28 lock->owner = SPINLOCK_OWNER_INIT; in __raw_spin_lock_init()
29 lock->owner_cpu = -1; in __raw_spin_lock_init()
34 void __rwlock_init(rwlock_t *lock, const char *name, in __rwlock_init() argument
39 * Make sure we are not reinitializing a held lock: in __rwlock_init()
[all …]
rtmutex.c
2 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
11 * See Documentation/locking/rt-mutex-design.txt for details.
25 * lock->owner state tracking:
27 * lock->owner holds the task_struct pointer of the owner. Bit 0
28 * is used to keep track of the "lock has waiters" state.
31 * NULL 0 lock is free (fast acquire possible)
32 * NULL 1 lock is free and has waiters and the top waiter
33 * is going to take the lock*
[all …]
mutex.c
13 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
14 * from the -rt tree, where it was originally implemented for rtmutexes
18 * Also see Documentation/locking/mutex-design.txt.
33 # include "mutex-debug.h"
39 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) in __mutex_init() argument
41 atomic_long_set(&lock->owner, 0); in __mutex_init()
42 spin_lock_init(&lock->wait_lock); in __mutex_init()
43 INIT_LIST_HEAD(&lock->wait_list); in __mutex_init()
45 osq_lock_init(&lock->osq); in __mutex_init()
48 debug_mutex_init(lock, name, key); in __mutex_init()
[all …]
/kernel/linux/linux-4.19/include/linux/
rwlock_api_smp.h
18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
21 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
22 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
24 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
25 __acquires(lock);
26 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
27 __acquires(lock);
[all …]
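
These _raw_*() declarations back the read_lock()/write_lock() family that drivers actually call. A minimal in-kernel usage sketch of the common variants; stats_lock and the helpers are illustrative:

#include <linux/spinlock.h>   /* pulls in the rwlock API */

static DEFINE_RWLOCK(stats_lock);
static unsigned long hits;

static unsigned long stats_read(void)
{
	unsigned long v;

	read_lock(&stats_lock);         /* readers may share the lock */
	v = hits;
	read_unlock(&stats_lock);
	return v;
}

static void stats_bump(void)
{
	unsigned long flags;

	write_lock_irqsave(&stats_lock, flags);  /* exclusive, IRQs off */
	hits++;
	write_unlock_irqrestore(&stats_lock, flags);
}
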
spinlock_api_smp.h
22 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock);
26 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27 __acquires(lock);
28 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30 __acquires(lock);
32 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33 __acquires(lock);
[all …]
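
The corresponding caller-side pattern for the spinlock declarations above; the _irqsave variant saves and disables local interrupts around the critical section. In-kernel sketch, fifo_* names are made up:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(fifo_lock);
static int fifo_depth;

static void fifo_push(void)
{
	unsigned long flags;

	spin_lock_irqsave(&fifo_lock, flags);   /* safe vs. local IRQs */
	fifo_depth++;
	spin_unlock_irqrestore(&fifo_lock, flags);
}
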
spinlock.h
1 /* SPDX-License-Identifier: GPL-2.0 */
6 * include/linux/spinlock.h - generic spinlock/rwlock declarations
21 * (also included on UP-debug builds:)
32 * (which is an empty structure on non-debug builds)
39 * builds. (which are NOPs on non-debug, non-preempt
42 * (included on UP-non-debug builds:)
65 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
85 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
94 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
96 # define raw_spin_lock_init(lock) \ argument
[all …]
rwlock.h
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
20 # define rwlock_init(lock) \ argument
24 __rwlock_init((lock), #lock, &__key); \
27 # define rwlock_init(lock) \ argument
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
32 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
33 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) argument
34 extern int do_raw_read_trylock(rwlock_t *lock);
35 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
36 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
[all …]
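
Hand-expanding the rwlock_init() macro quoted above makes the lockdep trick visible: each init call site gets its own static lock_class_key, so every lock initialized there lands in a single lock class. A sketch for debug builds; my_lock and my_init are illustrative:

#include <linux/spinlock.h>

static rwlock_t my_lock;

static void my_init(void)
{
	/* rwlock_init(&my_lock) expands, per the macro above, to: */
	do {
		static struct lock_class_key __key;  /* one per call site */

		__rwlock_init(&my_lock, "&my_lock", &__key); /* #lock stringified */
	} while (0);
}
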
/kernel/linux/linux-5.10/include/linux/
rwlock_api_smp.h
18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
21 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
22 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
24 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
25 __acquires(lock);
26 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
27 __acquires(lock);
[all …]
spinlock_api_smp.h
22 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock);
26 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27 __acquires(lock);
28 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30 __acquires(lock);
32 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33 __acquires(lock);
[all …]
spinlock.h
1 /* SPDX-License-Identifier: GPL-2.0 */
6 * include/linux/spinlock.h - generic spinlock/rwlock declarations
21 * (also included on UP-debug builds:)
32 * (which is an empty structure on non-debug builds)
39 * builds. (which are NOPs on non-debug, non-preempt
42 * (included on UP-non-debug builds:)
67 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
87 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
96 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
99 # define raw_spin_lock_init(lock) \ argument
[all …]
rwlock.h
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
20 # define rwlock_init(lock) \ argument
24 __rwlock_init((lock), #lock, &__key); \
27 # define rwlock_init(lock) \ argument
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
32 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
33 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) argument
34 extern int do_raw_read_trylock(rwlock_t *lock);
35 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
36 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/vmwgfx/
ttm_lock.c
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
45 void ttm_lock_init(struct ttm_lock *lock) in ttm_lock_init() argument
47 spin_lock_init(&lock->lock); in ttm_lock_init()
48 init_waitqueue_head(&lock->queue); in ttm_lock_init()
49 lock->rw = 0; in ttm_lock_init()
50 lock->flags = 0; in ttm_lock_init()
53 void ttm_read_unlock(struct ttm_lock *lock) in ttm_read_unlock() argument
[all …]
ttm_lock.h
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
33 * of the DRM heavyweight hardware lock.
34 * The lock is a read-write lock. Taking it in read mode and write mode
35 * is relatively fast, and intended for in-kernel use only.
38 * user-space processes from validating buffers.
39 * It's allowed to leave kernel space with the vt lock held.
40 * If a user-space process dies while having the vt-lock,
41 * it will be released during the file descriptor release. The vt lock
[all …]
/kernel/linux/linux-4.19/include/drm/ttm/
ttm_lock.h
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
33 * of the DRM heavyweight hardware lock.
34 * The lock is a read-write lock. Taking it in read mode and write mode
35 * is relatively fast, and intended for in-kernel use only.
38 * user-space processes from validating buffers.
39 * It's allowed to leave kernel space with the vt lock held.
40 * If a user-space process dies while having the vt-lock,
41 * it will be released during the file descriptor release. The vt lock
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_lock.c
50 * Take the heavyweight lock.
52 * \param lock lock pointer.
54 * \return one if the lock is held, or zero otherwise.
56 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
63 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_lock_take() local
65 spin_lock_bh(&lock_data->spinlock); in drm_lock_take()
67 old = *lock; in drm_lock_take()
72 ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? in drm_lock_take()
75 prev = cmpxchg(lock, old, new); in drm_lock_take()
77 spin_unlock_bh(&lock_data->spinlock); in drm_lock_take()
[all …]
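
drm_lock_take() above is a compare-and-swap loop: read the lock word, propose a new value marking it held, and retry if cmpxchg saw an intervening change. A userspace sketch of the same loop with C11 atomics; HELD_FLAG mirrors the kernel's _DRM_LOCK_HELD bit, the rest is illustrative:

#include <stdatomic.h>

#define HELD_FLAG 0x80000000u   /* mirrors _DRM_LOCK_HELD */

static atomic_uint hw_lock;

/* Try to mark the lock held by `context`; 1 on success, 0 if busy. */
static int lock_take(unsigned int context)
{
	unsigned int old, new;

	do {
		old = atomic_load(&hw_lock);
		if (old & HELD_FLAG)
			return 0;                  /* already held */
		new = context | HELD_FLAG;         /* propose "held by us" */
	} while (!atomic_compare_exchange_weak(&hw_lock, &old, new));

	return 1;
}
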
/kernel/linux/linux-4.19/drivers/gpu/drm/
drm_lock.c
46 * Take the heavyweight lock.
48 * \param lock lock pointer.
50 * \return one if the lock is held, or zero otherwise.
52 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
59 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_lock_take() local
61 spin_lock_bh(&lock_data->spinlock); in drm_lock_take()
63 old = *lock; in drm_lock_take()
68 ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? in drm_lock_take()
71 prev = cmpxchg(lock, old, new); in drm_lock_take()
73 spin_unlock_bh(&lock_data->spinlock); in drm_lock_take()
[all …]
/kernel/linux/linux-5.10/fs/ocfs2/dlm/
dlmast.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* -*- mode: c; c-basic-offset: 8; -*-
37 struct dlm_lock *lock);
38 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
41 * lock level will obsolete a pending bast.
42 * For example, if dlm_thread queued a bast for an EX lock that
44 * lock owner downconverted to NL, the bast is now obsolete.
46 * This is needed because the lock and convert paths can queue
47 * asts out-of-band (not waiting for dlm_thread) in order to
49 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) in dlm_should_cancel_bast() argument
[all …]
dlmlock.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* -*- mode: c; c-basic-offset: 8; -*-
7 * underlying calls for lock creation
47 struct dlm_lock *lock, int flags);
51 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
59 return -ENOMEM; in dlm_init_lock_cache()
68 /* Tell us whether we can grant a new lock request.
70 * caller needs: res->spinlock
73 * returns: 1 if the lock can be granted, 0 otherwise.
76 struct dlm_lock *lock) in dlm_can_grant_new_lock() argument
[all …]
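
Both the bast-cancellation check in dlmast.c and dlm_can_grant_new_lock() above reduce to the standard DLM mode-compatibility question: does the mode currently held conflict with the mode requested? A self-contained sketch using the usual six-mode table; the helper names are ours, not the files':

/* The usual six DLM modes, weakest to strongest. */
enum dlm_mode { NL, CR, CW, PR, PW, EX };

/* compat[held][wanted]: can `wanted` be granted while `held` is held? */
static const int compat[6][6] = {
	/*          NL CR CW PR PW EX */
	/* NL */  {  1, 1, 1, 1, 1, 1 },
	/* CR */  {  1, 1, 1, 1, 1, 0 },
	/* CW */  {  1, 1, 1, 0, 0, 0 },
	/* PR */  {  1, 1, 0, 1, 0, 0 },
	/* PW */  {  1, 1, 0, 0, 0, 0 },
	/* EX */  {  1, 0, 0, 0, 0, 0 },
};

/* Grant check: the granted mode must tolerate the wanted mode. */
static int can_grant(enum dlm_mode held, enum dlm_mode wanted)
{
	return compat[held][wanted];
}

/* Bast check: once the holder downconverts (say EX -> NL), a queued
 * bast for a request it no longer blocks is obsolete and cancellable. */
static int bast_obsolete(enum dlm_mode held_now, enum dlm_mode wanted)
{
	return can_grant(held_now, wanted);
}
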
/kernel/linux/linux-4.19/fs/ocfs2/dlm/
dlmast.c
1 /* -*- mode: c; c-basic-offset: 8; -*-
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 021110-1307, USA.
52 struct dlm_lock *lock);
53 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
56 * lock level will obsolete a pending bast.
57 * For example, if dlm_thread queued a bast for an EX lock that
59 * lock owner downconverted to NL, the bast is now obsolete.
61 * This is needed because the lock and convert paths can queue
62 * asts out-of-band (not waiting for dlm_thread) in order to
[all …]
/kernel/linux/linux-4.19/Documentation/locking/
lockdep-design.txt
7 Lock-class
8 ----------
14 tens of thousands of) instantiations. For example a lock in the inode
16 lock class.
18 The validator tracks the 'state' of lock-classes, and it tracks
19 dependencies between different lock-classes. The validator maintains a
22 Unlike a lock instantiation, the lock-class itself never goes away: when
23 a lock-class is used for the first time after bootup it gets registered,
24 and all subsequent uses of that lock-class will be attached to this
25 lock-class.
[all …]
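
The class-versus-instance distinction above can be made concrete: every lock initialized at a given spin_lock_init() call site shares one static lock_class_key and therefore one lock-class, no matter how many instances exist. In-kernel sketch; my_inode is illustrative:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_inode {
	spinlock_t i_lock;
};

static struct my_inode *my_inode_alloc(void)
{
	struct my_inode *ino = kzalloc(sizeof(*ino), GFP_KERNEL);

	if (!ino)
		return NULL;
	spin_lock_init(&ino->i_lock);  /* many instances, one lock-class */
	return ino;
}
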
/kernel/linux/linux-4.19/drivers/md/persistent-data/
dm-block-manager.c
6 #include "dm-block-manager.h"
7 #include "dm-persistent-data-internal.h"
9 #include <linux/dm-bufio.h>
14 #include <linux/device-mapper.h>
20 /*----------------------------------------------------------------*/
31 * trace is also emitted for the previous lock acquisition.
41 spinlock_t lock; member
58 static unsigned __find_holder(struct block_lock *lock, in __find_holder() argument
64 if (lock->holders[i] == task) in __find_holder()
71 /* call this *after* you increment lock->count */
[all …]
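
A self-contained reading of the __find_holder() loop above: scan a small fixed array of current holders for a task, or for a NULL slot when registering one. The value of MAX_HOLDERS and the assert are assumptions inferred from the visible snippet:

#include <assert.h>

#define MAX_HOLDERS 4   /* assumed small bound for the holders array */

struct block_lock {
	void *holders[MAX_HOLDERS];   /* tasks currently holding the lock */
};

static unsigned find_holder(struct block_lock *lock, void *task)
{
	unsigned i;

	for (i = 0; i < MAX_HOLDERS; i++)
		if (lock->holders[i] == task)
			return i;  /* pass task == NULL to find a free slot */

	assert(0);                 /* caller guarantees a match exists */
	return 0;
}
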
