/lib/
atomic64.c
      32: raw_spinlock_t lock;  (member)
      36: .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
      46: return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;  (in lock_addr())
      52: raw_spinlock_t *lock = lock_addr(v);  (in atomic64_read(), local)
      55: raw_spin_lock_irqsave(lock, flags);  (in atomic64_read())
      57: raw_spin_unlock_irqrestore(lock, flags);  (in atomic64_read())
      65: raw_spinlock_t *lock = lock_addr(v);  (in atomic64_set(), local)
      67: raw_spin_lock_irqsave(lock, flags);  (in atomic64_set())
      69: raw_spin_unlock_irqrestore(lock, flags);  (in atomic64_set())
      77: raw_spinlock_t *lock = lock_addr(v); \
    [all …]
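
These matches are the generic 64-bit atomic fallback for machines without native
64-bit atomics: a small hash of raw spinlocks, indexed by the variable's address,
serializes every operation. A minimal sketch of the read path, following
lib/atomic64.c (NR_LOCKS = 16 there); locking is irq-safe because atomics must
work from interrupt context:

    #include <linux/cache.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define NR_LOCKS        16

    /* One raw spinlock per cache line; all atomic64_t variables that
     * hash to the same slot share a lock. */
    static union {
            raw_spinlock_t lock;
            char pad[L1_CACHE_BYTES];
    } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
            [0 ... (NR_LOCKS - 1)] = {
                    .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
            },
    };

    static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
    {
            unsigned long addr = (unsigned long) v;

            addr >>= L1_CACHE_SHIFT;        /* spread neighbours over locks */
            return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
    }

    long long atomic64_read(const atomic64_t *v)
    {
            unsigned long flags;
            raw_spinlock_t *lock = lock_addr(v);
            long long val;

            raw_spin_lock_irqsave(lock, flags);
            val = v->counter;
            raw_spin_unlock_irqrestore(lock, flags);
            return val;
    }
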
lockref.c
      14: while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
      48: spin_lock(&lockref->lock);  (in lockref_get())
      50: spin_unlock(&lockref->lock);  (in lockref_get())
      71: spin_lock(&lockref->lock);  (in lockref_get_not_zero())
      77: spin_unlock(&lockref->lock);  (in lockref_get_not_zero())
      98: spin_lock(&lockref->lock);  (in lockref_get_or_lock())
     102: spin_unlock(&lockref->lock);  (in lockref_get_or_lock())
     142: spin_lock(&lockref->lock);  (in lockref_put_or_lock())
     146: spin_unlock(&lockref->lock);  (in lockref_put_or_lock())
     157: assert_spin_locked(&lockref->lock);  (in lockref_mark_dead())
    [all …]
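
A lockref packs a spinlock and a reference count into a single machine word. The
CMPXCHG_LOOP on line 14 retries a cmpxchg of that whole word for as long as the
embedded lock reads as unlocked; the spin_lock/spin_unlock pairs above are the
fallback once the lock is actually held. lockref_get() shows the shape (the
CMPXCHG_LOOP macro and its old/new temporaries live in lib/lockref.c):

    void lockref_get(struct lockref *lockref)
    {
            /* Fast path: bump the count with one cmpxchg while the
             * embedded spinlock is observed unlocked. */
            CMPXCHG_LOOP(
                    new.count++;
            ,
                    return;
            );

            /* Slow path: someone holds the lock, queue up behind it. */
            spin_lock(&lockref->lock);
            lockref->count++;
            spin_unlock(&lockref->lock);
    }
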
percpu_ida.c
      35: spinlock_t lock;  (member)
      84: spin_lock(&remote->lock);  (in steal_tags())
      95: spin_unlock(&remote->lock);  (in steal_tags())
     118: spin_lock(&tags->lock);  (in alloc_local_tag())
     121: spin_unlock(&tags->lock);  (in alloc_local_tag())
     162: spin_lock(&pool->lock);  (in percpu_ida_alloc())
     186: spin_unlock(&pool->lock);  (in percpu_ida_alloc())
     227: spin_lock(&tags->lock);  (in percpu_ida_free())
     231: spin_unlock(&tags->lock);  (in percpu_ida_free())
     240: spin_lock(&pool->lock);  (in percpu_ida_free())
    [all …]
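
Two lock levels are visible here: each CPU caches free tags behind its own
tags->lock (alloc_local_tag, percpu_ida_free), while the shared reserve and
cross-CPU stealing (steal_tags) take pool->lock. A sketch of the local fast
path, modelled on lib/percpu_ida.c:

    static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
    {
            int tag = -ENOSPC;

            /* Only this CPU and the occasional tag thief take this
             * lock, so it is almost always uncontended. */
            spin_lock(&tags->lock);
            if (tags->nr_free)
                    tag = tags->freelist[--tags->nr_free];
            spin_unlock(&tags->lock);

            return tag;
    }
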
percpu_counter.c
      65: raw_spin_lock_irqsave(&fbc->lock, flags);  (in percpu_counter_set())
      71: raw_spin_unlock_irqrestore(&fbc->lock, flags);  (in percpu_counter_set())
      83: raw_spin_lock_irqsave(&fbc->lock, flags);  (in __percpu_counter_add())
      86: raw_spin_unlock_irqrestore(&fbc->lock, flags);  (in __percpu_counter_add())
     104: raw_spin_lock_irqsave(&fbc->lock, flags);  (in __percpu_counter_sum())
     110: raw_spin_unlock_irqrestore(&fbc->lock, flags);  (in __percpu_counter_sum())
     120: raw_spin_lock_init(&fbc->lock);  (in __percpu_counter_init())
     121: lockdep_set_class(&fbc->lock, key);  (in __percpu_counter_init())
     185: raw_spin_lock_irqsave(&fbc->lock, flags);  (in percpu_counter_hotcpu_callback())
     189: raw_spin_unlock_irqrestore(&fbc->lock, flags);  (in percpu_counter_hotcpu_callback())
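
fbc->lock guards only the shared s64 total; each CPU accumulates a small delta
lock-free and folds it into the total once it crosses the batch threshold. The
adder, following lib/percpu_counter.c:

    void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
    {
            s64 count;

            preempt_disable();
            count = __this_cpu_read(*fbc->counters) + amount;
            if (count >= batch || count <= -batch) {
                    unsigned long flags;

                    /* Local delta grew too large: fold it into the
                     * shared total and reset the per-cpu counter. */
                    raw_spin_lock_irqsave(&fbc->lock, flags);
                    fbc->count += count;
                    __this_cpu_sub(*fbc->counters, count - amount);
                    raw_spin_unlock_irqrestore(&fbc->lock, flags);
            } else {
                    /* Common case: stay per-cpu, take no lock. */
                    this_cpu_add(*fbc->counters, amount);
            }
            preempt_enable();
    }
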
debugobjects.c
      33: raw_spinlock_t lock;  (member)
     227: raw_spin_lock_irqsave(&db->lock, flags);  (in debug_objects_oom())
     229: raw_spin_unlock_irqrestore(&db->lock, flags);  (in debug_objects_oom())
     318: raw_spin_lock_irqsave(&db->lock, flags);  (in __debug_object_init())
     325: raw_spin_unlock_irqrestore(&db->lock, flags);  (in __debug_object_init())
     342: raw_spin_unlock_irqrestore(&db->lock, flags);  (in __debug_object_init())
     353: raw_spin_unlock_irqrestore(&db->lock, flags);  (in __debug_object_init())
     407: raw_spin_lock_irqsave(&db->lock, flags);  (in debug_object_activate())
     421: raw_spin_unlock_irqrestore(&db->lock, flags);  (in debug_object_activate())
     433: raw_spin_unlock_irqrestore(&db->lock, flags);  (in debug_object_activate())
    [all …]
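
db here is one bucket of a hash table keyed by the tracked object's address;
each bucket has its own raw spinlock so the tracker can run from any context,
including inside the scheduler and irq paths. A sketch of the bucket lookup,
with constants modelled on lib/debugobjects.c:

    #define ODEBUG_HASH_BITS        14
    #define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)
    #define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT

    struct debug_bucket {
            struct hlist_head       list;
            raw_spinlock_t          lock;   /* the db->lock seen above */
    };

    static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

    static struct debug_bucket *get_bucket(unsigned long addr)
    {
            unsigned long hash;

            hash = hash_long(addr >> ODEBUG_CHUNK_SHIFT, ODEBUG_HASH_BITS);
            return &obj_hash[hash];
    }
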
dec_and_lock.c
      20: int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)  (argument)
      27: spin_lock(lock);  (in _atomic_dec_and_lock())
      30: spin_unlock(lock);  (in _atomic_dec_and_lock())
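
The three matches are nearly the whole file. atomic_dec_and_lock() decrements
lock-free unless the count could reach zero; only the potential final reference
takes the lock and re-checks under it, so the caller holds the lock exactly when
the object must be torn down. The generic version in full:

    int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
    {
            /* Subtract 1 unless the count is 1: the common, lock-free case. */
            if (atomic_add_unless(atomic, -1, 1))
                    return 0;

            /* Possibly the last reference: decide under the lock. */
            spin_lock(lock);
            if (atomic_dec_and_test(atomic))
                    return 1;       /* hit zero; return with lock held */
            spin_unlock(lock);
            return 0;
    }
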
iommu-common.c
      81: spin_lock_init(&(iommu->pools[i].lock));  (in iommu_tbl_pool_init())
      90: spin_lock_init(&(p->lock));  (in iommu_tbl_pool_init())
     133: spin_lock_irqsave(&pool->lock, flags);  (in iommu_tbl_range_alloc())
     160: spin_unlock(&(pool->lock));  (in iommu_tbl_range_alloc())
     162: spin_lock(&(pool->lock));  (in iommu_tbl_range_alloc())
     195: spin_unlock(&(pool->lock));  (in iommu_tbl_range_alloc())
     198: spin_lock(&(pool->lock));  (in iommu_tbl_range_alloc())
     222: spin_unlock_irqrestore(&(pool->lock), flags);  (in iommu_tbl_range_alloc())
     262: spin_lock_irqsave(&(pool->lock), flags);  (in iommu_tbl_range_free())
     264: spin_unlock_irqrestore(&(pool->lock), flags);  (in iommu_tbl_range_free())
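
Note the asymmetry in iommu_tbl_range_alloc(): one irqsave on entry (line 133),
one irqrestore on exit (line 222), but plain unlock/lock pairs in between.
Interrupts stay disabled while the allocator hops from a full pool to the next;
only the per-pool spinlock changes hands. A sketch of that shape, where
try_alloc_from() is a hypothetical stand-in for the real bitmap search:

    static unsigned long alloc_from_pools(struct iommu_map_table *iommu,
                                          unsigned long npages)
    {
            unsigned int pool_nr = 0, tries = 0;
            struct iommu_pool *pool = &iommu->pools[pool_nr];
            unsigned long flags, n;

            spin_lock_irqsave(&pool->lock, flags);  /* irqs off from here */
            for (;;) {
                    n = try_alloc_from(pool, npages);       /* hypothetical */
                    if (n != IOMMU_ERROR_CODE || ++tries >= iommu->nr_pools)
                            break;
                    /* Pool full: swap per-pool locks, irqs stay off. */
                    spin_unlock(&pool->lock);
                    pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
                    pool = &iommu->pools[pool_nr];
                    spin_lock(&pool->lock);
            }
            spin_unlock_irqrestore(&pool->lock, flags);     /* irqs back on */
            return n;
    }
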
flex_proportions.c
      97: raw_spin_lock_init(&pl->lock);  (in fprop_local_init_single())
     114: raw_spin_lock_irqsave(&pl->lock, flags);  (in fprop_reflect_period_single())
     117: raw_spin_unlock_irqrestore(&pl->lock, flags);  (in fprop_reflect_period_single())
     126: raw_spin_unlock_irqrestore(&pl->lock, flags);  (in fprop_reflect_period_single())
     179: raw_spin_lock_init(&pl->lock);  (in fprop_local_init_percpu())
     197: raw_spin_lock_irqsave(&pl->lock, flags);  (in fprop_reflect_period_percpu())
     200: raw_spin_unlock_irqrestore(&pl->lock, flags);  (in fprop_reflect_period_percpu())
     215: raw_spin_unlock_irqrestore(&pl->lock, flags);  (in fprop_reflect_period_percpu())
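
Each lock at lines 114/197 has two matching unlocks because
fprop_reflect_period_*() bails out early when another CPU already aged the
counter. The single-counter variant, closely following lib/flex_proportions.c:

    static void fprop_reflect_period_single(struct fprop_global *p,
                                            struct fprop_local_single *pl)
    {
            unsigned int period = p->period;
            unsigned long flags;

            /* Fast path: period unchanged (racy, re-checked below). */
            if (pl->period == period)
                    return;

            raw_spin_lock_irqsave(&pl->lock, flags);
            /* Someone updated pl->period while we were spinning? */
            if (pl->period >= period) {
                    raw_spin_unlock_irqrestore(&pl->lock, flags);
                    return;
            }
            /* Halve the event count once per missed period. */
            if (period - pl->period < BITS_PER_LONG)
                    pl->events >>= period - pl->period;
            else
                    pl->events = 0;
            pl->period = period;
            raw_spin_unlock_irqrestore(&pl->lock, flags);
    }
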
rhashtable.c
      53: spinlock_t *lock = rht_bucket_lock(tbl, hash);  (in lockdep_rht_bucket_is_held(), local)
      55: return (debug_locks) ? lockdep_is_held(lock) : 1;  (in lockdep_rht_bucket_is_held())
     260: spin_lock(&ht->lock);  (in rhashtable_rehash_table())
     263: spin_unlock(&ht->lock);  (in rhashtable_rehash_table())
     529: spinlock_t *lock;  (in rhashtable_try_insert(), local)
     539: lock = rht_bucket_lock(tbl, hash);  (in rhashtable_try_insert())
     540: spin_lock_bh(lock);  (in rhashtable_try_insert())
     545: spin_unlock_bh(lock);  (in rhashtable_try_insert())
     568: spin_unlock_bh(lock);  (in rhashtable_try_insert())
     619: spin_lock(&ht->lock);  (in rhashtable_walk_enter())
    [all …]
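
Two granularities here: ht->lock serializes table-wide operations (rehash,
walker registration), while inserts take a per-bucket spinlock with BH disabled
because lookups may run from softirq context. In rhashtable of this vintage the
bucket locks live in a smaller array that neighbouring buckets share; a sketch:

    static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
                                              unsigned int hash)
    {
            /* locks_mask < table size: several buckets per lock. */
            return &tbl->locks[hash & tbl->locks_mask];
    }

    /* Insert-side usage, shaped like rhashtable_try_insert(): */
    static void insert_sketch(struct bucket_table *tbl, unsigned int hash)
    {
            spinlock_t *lock = rht_bucket_lock(tbl, hash);

            spin_lock_bh(lock);     /* BH off: lookups run in softirq */
            /* ... walk the chain, reject duplicates, link the entry ... */
            spin_unlock_bh(lock);
    }
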
ratelimit.c
      42: if (!raw_spin_trylock_irqsave(&rs->lock, flags))  (in ___ratelimit())
      67: raw_spin_unlock_irqrestore(&rs->lock, flags);  (in ___ratelimit())
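
___ratelimit() may be called from the printk path itself, so it must never spin
on its own lock; a contended call simply reports the message as suppressed. A
sketch with the windowing logic elided:

    int ___ratelimit(struct ratelimit_state *rs, const char *func)
    {
            unsigned long flags;
            int ret;

            if (!rs->interval)
                    return 1;       /* rate limiting disabled */

            /* Contention means we are too busy to print anyway, and
             * spinning here could deadlock against ourselves. */
            if (!raw_spin_trylock_irqsave(&rs->lock, flags))
                    return 0;

            ret = 1;                /* window accounting elided */

            raw_spin_unlock_irqrestore(&rs->lock, flags);
            return ret;
    }
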
idr.c
      71: spin_lock_irqsave(&idp->lock, flags);  (in get_from_free_list())
      77: spin_unlock_irqrestore(&idp->lock, flags);  (in get_from_free_list())
     167: spin_lock_irqsave(&idp->lock, flags);  (in move_to_free_list())
     169: spin_unlock_irqrestore(&idp->lock, flags);  (in move_to_free_list())
     327: spin_lock_irqsave(&idp->lock, flags);  (in idr_get_empty_slot())
     335: spin_unlock_irqrestore(&idp->lock, flags);  (in idr_get_empty_slot())
     841: spin_lock_init(&idp->lock);  (in idr_init())
     873: spin_lock_irqsave(&ida->idr.lock, flags);  (in free_bitmap())
     878: spin_unlock_irqrestore(&ida->idr.lock, flags);  (in free_bitmap())
     957: spin_lock_irqsave(&ida->idr.lock, flags);  (in ida_get_new_above())
    [all …]
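
idp->lock does not protect the id tree itself (callers serialize that); it
guards only the per-idr cache of preallocated idr_layer nodes, and is irq-safe
so layers can be recycled from any context. The list-pop under the lock, as in
the classic lib/idr.c:

    static struct idr_layer *get_from_free_list(struct idr *idp)
    {
            struct idr_layer *p;
            unsigned long flags;

            spin_lock_irqsave(&idp->lock, flags);
            if ((p = idp->id_free)) {
                    /* Pop the head; free layers chain through ary[0]. */
                    idp->id_free = p->ary[0];
                    idp->id_free_cnt--;
                    p->ary[0] = NULL;
            }
            spin_unlock_irqrestore(&idp->lock, flags);
            return p;
    }
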
random32.c
     267: static DEFINE_SPINLOCK(lock);  (in __prandom_reseed())
     279: if (!spin_trylock_irqsave(&lock, flags))  (in __prandom_reseed())
     288: spin_unlock_irqrestore(&lock, flags);  (in __prandom_reseed())
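
Same trylock idea as ratelimit.c: reseeding can be triggered from awkward
contexts, and a reseed that loses the race is safe to skip. The lock is a
function-local static, as line 267 shows; a sketch with the per-cpu reseeding
elided:

    static void __prandom_reseed(bool late)
    {
            static bool latch = false;
            static DEFINE_SPINLOCK(lock);
            unsigned long flags;

            /* Losing the race means someone else is already reseeding. */
            if (!spin_trylock_irqsave(&lock, flags))
                    return;

            if (latch && !late)
                    goto out;       /* initial seeding happens only once */
            latch = true;

            /* ... feed fresh entropy into every CPU's rnd_state ... */
    out:
            spin_unlock_irqrestore(&lock, flags);
    }
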
dma-debug.c
      94: spinlock_t lock;  (member)
     259: __acquires(&dma_entry_hash[idx].lock)  (in get_hash_bucket())
     264: spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);  (in get_hash_bucket())
     274: __releases(&bucket->lock)  (in put_hash_bucket())
     278: spin_unlock_irqrestore(&bucket->lock, __flags);  (in put_hash_bucket())
     423: spin_lock_irqsave(&bucket->lock, flags);  (in debug_dma_dump_mappings())
     437: spin_unlock_irqrestore(&bucket->lock, flags);  (in debug_dma_dump_mappings())
     946: spin_lock(&dma_entry_hash[i].lock);  (in device_dma_allocations())
     953: spin_unlock(&dma_entry_hash[i].lock);  (in device_dma_allocations())
    1024: spin_lock_init(&dma_entry_hash[i].lock);  (in dma_debug_init())
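
The lock and unlock deliberately live in different functions: get_hash_bucket()
returns with the bucket lock held and put_hash_bucket() drops it, so both carry
sparse annotations (__acquires/__releases, lines 259 and 274) telling the
static checker the imbalance is intentional. Following lib/dma-debug.c, with
hash_fn() as the bucket hash:

    static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                               unsigned long *flags)
            __acquires(&dma_entry_hash[idx].lock)
    {
            int idx = hash_fn(entry);
            unsigned long __flags;

            spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
            *flags = __flags;       /* hand the irq state to the caller */
            return &dma_entry_hash[idx];
    }

    static void put_hash_bucket(struct hash_bucket *bucket,
                                unsigned long *flags)
            __releases(&bucket->lock)
    {
            unsigned long __flags = *flags;

            spin_unlock_irqrestore(&bucket->lock, __flags);
    }
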
genalloc.c
     158: spin_lock_init(&pool->lock);  (in gen_pool_create())
     199: spin_lock(&pool->lock);  (in gen_pool_add_virt())
     201: spin_unlock(&pool->lock);  (in gen_pool_add_virt())
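
pool->lock covers only mutations of the chunk list; allocation within a chunk
uses lock-free bitmap operations under RCU, which is why the hot path
contributes no matches. The critical section from gen_pool_add_virt(), wrapped
in a hypothetical helper for illustration:

    static void add_chunk(struct gen_pool *pool, struct gen_pool_chunk *chunk)
    {
            spin_lock(&pool->lock);
            list_add_rcu(&chunk->next_chunk, &pool->chunks);
            spin_unlock(&pool->lock);
    }
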
Kconfig.debug
    1006: bool "Spinlock and rw-lock debugging: basic checks"
    1046: This feature will check whether any held lock (spinlock, rwlock,
    1049: vfree(), etc.), whether a live lock is incorrectly reinitialized via
    1050: spin_lock_init()/mutex_init()/etc., or whether there is any lock
    1116: This feature enables tracking lock contention points
    1120: This also enables lock events required by "perf lock",
    1122: If you want to use "perf lock", you also need to turn on
    1125: CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
    1132: If you say Y here, the lock dependency engine will do
    1153: lock debugging then those bugs won't be detected, of course.)
    [all …]

locking-selftest.c
     117: init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
     120: raw_spin_lock_init(lock); \
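
Line 117 is the tail of a token-pasting macro that stamps out one
init_class_<name>() per lock class, so every selftest starts from freshly
initialized, independently keyed locks. Roughly, with the parameter list
modelled on lib/locking-selftest.c (details assumed):

    #define INIT_CLASS_FUNC(class)                                          \
    static noinline void                                                    \
    init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock,              \
                       struct mutex *mutex, struct rw_semaphore *rwsem)     \
    {                                                                       \
            raw_spin_lock_init(lock);                                       \
            rwlock_init(rwlock);                                            \
            mutex_init(mutex);                                              \
            init_rwsem(rwsem);                                              \
    }

    INIT_CLASS_FUNC(X1)     /* generates init_class_X1() */
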
/lib/raid6/
altivec.uc
      22: * bracketed this with preempt_disable/enable or in a lock)
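
The bracketing the comment asks for, modelled on the RAID-6 Altivec glue code:
VMX state is per-CPU, so the task must not migrate or be preempted by another
VMX user while the unit is enabled. A sketch:

    static void raid6_altivec_gen_syndrome(int disks, size_t bytes, void **ptrs)
    {
            preempt_disable();      /* VMX registers are per-CPU state */
            enable_kernel_altivec();

            /* ... compute the P/Q syndromes with VMX ... */

            disable_kernel_altivec();
            preempt_enable();
    }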