Lines Matching refs:lock (references to the lock argument/member in the Linux kernel's paravirtual qspinlock implementation, kernel/locking/qspinlock_paravirt.h)

81 static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)  in pv_hybrid_queued_unfair_trylock()  argument
88 int val = atomic_read(&lock->val); in pv_hybrid_queued_unfair_trylock()
91 (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) { in pv_hybrid_queued_unfair_trylock()
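The fragments above come from the hybrid trylock that lets a vCPU steal the lock while queued waiters are parked. Below is a condensed reconstruction built around the lines shown; the loop structure, the mask checks (_Q_LOCKED_PENDING_MASK, _Q_TAIL_MASK, _Q_PENDING_MASK) and the cpu_relax() are my reading of the surrounding code, not verbatim source.

/*
 * Sketch: steal the lock with a byte-sized cmpxchg as long as neither the
 * locked nor the pending bit is set; stay in this unfair mode only while
 * queued waiters are present and the pending bit is clear.
 */
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
    for (;;) {
        int val = atomic_read(&lock->val);

        if (!(val & _Q_LOCKED_PENDING_MASK) &&
            (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0))
            return true;                        /* stole the lock */

        /* Fall back to the normal slowpath once the queue drains
         * or a waiter has set the pending bit. */
        if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
            return false;

        cpu_relax();
    }
}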
109 static __always_inline void set_pending(struct qspinlock *lock) in set_pending() argument
111 WRITE_ONCE(lock->pending, 1); in set_pending()
119 static __always_inline int trylock_clear_pending(struct qspinlock *lock) in trylock_clear_pending() argument
121 return !READ_ONCE(lock->locked) && in trylock_clear_pending()
122 (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL, in trylock_clear_pending()
126 static __always_inline void set_pending(struct qspinlock *lock) in set_pending() argument
128 atomic_or(_Q_PENDING_VAL, &lock->val); in set_pending()
131 static __always_inline int trylock_clear_pending(struct qspinlock *lock) in trylock_clear_pending() argument
133 int val = atomic_read(&lock->val); in trylock_clear_pending()
146 val = atomic_cmpxchg_acquire(&lock->val, old, new); in trylock_clear_pending()
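Two builds of the pending-bit helpers appear above: one for configurations where the pending byte is individually addressable, and one that operates on the whole lock word. The sketch below assembles the fragments into readable form; the #if condition and the loop scaffolding around line 146 are my assumptions about the surrounding context.

#if _Q_PENDING_BITS == 8
/* Pending byte is addressable: plain byte store, 16-bit cmpxchg. */
static __always_inline void set_pending(struct qspinlock *lock)
{
    WRITE_ONCE(lock->pending, 1);
}

/* Succeeds only if the locked byte is clear and only pending is set. */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
    return !READ_ONCE(lock->locked) &&
           (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
                            _Q_LOCKED_VAL) == _Q_PENDING_VAL);
}
#else
/* Otherwise the whole lock word is updated atomically. */
static __always_inline void set_pending(struct qspinlock *lock)
{
    atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
    int val = atomic_read(&lock->val);

    for (;;) {
        int old, new;

        if (val & _Q_LOCKED_MASK)
            break;                      /* someone else holds the lock */

        /* Swap pending for locked, preserving the tail bits. */
        old = val;
        new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
        val = atomic_cmpxchg_acquire(&lock->val, old, new);

        if (val == old)
            return 1;
    }
    return 0;
}
#endif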
172 struct qspinlock *lock; member
212 static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node) in pv_hash() argument
214 unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits); in pv_hash()
220 if (!cmpxchg(&he->lock, NULL, lock)) { in pv_hash()
223 return &he->lock; in pv_hash()
239 static struct pv_node *pv_unhash(struct qspinlock *lock) in pv_unhash() argument
241 unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits); in pv_unhash()
246 if (READ_ONCE(he->lock) == lock) { in pv_unhash()
248 WRITE_ONCE(he->lock, NULL); in pv_unhash()
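pv_hash()/pv_unhash() above maintain a table that maps a lock to the pv_node of the vCPU sleeping on it, so the unlocker can find whom to kick. A simplified sketch follows, assuming a flat, linearly probed array; the real table's cacheline-sized buckets, boot-time sizing and iteration macro are omitted, so the probing loop here is illustrative only.

struct pv_hash_entry {
    struct qspinlock *lock;     /* line 172: NULL means the slot is free */
    struct pv_node   *node;
};

/* Allocated and sized at boot in the real implementation. */
static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits;

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
    unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
    struct pv_hash_entry *he;

    for (offset = 0; ; offset++, hash++) {
        he = &pv_lock_hash[hash & ((1UL << pv_lock_hash_bits) - 1)];
        if (!cmpxchg(&he->lock, NULL, lock)) {  /* claimed a free slot */
            WRITE_ONCE(he->node, node);
            return &he->lock;                   /* caller may unhash via this */
        }
    }
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
    unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
    struct pv_hash_entry *he;
    struct pv_node *node;

    for (offset = 0; ; offset++, hash++) {
        he = &pv_lock_hash[hash & ((1UL << pv_lock_hash_bits) - 1)];
        if (READ_ONCE(he->lock) == lock) {
            node = READ_ONCE(he->node);
            WRITE_ONCE(he->lock, NULL);         /* free the slot */
            return node;
        }
    }
}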
360 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) in pv_kick_node() argument
391 WRITE_ONCE(lock->locked, _Q_SLOW_VAL); in pv_kick_node()
392 (void)pv_hash(lock, pn); in pv_kick_node()
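pv_kick_node() is called by the CPU that has just taken the lock, on behalf of the next MCS waiter: if that waiter has already halted itself, the caller hashes the lock for it and marks the locked byte _Q_SLOW_VAL so the following unlock takes the kick path. Only lines 391-392 are shown above, so the state check below is my reconstruction; vcpu_halted/vcpu_hashed are the pv_node state values.

/*
 * Sketch of pv_kick_node(): if the successor vCPU already parked itself,
 * hash the lock on its behalf and flag the lock word as slow.  No atomic
 * is needed for the store since this CPU still holds the lock.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
    struct pv_node *pn = (struct pv_node *)node;

    /* Successor still running?  It will notice ->locked on its own. */
    if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
        return;

    WRITE_ONCE(lock->locked, _Q_SLOW_VAL);      /* line 391 */
    (void)pv_hash(lock, pn);                    /* line 392: publish for the unlocker */
}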
403 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) in pv_wait_head_or_lock() argument
433 set_pending(lock); in pv_wait_head_or_lock()
435 if (trylock_clear_pending(lock)) in pv_wait_head_or_lock()
439 clear_pending(lock); in pv_wait_head_or_lock()
443 lp = pv_hash(lock, pn); in pv_wait_head_or_lock()
456 if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) { in pv_wait_head_or_lock()
462 WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); in pv_wait_head_or_lock()
470 pv_wait(&lock->locked, _Q_SLOW_VAL); in pv_wait_head_or_lock()
485 return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL); in pv_wait_head_or_lock()
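pv_wait_head_or_lock() is what the queue head runs instead of spinning indefinitely: it spins with the pending bit set to block lock stealing, and before halting it hashes the lock and flips the locked byte to _Q_SLOW_VAL so the eventual unlocker knows a vCPU has to be kicked. The condensed sketch below is stitched from the lines above; wait counts, lock-event accounting and the vcpu_hashed shortcut of the real function are left out.

static u32 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
    struct pv_node *pn = (struct pv_node *)node;
    struct qspinlock **lp = NULL;
    int loop;

    for (;;) {
        set_pending(lock);                      /* stop would-be stealers */
        for (loop = SPIN_THRESHOLD; loop; loop--) {
            if (trylock_clear_pending(lock))
                goto gotlock;                   /* pending -> locked succeeded */
            cpu_relax();
        }
        clear_pending(lock);

        if (!lp) {                              /* hash once, before _Q_SLOW_VAL */
            lp = pv_hash(lock, pn);
            /*
             * If the lock turned out to be free, the xchg just took it:
             * restore _Q_LOCKED_VAL and unhash, since no kick is needed.
             */
            if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
                WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
                WRITE_ONCE(*lp, NULL);
                goto gotlock;
            }
        }
        pv_wait(&lock->locked, _Q_SLOW_VAL);    /* halt until kicked */
    }

gotlock:
    return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}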
493 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked) in __pv_queued_spin_unlock_slowpath() argument
500 (unsigned long)lock, atomic_read(&lock->val)); in __pv_queued_spin_unlock_slowpath()
517 node = pv_unhash(lock); in __pv_queued_spin_unlock_slowpath()
523 smp_store_release(&lock->locked, 0); in __pv_queued_spin_unlock_slowpath()
547 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock) in __pv_queued_spin_unlock() argument
556 locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0); in __pv_queued_spin_unlock()
560 __pv_queued_spin_unlock_slowpath(lock, locked); in __pv_queued_spin_unlock()
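The unlock side pairs with the above: the fast path clears an ordinary _Q_LOCKED_VAL with one release cmpxchg, and any other value means a waiter stored _Q_SLOW_VAL and is halted, so the slow path unhashes its node and kicks the vCPU. A trimmed sketch of the two functions; the corrupted-lock warning at line 500 is reduced to an early return and the statistics are dropped.

static void __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
    struct pv_node *node;

    if (unlikely(locked != _Q_SLOW_VAL))
        return;                         /* corrupted lock word; real code warns here */

    /* Order the hash lookup after observing _Q_SLOW_VAL, matching the
     * hash-then-_Q_SLOW_VAL order on the wait side. */
    smp_rmb();

    node = pv_unhash(lock);

    smp_store_release(&lock->locked, 0);        /* actually release the lock */
    pv_kick(node->cpu);                         /* wake the halted vCPU */
}

__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
    u8 locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);

    if (likely(locked == _Q_LOCKED_VAL))
        return;                         /* fast path: nobody to kick */

    __pv_queued_spin_unlock_slowpath(lock, locked);
}

The unhash has to happen before the store-release: once the lock word is cleared, the lock's memory may be freed or reused, but the pv_node reference obtained from the hash can still be used to kick the waiter.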