Lines Matching refs:val
195 atomic_andnot(_Q_PENDING_VAL, &lock->val); in clear_pending()
206 atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val); in clear_pending_set_locked()
221 u32 old, new, val = atomic_read(&lock->val); in xchg_tail() local
224 new = (val & _Q_LOCKED_PENDING_MASK) | tail; in xchg_tail()
230 old = atomic_cmpxchg_relaxed(&lock->val, val, new); in xchg_tail()
231 if (old == val) in xchg_tail()
234 val = old; in xchg_tail()
250 return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val); in queued_fetch_set_pending_acquire()
314 void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) in queued_spin_lock_slowpath() argument
334 if (val == _Q_PENDING_VAL) { in queued_spin_lock_slowpath()
336 val = atomic_cond_read_relaxed(&lock->val, in queued_spin_lock_slowpath()
343 if (val & ~_Q_LOCKED_MASK) in queued_spin_lock_slowpath()
351 val = queued_fetch_set_pending_acquire(lock); in queued_spin_lock_slowpath()
360 if (unlikely(val & ~_Q_LOCKED_MASK)) { in queued_spin_lock_slowpath()
363 if (!(val & _Q_PENDING_MASK)) in queued_spin_lock_slowpath()
380 if (val & _Q_LOCKED_MASK) in queued_spin_lock_slowpath()
381 atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK)); in queued_spin_lock_slowpath()
507 if ((val = pv_wait_head_or_lock(lock, node))) in queued_spin_lock_slowpath()
510 val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK)); in queued_spin_lock_slowpath()
534 if ((val & _Q_TAIL_MASK) == tail) { in queued_spin_lock_slowpath()
535 if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) in queued_spin_lock_slowpath()
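
The two helpers at lines 195 and 206 manipulate the pending bit when it shares the lock word with the tail (the _Q_PENDING_BITS == 1 configuration; the kernel also has byte-store variants). A minimal sketch reconstructed from the fragments above; note the trick in clear_pending_set_locked(), where a single atomic_add of (-_Q_PENDING_VAL + _Q_LOCKED_VAL) clears pending and sets locked in one read-modify-write:

static __always_inline void clear_pending(struct qspinlock *lock)
{
	/* drop only the pending bit, leave tail and locked untouched */
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	/* 0,1,0 -> 0,0,1 in a single atomic operation */
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}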
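
The hits at lines 221-234 all fall inside xchg_tail()'s compare-and-swap loop. A minimal sketch of how those fragments fit together, assuming the generic cmpxchg-loop variant (a 16-bit xchg variant exists when the tail occupies its own halfword); this is a reconstruction, not a verbatim copy of the file:

static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		/* keep the locked/pending bits, install the new tail encoding */
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/* relaxed ordering suffices: the MCS node was initialized before this */
		old = atomic_cmpxchg_relaxed(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;	/* previous value, so the caller can find the old tail to link behind */
}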
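
Lines 314-381 belong to the pending-bit fast path at the top of queued_spin_lock_slowpath(). A rough reconstruction of the control flow those hits imply (comments paraphrased; clear_pending(), queued_fetch_set_pending_acquire() and the _Q_* masks come from earlier in the same file):

	/*
	 * Wait for an in-progress pending->locked hand-over to finish,
	 * but only for a bounded number of spins.
	 */
	if (val == _Q_PENDING_VAL) {
		int cnt = _Q_PENDING_LOOPS;
		val = atomic_cond_read_relaxed(&lock->val,
					       (VAL != _Q_PENDING_VAL) || !cnt--);
	}

	/* any other contention (tail or pending set): queue as an MCS node */
	if (val & ~_Q_LOCKED_MASK)
		goto queue;

	/* trylock || pending: set the pending bit and fetch the old value */
	val = queued_fetch_set_pending_acquire(lock);

	/*
	 * Someone beat us to it (tail or pending already set): undo our
	 * pending bit if we were the one to set it, then queue.
	 */
	if (unlikely(val & ~_Q_LOCKED_MASK)) {
		if (!(val & _Q_PENDING_MASK))
			clear_pending(lock);
		goto queue;
	}

	/* we hold pending; wait for the current owner to release the lock */
	if (val & _Q_LOCKED_MASK)
		atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));

Once the owner goes away, the pending waiter performs the 0,1,0 -> 0,0,1 transition with clear_pending_set_locked() (line 206) and returns with the lock held.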
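
Lines 507-535 are the head-of-queue hand-off near the end of the slowpath: the waiter at the head of the MCS queue waits for both the locked and pending bits to clear, then tries to take the lock in a single atomic step. A sketch of that fragment (pv_wait_head_or_lock() is the paravirt hook; in native builds it simply returns 0):

	/*
	 * Head of the queue: wait until owner and pending waiter are gone,
	 * unless the paravirt hook hands us the lock directly.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * If we are still the last queued waiter (the tail encodes us) and
	 * nobody set pending meanwhile, clear the tail and take the lock
	 * with one cmpxchg; otherwise only set the locked byte and let the
	 * next waiter inherit the queue.
	 */
	if ((val & _Q_TAIL_MASK) == tail) {
		if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
			goto release;	/* uncontended hand-off */
	}
	set_locked(lock);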