Lines matching refs:val (kernel/locking/qspinlock.c)

196 atomic_andnot(_Q_PENDING_VAL, &lock->val); in clear_pending()
207 atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val); in clear_pending_set_locked()
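
The two helpers above operate on the qspinlock word, which packs a locked byte, a pending byte, and the MCS tail into one 32-bit value. Below is a minimal userspace sketch of that layout and of both helpers, using C11 atomics in place of the kernel's atomic_t API; the mask values assume the NR_CPUS < 16K layout from qspinlock_types.h, and the code is illustrative, not the kernel's.

    #include <stdatomic.h>
    #include <stdint.h>

    #define _Q_LOCKED_VAL          (1U << 0)     /* bits 0-7: locked byte */
    #define _Q_PENDING_VAL         (1U << 8)     /* bits 8-15: pending byte */
    #define _Q_LOCKED_MASK         0x000000ffU
    #define _Q_PENDING_MASK        0x0000ff00U
    #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
    #define _Q_TAIL_MASK           0xffff0000U   /* bits 16-31: MCS tail */

    struct qspinlock { _Atomic uint32_t val; };

    /* clear_pending(): *,1,* -> *,0,*. The kernel's atomic_andnot() is
     * modeled here as a fetch-and with the complemented mask. */
    static void clear_pending(struct qspinlock *lock)
    {
        atomic_fetch_and_explicit(&lock->val, ~_Q_PENDING_VAL,
                                  memory_order_relaxed);
    }

    /* clear_pending_set_locked(): *,1,0 -> *,0,1 in a single add. This
     * works because pending is set and locked is clear, so adding
     * (-_Q_PENDING_VAL + _Q_LOCKED_VAL) cannot carry across fields. */
    static void clear_pending_set_locked(struct qspinlock *lock)
    {
        atomic_fetch_add_explicit(&lock->val,
                                  -_Q_PENDING_VAL + _Q_LOCKED_VAL,
                                  memory_order_relaxed);
    }
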
222 u32 old, new, val = atomic_read(&lock->val); in xchg_tail() local
225 new = (val & _Q_LOCKED_PENDING_MASK) | tail; in xchg_tail()
231 old = atomic_cmpxchg_relaxed(&lock->val, val, new); in xchg_tail()
232 if (old == val) in xchg_tail()
235 val = old; in xchg_tail()
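
The xchg_tail() loop above (lines 222-235) is a standard CMPXCHG retry: build a new word that keeps the locked and pending bytes but substitutes the caller's tail, and retry until no other CPU modified the word in between. The previous word is returned so the caller can find its predecessor in the MCS queue. A sketch under the same C11 model, reusing the masks above (the kernel also has a cheaper halfword-exchange variant when the tail occupies its own 16 bits):

    static uint32_t xchg_tail(struct qspinlock *lock, uint32_t tail)
    {
        uint32_t val = atomic_load_explicit(&lock->val, memory_order_relaxed);

        for (;;) {
            /* Preserve locked + pending, swap in our tail encoding. */
            uint32_t new = (val & _Q_LOCKED_PENDING_MASK) | tail;

            /* On failure, compare_exchange rewrites val with the current
             * word, so the next iteration retries against fresh state. */
            if (atomic_compare_exchange_weak_explicit(&lock->val, &val, new,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                return val;   /* the pre-swap word: encodes the old tail */
        }
    }
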
251 return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val); in queued_fetch_set_pending_acquire()
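
Line 251 sets the pending bit with a single fetch-or and returns the previous word with acquire ordering, so the caller can classify any race it lost. The C11 equivalent is one line:

    /* 0,0,* -> 0,1,*: claim the pending bit, acquire-ordered. */
    static uint32_t queued_fetch_set_pending_acquire(struct qspinlock *lock)
    {
        return atomic_fetch_or_explicit(&lock->val, _Q_PENDING_VAL,
                                        memory_order_acquire);
    }
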
315 void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) in queued_spin_lock_slowpath() argument
335 if (val == _Q_PENDING_VAL) { in queued_spin_lock_slowpath()
337 val = atomic_cond_read_relaxed(&lock->val, in queued_spin_lock_slowpath()
344 if (val & ~_Q_LOCKED_MASK) in queued_spin_lock_slowpath()
352 val = queued_fetch_set_pending_acquire(lock); in queued_spin_lock_slowpath()
361 if (unlikely(val & ~_Q_LOCKED_MASK)) { in queued_spin_lock_slowpath()
364 if (!(val & _Q_PENDING_MASK)) in queued_spin_lock_slowpath()
381 if (val & _Q_LOCKED_MASK) in queued_spin_lock_slowpath()
382 atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK)); in queued_spin_lock_slowpath()
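
Lines 315-382 are the pending stage of the slowpath: a single contending CPU parks in the pending bit rather than building an MCS node. The sketch below reconstructs that control flow under the same C11 model, reusing the helpers above. pending_stage is an illustrative name, the unbounded spin loops stand in for the kernel's atomic_cond_read_relaxed()/_acquire() (which can bound the spin or use architecture wait hints), and the final clear_pending_set_locked() call corresponds to a source line that simply does not match "val" and so is absent from this list.

    static void pending_stage(struct qspinlock *lock, uint32_t val)
    {
        /* 0,1,0: a pending->locked hand-off is in flight; wait it out. */
        if (val == _Q_PENDING_VAL) {
            do {
                val = atomic_load_explicit(&lock->val, memory_order_relaxed);
            } while (val == _Q_PENDING_VAL);
        }

        /* Any tail or pending bit set: real contention, go queue. */
        if (val & ~_Q_LOCKED_MASK)
            goto queue;

        /* 0,0,* -> 0,1,*: try to claim the pending bit. */
        val = queued_fetch_set_pending_acquire(lock);

        /* Lost a race with another waiter: undo pending if we set it. */
        if (val & ~_Q_LOCKED_MASK) {
            if (!(val & _Q_PENDING_MASK))
                clear_pending(lock);
            goto queue;
        }

        /* We hold pending; spin until the owner drops the locked byte,
         * then take the lock: 0,1,0 -> 0,0,1. */
        if (val & _Q_LOCKED_MASK)
            while (atomic_load_explicit(&lock->val, memory_order_acquire)
                   & _Q_LOCKED_MASK)
                ;

        clear_pending_set_locked(lock);
        return;

    queue:
        ;   /* fall through to the MCS queueing path (not sketched here) */
    }
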
508 if ((val = pv_wait_head_or_lock(lock, node))) in queued_spin_lock_slowpath()
511 val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK)); in queued_spin_lock_slowpath()
535 if ((val & _Q_TAIL_MASK) == tail) { in queued_spin_lock_slowpath()
536 if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) in queued_spin_lock_slowpath()
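
Lines 508-536 show the queue-head hand-off: once a waiter reaches the head of the MCS queue it waits for both the locked byte and the pending bit to clear, and if it is also the last queued waiter it claims the lock and erases the tail in one CMPXCHG (n,0,0 -> 0,0,1). A sketch under the same model; try_claim_as_head is an illustrative name, and the paravirt hook pv_wait_head_or_lock() from line 508 is omitted:

    /* Returns 1 if the lock was taken and the queue emptied; 0 means
     * more waiters arrived, so the caller must set the locked byte and
     * hand the MCS lock to its successor instead. */
    static int try_claim_as_head(struct qspinlock *lock, uint32_t tail)
    {
        uint32_t val;

        /* Wait until both the locked byte and the pending bit clear. */
        do {
            val = atomic_load_explicit(&lock->val, memory_order_acquire);
        } while (val & _Q_LOCKED_PENDING_MASK);

        /* Sole remaining waiter: take the lock and clear our tail in
         * one shot, as on line 536. */
        if ((val & _Q_TAIL_MASK) == tail &&
            atomic_compare_exchange_strong_explicit(&lock->val, &val,
                                                    _Q_LOCKED_VAL,
                                                    memory_order_relaxed,
                                                    memory_order_relaxed))
            return 1;

        return 0;
    }
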