
Lines Matching full:tail

44 * In particular; where the traditional MCS lock consists of a tail pointer
46 * unlock the next pending (next->locked), we compress both these: {tail,
52 * we can encode the tail by combining the 2-bit nesting level with the cpu
53 * number. With one byte for the lock value and 3 bytes for the tail, only a
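For orientation, here is a minimal sketch of the packed 32-bit word those comments describe, assuming the NR_CPUS < 16K layout; the SKETCH_* names and exact bit positions are illustrative stand-ins for the _Q_* macros defined in qspinlock_types.h:

/* Sketch of the 32-bit lock word (NR_CPUS < 16K case assumed): */
#define SKETCH_LOCKED_OFFSET	0	/* bits  0- 7: locked byte          */
#define SKETCH_PENDING_OFFSET	8	/* bit      8: pending              */
#define SKETCH_TAIL_IDX_OFFSET	16	/* bits 16-17: tail index (nesting) */
#define SKETCH_TAIL_CPU_OFFSET	18	/* bits 18-31: tail cpu number + 1  */
#define SKETCH_TAIL_OFFSET	SKETCH_TAIL_IDX_OFFSET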
111 * We must be able to distinguish between no-tail and the tail at 0:0,
117 u32 tail; in encode_tail() local
119 tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET; in encode_tail()
120 tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ in encode_tail()
122 return tail; in encode_tail()
125 static inline __pure struct mcs_spinlock *decode_tail(u32 tail) in decode_tail() argument
127 int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; in decode_tail()
128 int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; in decode_tail()
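A standalone round-trip sketch of the encode/decode pair quoted above; the offsets are illustrative rather than the real _Q_* values, and the point to notice is the cpu + 1 bias, which keeps every valid tail code non-zero so that "no tail" and "cpu 0, idx 0" remain distinguishable:

#include <assert.h>
#include <stdint.h>

#define TAIL_IDX_OFFSET	16			/* illustrative */
#define TAIL_IDX_MASK	(3U << TAIL_IDX_OFFSET)
#define TAIL_CPU_OFFSET	18			/* illustrative */

static uint32_t encode_tail(int cpu, int idx)
{
	/* cpu + 1 so a valid tail is never 0; 0 means "no tail" */
	return ((uint32_t)(cpu + 1) << TAIL_CPU_OFFSET) |
	       ((uint32_t)idx << TAIL_IDX_OFFSET);	/* idx assumed < 4 */
}

static void decode_tail(uint32_t tail, int *cpu, int *idx)
{
	*cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
	*idx = (int)((tail & TAIL_IDX_MASK) >> TAIL_IDX_OFFSET);
}

int main(void)
{
	int cpu, idx;

	decode_tail(encode_tail(0, 0), &cpu, &idx);
	assert(cpu == 0 && idx == 0);
	assert(encode_tail(0, 0) != 0);	/* distinguishable from "no tail" */
	return 0;
}

(The kernel's decode_tail() goes one step further and looks up the per-CPU MCS node for that cpu/idx pair; the sketch stops at the raw fields.)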
167 * xchg_tail - Put in the new queue tail code word & retrieve previous one
169 * @tail : The new queue tail code word
170 * Return: The previous queue tail code word
172 * xchg(lock, tail), which heads an address dependency
176 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) in xchg_tail() argument
180 * MCS node is properly initialized before updating the tail. in xchg_tail()
182 return (u32)xchg_relaxed(&lock->tail, in xchg_tail()
183 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET; in xchg_tail()
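This first xchg_tail() variant works because, in the layout where the pending bit occupies a whole byte (NR_CPUS < 16K), the tail fills its own 16-bit halfword and can be swapped in a single exchange without touching the locked or pending bytes. A rough userspace analogue using C11 atomics, little-endian assumed and all names illustrative:

#include <stdatomic.h>
#include <stdint.h>

#define TAIL_OFFSET	16	/* illustrative */

struct sketch_qspinlock {
	union {
		_Atomic uint32_t val;
		struct {			/* little-endian layout assumed */
			uint8_t locked;		/* bits  0- 7 */
			uint8_t pending;	/* bits  8-15 (only bit 8 used) */
			_Atomic uint16_t tail;	/* bits 16-31 */
		};
	};
};

static uint32_t sketch_xchg_tail(struct sketch_qspinlock *lock, uint32_t tail)
{
	/* Swap only the tail halfword; locked/pending stay untouched. */
	return (uint32_t)atomic_exchange_explicit(&lock->tail,
			(uint16_t)(tail >> TAIL_OFFSET),
			memory_order_relaxed) << TAIL_OFFSET;
}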
211 * xchg_tail - Put in the new queue tail code word & retrieve previous one
213 * @tail : The new queue tail code word
214 * Return: The previous queue tail code word
216 * xchg(lock, tail)
220 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) in xchg_tail() argument
225 new = (val & _Q_LOCKED_PENDING_MASK) | tail; in xchg_tail()
229 * tail. in xchg_tail()
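The second variant is the fallback for when the tail does not line up with its own halfword: it loops with a compare-and-swap on the whole 32-bit word, preserving whatever locked/pending bits it observes while substituting the new tail. A hedged userspace sketch, again with illustrative masks:

#include <stdatomic.h>
#include <stdint.h>

#define LOCKED_PENDING_MASK	0x000001ffU	/* illustrative: locked byte + pending bit */

static uint32_t sketch_xchg_tail_cmpxchg(_Atomic uint32_t *lock, uint32_t tail)
{
	uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);
	uint32_t new;

	do {
		/* Keep locked/pending as observed, replace the tail bits. */
		new = (old & LOCKED_PENDING_MASK) | tail;
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));

	return old;	/* previous word; the caller extracts the old tail */
}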
299 * (queue tail, pending bit, lock value)
318 u32 old, tail; in queued_spin_lock_slowpath() local
402 tail = encode_tail(smp_processor_id(), idx); in queued_spin_lock_slowpath()
448 * publish the updated tail via xchg_tail() and potentially link in queued_spin_lock_slowpath()
454 * Publish the updated tail. in queued_spin_lock_slowpath()
460 old = xchg_tail(lock, tail); in queued_spin_lock_slowpath()
520 * If the queue head is the only one in the queue (lock value == tail) in queued_spin_lock_slowpath()
521 * and nobody is pending, clear the tail code and grab the lock. in queued_spin_lock_slowpath()
535 if ((val & _Q_TAIL_MASK) == tail) { in queued_spin_lock_slowpath()
542 * which will then detect the remaining tail and queue behind us in queued_spin_lock_slowpath()
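The last fragments show the queue head's uncontended exit: when the lock word still equals its own tail code (lock free, no pending bit, no later waiter), a single compare-and-swap replaces the whole word with just the locked value; if that fails, a new waiter has appeared and will queue behind us. A rough sketch with illustrative constants and simplified memory ordering:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TAIL_MASK	0xffff0000U	/* illustrative */
#define LOCKED_VAL	1U		/* illustrative locked-byte value */

/*
 * val is assumed to have been read with the locked and pending bits clear.
 * Returns true if we were the only queued waiter and now hold the lock.
 */
static bool sketch_try_clear_tail_and_lock(_Atomic uint32_t *lock,
					   uint32_t val, uint32_t tail)
{
	/* Only worth trying while our tail is still the last one queued. */
	if ((val & TAIL_MASK) != tail)
		return false;

	/* Replace {tail, 0, 0} with {0, 0, locked} in one shot. */
	return atomic_compare_exchange_strong_explicit(lock, &val, LOCKED_VAL,
						       memory_order_relaxed,
						       memory_order_relaxed);
}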