
Lines matching refs: count (all hits are in kernel/locking/rwsem.c)

73 		#c, atomic_long_read(&(sem)->count),		\
207 long count = atomic_long_read(&sem->count); in is_rwsem_reader_owned() local
209 if (count & RWSEM_WRITER_MASK) in is_rwsem_reader_owned()
259 *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count); in rwsem_read_trylock()
284 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) { in rwsem_write_trylock()
339 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE); in __init_rwsem()
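
These hits are all manipulations of the lock word sem->count, which packs the writer bit, the waiter and handoff flags, and the reader count into one atomic long. Below is a minimal sketch of the assumed bit layout (values follow mainline conventions and may differ across kernel versions); it explains why line 209 tests RWSEM_WRITER_MASK, why line 259 takes a read lock by adding RWSEM_READER_BIAS, why line 284 takes a write lock with a cmpxchg to RWSEM_WRITER_LOCKED, and why line 339 initializes the word to RWSEM_UNLOCKED_VALUE (all bits clear):

    /*
     * Assumed layout of sem->count (illustrative; check your kernel
     * version for the authoritative values):
     *
     * Bit 0     - RWSEM_WRITER_LOCKED: a writer holds the lock
     * Bit 1     - RWSEM_FLAG_WAITERS:  the wait list is non-empty
     * Bit 2     - RWSEM_FLAG_HANDOFF:  a waiter has requested handoff
     * Bits 8..N - reader count (each reader adds RWSEM_READER_BIAS)
     */
    #define RWSEM_WRITER_LOCKED   (1UL << 0)
    #define RWSEM_FLAG_WAITERS    (1UL << 1)
    #define RWSEM_FLAG_HANDOFF    (1UL << 2)

    #define RWSEM_READER_SHIFT    8
    #define RWSEM_READER_BIAS     (1UL << RWSEM_READER_SHIFT)
    #define RWSEM_READER_MASK     (~(RWSEM_READER_BIAS - 1))
    #define RWSEM_WRITER_MASK     RWSEM_WRITER_LOCKED
    #define RWSEM_LOCK_MASK       (RWSEM_WRITER_MASK | RWSEM_READER_MASK)
    #define RWSEM_UNLOCKED_VALUE  0UL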
412 atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count); in rwsem_del_waiter()
465 if (unlikely(atomic_long_read(&sem->count) < 0)) in rwsem_mark_wake()
477 oldcount = atomic_long_fetch_add(adjustment, &sem->count); in rwsem_mark_wake()
492 atomic_long_add(-adjustment, &sem->count); in rwsem_mark_wake()
546 oldcount = atomic_long_read(&sem->count); in rwsem_mark_wake()
565 atomic_long_add(adjustment, &sem->count); in rwsem_mark_wake()
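
Lines 477 and 492 in rwsem_mark_wake() show an optimistic-grant pattern: the waker adds the reader bias for the first waiter up front, then backs the grant out if a writer stole the lock in the meantime. A hedged userspace sketch of just that pattern, using C11 atomics and the constants assumed above (the function name is mine; the real code also deals with the handoff bit and wakeup bookkeeping):

    #include <stdatomic.h>

    /* Grant-then-undo, as at lines 477-492: returns nonzero if the
     * speculative reader grant stuck. */
    static int grant_to_first_reader(_Atomic long *countp, long adjustment)
    {
        long oldcount = atomic_fetch_add(countp, adjustment);

        if (oldcount & RWSEM_WRITER_MASK) {
            /* A writer got in first: undo the speculative grant. */
            atomic_fetch_sub(countp, adjustment);
            return 0;
        }
        return 1;
    }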
628 long count, new; in rwsem_try_write_lock() local
632 count = atomic_long_read(&sem->count); in rwsem_try_write_lock()
634 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF); in rwsem_try_write_lock()
646 new = count; in rwsem_try_write_lock()
648 if (count & RWSEM_LOCK_MASK) { in rwsem_try_write_lock()
666 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)); in rwsem_try_write_lock()
712 long count = atomic_long_read(&sem->count); in rwsem_try_write_lock_unqueued() local
714 while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) { in rwsem_try_write_lock_unqueued()
715 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, in rwsem_try_write_lock_unqueued()
716 count | RWSEM_WRITER_LOCKED)) { in rwsem_try_write_lock_unqueued()
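
Lines 712-716 (rwsem_try_write_lock_unqueued()) are the opportunistic write-lock loop used by spinning writers: retry an acquire-cmpxchg for as long as the word shows no owner and no pending handoff. A minimal C11 sketch under the same assumed layout; a failed compare-exchange refreshes count with the observed value, which is exactly the behavior the kernel gets from atomic_long_try_cmpxchg_acquire():

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool try_write_lock_unqueued(_Atomic long *countp)
    {
        long count = atomic_load_explicit(countp, memory_order_relaxed);

        while (!(count & (RWSEM_LOCK_MASK | RWSEM_FLAG_HANDOFF))) {
            if (atomic_compare_exchange_weak_explicit(countp, &count,
                    count | RWSEM_WRITER_LOCKED,
                    memory_order_acquire, memory_order_relaxed))
                return true;   /* write lock acquired */
            /* CAS failed: 'count' now holds the fresh value and the
             * loop condition is re-evaluated. */
        }
        return false;          /* owned, or a waiter demands handoff */
    }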
834 long count = atomic_long_read(&sem->count); in rwsem_rspin_threshold() local
835 int readers = count >> RWSEM_READER_SHIFT; in rwsem_rspin_threshold()
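
Line 835 recovers the number of read-lock holders by shifting the flag bits out of the lock word. A worked example with the assumed layout:

    /* Three readers hold the lock and the waiter bit is set: */
    long count  = 3 * RWSEM_READER_BIAS | RWSEM_FLAG_WAITERS;  /* 0x302 */
    int readers = count >> RWSEM_READER_SHIFT;                 /* 3     */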
1007 static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count, in rwsem_cond_wake_waiter() argument
1012 if (count & RWSEM_WRITER_MASK) in rwsem_cond_wake_waiter()
1015 if (count & RWSEM_READER_MASK) { in rwsem_cond_wake_waiter()
1028 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state) in rwsem_down_read_slowpath() argument
1031 long rcnt = (count >> RWSEM_READER_SHIFT); in rwsem_down_read_slowpath()
1044 (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED)) in rwsem_down_read_slowpath()
1051 if (steal && !(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) { in rwsem_down_read_slowpath()
1060 if ((rcnt == 1 || rspin) && (count & RWSEM_FLAG_WAITERS)) { in rwsem_down_read_slowpath()
1092 if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) { in rwsem_down_read_slowpath()
1109 count = atomic_long_add_return(adjustment, &sem->count); in rwsem_down_read_slowpath()
1111 rwsem_cond_wake_waiter(sem, count, &wake_q); in rwsem_down_read_slowpath()
1191 rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count), in rwsem_down_write_slowpath()
1203 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count); in rwsem_down_write_slowpath()
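
Lines 1007-1015 (rwsem_cond_wake_waiter()) pick a wakeup policy from the same bit tests: wake nobody while a writer still owns the lock, wake only readers while readers own it, and wake anyone once the lock is free. A hedged reconstruction of the control flow (the RWSEM_WAKE_* names follow the kernel's wake-type enum; everything else is sketch):

    if (count & RWSEM_WRITER_MASK)
        return;                          /* writer-owned: wake nobody */
    if (count & RWSEM_READER_MASK)
        wake_type = RWSEM_WAKE_READERS;  /* more readers may pile on  */
    else
        wake_type = RWSEM_WAKE_ANY;      /* lock free: wake anyone    */
    rwsem_mark_wake(sem, wake_type, wake_q);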
1313 long count; in __down_read_common() local
1316 if (!rwsem_read_trylock(sem, &count)) { in __down_read_common()
1317 if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) { in __down_read_common()
1351 tmp = atomic_long_read(&sem->count); in __down_read_trylock()
1353 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_read_trylock()
1424 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count); in __up_read()
1452 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count); in __up_write()
1476 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count); in __downgrade_write()
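
Lines 1424-1476 are the unlock side: __up_read() drops one reader bias with release ordering, __up_write() clears the writer bit, and __downgrade_write() swaps the writer bit for one reader bias in a single atomic add, so the lock never appears free during the downgrade. Hedged userspace analogues (the real kernel paths also inspect the returned value to decide whether waiters need waking):

    #include <stdatomic.h>

    static void up_read_sketch(_Atomic long *countp)
    {
        /* Line 1424: release one reader's bias. */
        atomic_fetch_sub_explicit(countp, (long)RWSEM_READER_BIAS,
                                  memory_order_release);
    }

    static void up_write_sketch(_Atomic long *countp)
    {
        /* Line 1452: clear RWSEM_WRITER_LOCKED with release ordering. */
        atomic_fetch_sub_explicit(countp, (long)RWSEM_WRITER_LOCKED,
                                  memory_order_release);
    }

    static void downgrade_write_sketch(_Atomic long *countp)
    {
        /* Line 1476: -RWSEM_WRITER_LOCKED + RWSEM_READER_BIAS in one
         * atomic step, i.e. become a reader without ever unlocking. */
        atomic_fetch_add_explicit(countp,
                (long)(RWSEM_READER_BIAS - RWSEM_WRITER_LOCKED),
                memory_order_release);
    }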
1592 int count = atomic_read(&sem->rwbase.readers); in is_rwsem_reader_owned() local
1594 return count < 0 && count != READER_BIAS; in is_rwsem_reader_owned()
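
The last two hits (lines 1592-1594) come from the PREEMPT_RT build, where the rwsem sits on top of an rt_mutex-based rwbase and rwbase.readers is a plain signed atomic_t rather than the packed word above. Assuming the mainline convention that readers idles at a large negative READER_BIAS and each reader increments it, "reader-owned" means negative but not equal to the idle bias. A sketch under that assumption (RT_READER_BIAS is my stand-in for the kernel's READER_BIAS):

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define RT_READER_BIAS INT_MIN   /* assumed idle value of 'readers' */

    static bool is_reader_owned_rt(_Atomic int *readersp)
    {
        int count = atomic_load(readersp);

        /* Negative but no longer the idle bias: at least one reader. */
        return count < 0 && count != RT_READER_BIAS;
    }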