Lines Matching refs:count
73 #c, atomic_long_read(&(sem)->count), \
195 long count = atomic_long_read(&sem->count); in is_rwsem_reader_owned() local
197 if (count & RWSEM_WRITER_MASK) in is_rwsem_reader_owned()
245 *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count); in rwsem_read_trylock()
265 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) { in rwsem_write_trylock()
330 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE); in __init_rwsem()
401 atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count); in rwsem_del_waiter()
454 if (unlikely(atomic_long_read(&sem->count) < 0)) in rwsem_mark_wake()
466 oldcount = atomic_long_fetch_add(adjustment, &sem->count); in rwsem_mark_wake()
481 atomic_long_add(-adjustment, &sem->count); in rwsem_mark_wake()
535 oldcount = atomic_long_read(&sem->count); in rwsem_mark_wake()
554 atomic_long_add(adjustment, &sem->count); in rwsem_mark_wake()
616 long count, new; in rwsem_try_write_lock() local
620 count = atomic_long_read(&sem->count); in rwsem_try_write_lock()
622 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF); in rwsem_try_write_lock()
634 new = count; in rwsem_try_write_lock()
636 if (count & RWSEM_LOCK_MASK) { in rwsem_try_write_lock()
654 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)); in rwsem_try_write_lock()
700 long count = atomic_long_read(&sem->count); in rwsem_try_write_lock_unqueued() local
702 while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) { in rwsem_try_write_lock_unqueued()
703 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, in rwsem_try_write_lock_unqueued()
704 count | RWSEM_WRITER_LOCKED)) { in rwsem_try_write_lock_unqueued()
824 long count = atomic_long_read(&sem->count); in rwsem_rspin_threshold() local
825 int readers = count >> RWSEM_READER_SHIFT; in rwsem_rspin_threshold()
1000 static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count, in rwsem_cond_wake_waiter() argument
1005 if (count & RWSEM_WRITER_MASK) in rwsem_cond_wake_waiter()
1008 if (count & RWSEM_READER_MASK) { in rwsem_cond_wake_waiter()
1021 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state) in rwsem_down_read_slowpath() argument
1024 long rcnt = (count >> RWSEM_READER_SHIFT); in rwsem_down_read_slowpath()
1035 (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED)) in rwsem_down_read_slowpath()
1041 if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) { in rwsem_down_read_slowpath()
1049 if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) { in rwsem_down_read_slowpath()
1075 if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) { in rwsem_down_read_slowpath()
1094 count = atomic_long_add_return(adjustment, &sem->count); in rwsem_down_read_slowpath()
1096 rwsem_cond_wake_waiter(sem, count, &wake_q); in rwsem_down_read_slowpath()
1176 rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count), in rwsem_down_write_slowpath()
1188 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count); in rwsem_down_write_slowpath()
1299 long count; in __down_read_common() local
1302 if (!rwsem_read_trylock(sem, &count)) { in __down_read_common()
1303 if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) { in __down_read_common()
1337 tmp = atomic_long_read(&sem->count); in __down_read_trylock()
1339 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_read_trylock()
1392 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count); in __up_read()
1420 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count); in __up_write()
1443 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count); in __downgrade_write()
1552 int count = atomic_read(&sem->rwbase.readers); in is_rwsem_reader_owned() local
1554 return count < 0 && count != READER_BIAS; in is_rwsem_reader_owned()
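
The matches above all revolve around the rw_semaphore count word (the file is presumably kernel/locking/rwsem.c): readers add and subtract RWSEM_READER_BIAS, a writer claims the low bit, and the other low bits carry the WAITERS/HANDOFF flags. The user-space sketch below only illustrates that layout and the fast paths visible in the listing (rwsem_read_trylock, __up_read, rwsem_write_trylock, __downgrade_write); the bit values are stated from memory of mainline rwsem.c, and the demo_* helpers are hypothetical stand-ins, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Assumed layout of sem->count (check rwsem.c itself for the authoritative values). */
#define RWSEM_WRITER_LOCKED   (1UL << 0)                  /* bit 0: a writer owns the lock */
#define RWSEM_FLAG_WAITERS    (1UL << 1)                  /* bit 1: wait list is non-empty */
#define RWSEM_FLAG_HANDOFF    (1UL << 2)                  /* bit 2: hand the lock to the first waiter */
#define RWSEM_READER_SHIFT    8
#define RWSEM_READER_BIAS     (1UL << RWSEM_READER_SHIFT) /* each reader adds 256 */
#define RWSEM_READER_MASK     (~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK     RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK       (RWSEM_WRITER_MASK | RWSEM_READER_MASK)
#define RWSEM_UNLOCKED_VALUE  0UL

static atomic_ulong count = RWSEM_UNLOCKED_VALUE;

/*
 * Reader fast path, loosely after rwsem_read_trylock()/__up_read().  The real
 * code keeps the bias and falls into the slow path on failure; backing the
 * bias out here is a simplification for the demo.
 */
static bool demo_down_read_trylock(void)
{
	unsigned long c = atomic_fetch_add_explicit(&count, RWSEM_READER_BIAS,
						    memory_order_acquire) + RWSEM_READER_BIAS;

	if (c & RWSEM_WRITER_MASK) {
		atomic_fetch_sub_explicit(&count, RWSEM_READER_BIAS,
					  memory_order_relaxed);
		return false;
	}
	return true;
}

static void demo_up_read(void)
{
	atomic_fetch_sub_explicit(&count, RWSEM_READER_BIAS, memory_order_release);
}

/* Writer fast path, after rwsem_write_trylock(): 0 -> RWSEM_WRITER_LOCKED. */
static bool demo_down_write_trylock(void)
{
	unsigned long expected = RWSEM_UNLOCKED_VALUE;

	return atomic_compare_exchange_strong_explicit(&count, &expected,
						       RWSEM_WRITER_LOCKED,
						       memory_order_acquire,
						       memory_order_relaxed);
}

/*
 * Downgrade, after __downgrade_write(): adding
 * -RWSEM_WRITER_LOCKED + RWSEM_READER_BIAS atomically swaps writer
 * ownership for one reader count.
 */
static void demo_downgrade_write(void)
{
	atomic_fetch_add_explicit(&count,
				  -RWSEM_WRITER_LOCKED + RWSEM_READER_BIAS,
				  memory_order_release);
}

int main(void)
{
	demo_down_write_trylock();
	demo_downgrade_write();
	printf("readers held: %lu\n", atomic_load(&count) >> RWSEM_READER_SHIFT);
	demo_up_read();
	printf("reacquired as reader: %d\n", demo_down_read_trylock());
	return 0;
}

In the kernel the count is a signed atomic_long_t rather than the unsigned word used here, and the PREEMPT_RT variant at the bottom of the listing does not use this layout at all: it tracks readers in sem->rwbase.readers with its own encoding.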