Lines Matching refs:count

109 		#c, atomic_long_read(&(sem)->count),		\
226 long count = atomic_long_read(&sem->count); in is_rwsem_reader_owned() local
228 if (count & RWSEM_WRITER_MASK) in is_rwsem_reader_owned()
276 long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count); in rwsem_read_trylock()
337 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE); in __init_rwsem()
436 if (unlikely(atomic_long_read(&sem->count) < 0)) in rwsem_mark_wake()
448 oldcount = atomic_long_fetch_add(adjustment, &sem->count); in rwsem_mark_wake()
461 atomic_long_add(-adjustment, &sem->count); in rwsem_mark_wake()
527 if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF)) in rwsem_mark_wake()
531 atomic_long_add(adjustment, &sem->count); in rwsem_mark_wake()
566 long count, new; in rwsem_try_write_lock() local
570 count = atomic_long_read(&sem->count); in rwsem_try_write_lock()
572 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF); in rwsem_try_write_lock()
577 new = count; in rwsem_try_write_lock()
579 if (count & RWSEM_LOCK_MASK) { in rwsem_try_write_lock()
591 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)); in rwsem_try_write_lock()
612 long count = atomic_long_read(&sem->count); in rwsem_try_read_lock_unqueued() local
614 if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF)) in rwsem_try_read_lock_unqueued()
617 count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count); in rwsem_try_read_lock_unqueued()
618 if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) { in rwsem_try_read_lock_unqueued()
625 atomic_long_add(-RWSEM_READER_BIAS, &sem->count); in rwsem_try_read_lock_unqueued()
634 long count = atomic_long_read(&sem->count); in rwsem_try_write_lock_unqueued() local
636 while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) { in rwsem_try_write_lock_unqueued()
637 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, in rwsem_try_write_lock_unqueued()
638 count | RWSEM_WRITER_LOCKED)) { in rwsem_try_write_lock_unqueued()
777 long count = atomic_long_read(&sem->count); in rwsem_rspin_threshold() local
778 int readers = count >> RWSEM_READER_SHIFT; in rwsem_rspin_threshold()
997 long count, adjustment = -RWSEM_READER_BIAS; in rwsem_down_read_slowpath() local
1016 atomic_long_add(-RWSEM_READER_BIAS, &sem->count); in rwsem_down_read_slowpath()
1024 if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) { in rwsem_down_read_slowpath()
1051 if (adjustment && !(atomic_long_read(&sem->count) & in rwsem_down_read_slowpath()
1066 count = atomic_long_add_return(adjustment, &sem->count); in rwsem_down_read_slowpath()
1068 count = atomic_long_read(&sem->count); in rwsem_down_read_slowpath()
1076 if (!(count & RWSEM_LOCK_MASK)) { in rwsem_down_read_slowpath()
1080 if (wake || (!(count & RWSEM_WRITER_MASK) && in rwsem_down_read_slowpath()
1114 &sem->count); in rwsem_down_read_slowpath()
1141 long count; in rwsem_down_write_slowpath() local
1179 count = atomic_long_read(&sem->count); in rwsem_down_write_slowpath()
1189 if (count & RWSEM_WRITER_MASK) in rwsem_down_write_slowpath()
1192 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK) in rwsem_down_write_slowpath()
1207 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count); in rwsem_down_write_slowpath()
1252 count = atomic_long_read(&sem->count); in rwsem_down_write_slowpath()
1253 if (!(count & RWSEM_LOCK_MASK)) in rwsem_down_write_slowpath()
1284 atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count); in rwsem_down_write_slowpath()
1287 atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count); in rwsem_down_write_slowpath()
1301 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count) in rwsem_wake() argument
1374 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_read_trylock()
1390 if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_write()
1401 if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_write_killable()
1418 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_write_trylock()
1437 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count); in __up_read()
1462 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count); in __up_write()
1483 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count); in __downgrade_write()
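
All of the lines above read or modify the single atomic word sem->count; the function names suggest this listing comes from kernel/locking/rwsem.c. As a rough, user-space sketch of what those reads and adds mean, the snippet below mirrors the flag and bias layout defined near the top of that file (constants copied from roughly the v5.10-era source; they are an assumption here, not part of the listing, and may differ in other kernel versions). It decodes a count value the way lines such as 228, 614 and 778 do.

/*
 * Stand-alone sketch of the rwsem count layout; mirrors the macros in
 * kernel/locking/rwsem.c (~v5.10, assumed). Not kernel code: plain C,
 * no atomics, purely to show what the listed lines add to / test in count.
 */
#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(long))

#define RWSEM_WRITER_LOCKED	(1UL << 0)	/* writer owns the lock     */
#define RWSEM_FLAG_WAITERS	(1UL << 1)	/* wait list is non-empty   */
#define RWSEM_FLAG_HANDOFF	(1UL << 2)	/* handoff to first waiter  */
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK | RWSEM_READER_MASK)

static void decode(long count)
{
	/* Reader count lives above RWSEM_READER_SHIFT, as on line 778. */
	printf("count=%#lx  readers=%ld  writer=%d  waiters=%d  handoff=%d\n",
	       (unsigned long)count,
	       count >> RWSEM_READER_SHIFT,
	       !!(count & RWSEM_WRITER_MASK),
	       !!(count & RWSEM_FLAG_WAITERS),
	       !!(count & RWSEM_FLAG_HANDOFF));
}

int main(void)
{
	long count = 0;			/* RWSEM_UNLOCKED_VALUE (line 337)   */

	count += RWSEM_READER_BIAS;	/* reader fast path (lines 276, 617) */
	count += RWSEM_READER_BIAS;	/* a second reader                   */
	decode(count);

	count -= 2 * RWSEM_READER_BIAS;	/* both readers drop the bias,       */
					/* as __up_read() does on line 1437  */
	count |= RWSEM_WRITER_LOCKED;	/* writer sets RWSEM_WRITER_LOCKED   */
					/* (cmpxchg on lines 637-638)        */
	decode(count);

	return 0;
}

Readers add RWSEM_READER_BIAS on lock (lines 276, 617) and subtract it on unlock (line 1437), so the reader count occupies the upper bits while the low byte holds the writer-lock bit and the waiter/handoff flags tested throughout the slowpaths above.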