Lines matching refs:sem — cross-reference hits for the rw_semaphore pointer sem in the kernel's rwsem slow-path code (the "xadd" algorithm). The leading number on each hit is its source line, the trailing "in func()" names the enclosing function, and "argument" marks the lines where sem is a function parameter.
72 void __init_rwsem(struct rw_semaphore *sem, const char *name, in __init_rwsem() argument
79 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); in __init_rwsem()
80 lockdep_init_map(&sem->dep_map, name, key, 0); in __init_rwsem()
82 sem->count = RWSEM_UNLOCKED_VALUE; in __init_rwsem()
83 raw_spin_lock_init(&sem->wait_lock); in __init_rwsem()
84 INIT_LIST_HEAD(&sem->wait_list); in __init_rwsem()
86 sem->owner = NULL; in __init_rwsem()
87 osq_lock_init(&sem->osq); in __init_rwsem()
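The hits above (lines 72-87) are the slow-path initializer. For orientation, here is a minimal, hypothetical usage sketch: my_lock, my_table, my_module_init, my_lookup and my_update are invented names, while init_rwsem(), down_read()/up_read() and down_write()/up_write() are the standard kernel rwsem API (on lockdep kernels, init_rwsem() is what reaches __init_rwsem() with the name/key filled in).

    #include <linux/rwsem.h>

    static struct rw_semaphore my_lock;     /* or: static DECLARE_RWSEM(my_lock); */
    static int my_table[16];

    static int __init my_module_init(void)
    {
            init_rwsem(&my_lock);   /* count = RWSEM_UNLOCKED_VALUE, wait_list empty */
            return 0;
    }

    static int my_lookup(int i)
    {
            int v;

            down_read(&my_lock);    /* shared: any number of concurrent readers */
            v = my_table[i];
            up_read(&my_lock);
            return v;
    }

    static void my_update(int i, int v)
    {
            down_write(&my_lock);   /* exclusive: one writer, no readers */
            my_table[i] = v;
            up_write(&my_lock);
    }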
121 __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type) in __rwsem_do_wake() argument
128 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); in __rwsem_do_wake()
148 oldcount = rwsem_atomic_update(adjustment, sem) - adjustment; in __rwsem_do_wake()
151 if (rwsem_atomic_update(-adjustment, sem) & in __rwsem_do_wake()
167 if (waiter->list.next == &sem->wait_list) in __rwsem_do_wake()
181 rwsem_atomic_add(adjustment, sem); in __rwsem_do_wake()
183 next = sem->wait_list.next; in __rwsem_do_wake()
195 sem->wait_list.next = next; in __rwsem_do_wake()
196 next->prev = &sem->wait_list; in __rwsem_do_wake()
199 return sem; in __rwsem_do_wake()
206 struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) in rwsem_down_read_failed() argument
217 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_read_failed()
218 if (list_empty(&sem->wait_list)) in rwsem_down_read_failed()
220 list_add_tail(&waiter.list, &sem->wait_list); in rwsem_down_read_failed()
223 count = rwsem_atomic_update(adjustment, sem); in rwsem_down_read_failed()
233 sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); in rwsem_down_read_failed()
235 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_failed()
247 return sem; in rwsem_down_read_failed()
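rwsem_down_read_failed() is only reached after the reader fast path has already added its bias and observed a negative count. A minimal userspace model of that decision using C11 atomics (model_* names are invented; assumes 64-bit long; the real slow path queues a waiter and sleeps instead of backing out):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define ACTIVE_BIAS   1L
    #define ACTIVE_MASK   0xffffffffL
    #define WAITING_BIAS  (-ACTIVE_MASK - 1)

    static atomic_long count;       /* models sem->count, starts unlocked (0) */

    static bool model_down_read(void)
    {
            /* xadd fast path: optimistically take a reader bias */
            long result = atomic_fetch_add(&count, ACTIVE_BIAS) + ACTIVE_BIAS;

            if (result < 0) {
                    /* writer active or queued: the kernel enters
                     * rwsem_down_read_failed() here; this model backs out */
                    atomic_fetch_add(&count, -ACTIVE_BIAS);
                    return false;
            }
            return true;            /* read lock granted */
    }

    int main(void)
    {
            printf("idle sem:        %s\n", model_down_read() ? "granted" : "slow path");
            /* pretend a writer holds the lock: count += ACTIVE_WRITE_BIAS */
            atomic_fetch_add(&count, WAITING_BIAS + ACTIVE_BIAS);
            printf("writer-held sem: %s\n", model_down_read() ? "granted" : "slow path");
            return 0;
    }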
251 static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem) in rwsem_try_write_lock() argument
258 cmpxchg(&sem->count, RWSEM_WAITING_BIAS, in rwsem_try_write_lock()
260 if (!list_is_singular(&sem->wait_list)) in rwsem_try_write_lock()
261 rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); in rwsem_try_write_lock()
272 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) in rwsem_try_write_lock_unqueued() argument
274 long old, count = ACCESS_ONCE(sem->count); in rwsem_try_write_lock_unqueued()
280 old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS); in rwsem_try_write_lock_unqueued()
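Both try-lock helpers above are cmpxchg attempts on sem->count: the queued variant (line 258) converts RWSEM_WAITING_BIAS into a write lock while holding wait_lock, and the unqueued variant (lines 274-280) loops from outside the queue. A standalone C11 rendition of the unqueued loop (model_* names invented; constants as in the sketches above):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define ACTIVE_BIAS        1L
    #define ACTIVE_MASK        0xffffffffL
    #define WAITING_BIAS       (-ACTIVE_MASK - 1)
    #define ACTIVE_WRITE_BIAS  (WAITING_BIAS + ACTIVE_BIAS)

    static bool model_try_write_lock_unqueued(atomic_long *count)
    {
            long old = atomic_load_explicit(count, memory_order_relaxed);

            for (;;) {
                    /* steal only when nothing is actively held: count must be
                     * exactly 0 (idle) or WAITING_BIAS (sleepers only) */
                    if (!(old == 0 || old == WAITING_BIAS))
                            return false;

                    /* cmpxchg: become the writer iff count is still 'old' */
                    if (atomic_compare_exchange_weak(count, &old,
                                                     old + ACTIVE_WRITE_BIAS))
                            return true;    /* lock stolen; the kernel records
                                             * the owner after acquisition */

                    /* failed: 'old' now holds the fresh value, retry */
            }
    }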
288 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) in rwsem_can_spin_on_owner() argument
297 owner = ACCESS_ONCE(sem->owner); in rwsem_can_spin_on_owner()
310 static inline bool owner_running(struct rw_semaphore *sem, in owner_running() argument
313 if (sem->owner != owner) in owner_running()
328 bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner) in rwsem_spin_on_owner() argument
331 while (owner_running(sem, owner)) { in rwsem_spin_on_owner()
344 return sem->owner == NULL; in rwsem_spin_on_owner()
347 static bool rwsem_optimistic_spin(struct rw_semaphore *sem) in rwsem_optimistic_spin() argument
355 if (!rwsem_can_spin_on_owner(sem)) in rwsem_optimistic_spin()
358 if (!osq_lock(&sem->osq)) in rwsem_optimistic_spin()
362 owner = ACCESS_ONCE(sem->owner); in rwsem_optimistic_spin()
363 if (owner && !rwsem_spin_on_owner(sem, owner)) in rwsem_optimistic_spin()
367 if (rwsem_try_write_lock_unqueued(sem)) { in rwsem_optimistic_spin()
389 osq_unlock(&sem->osq); in rwsem_optimistic_spin()
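Lines 288-389 implement optimistic spinning: before sleeping, a would-be writer spins as long as the current owner is still running on a CPU, under the osq (MCS) lock so only one spinner at a time hammers on sem->count. The overall shape, as a compilable sketch with stubbed-out helpers (everything suffixed _stub is invented; the real checks involve owner->on_cpu, need_resched() and RCU):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct task;                            /* opaque stand-in for task_struct */
    struct model_rwsem {
            _Atomic(struct task *) owner;   /* writer holding the lock, or NULL */
            /* count, osq, wait list ... elided */
    };

    /* invented stubs for kernel facilities */
    static bool osq_lock_stub(struct model_rwsem *sem)       { (void)sem; return true; }
    static void osq_unlock_stub(struct model_rwsem *sem)     { (void)sem; }
    static bool spin_on_owner_stub(struct model_rwsem *sem,
                                   struct task *owner)       { (void)sem; (void)owner; return true; }
    static bool try_write_lock_stub(struct model_rwsem *sem) { (void)sem; return true; }
    static bool need_resched_stub(void)                      { return false; }
    static void cpu_relax_stub(void)                         { }

    static bool model_optimistic_spin(struct model_rwsem *sem)
    {
            bool taken = false;

            if (!osq_lock_stub(sem))        /* one spinner at a time (MCS queue) */
                    return false;

            for (;;) {
                    struct task *owner = atomic_load(&sem->owner);

                    /* a writer owns it: spin while that writer stays on a CPU;
                     * once it is scheduled out, sleeping beats burning cycles */
                    if (owner && !spin_on_owner_stub(sem, owner))
                            break;

                    /* lock looks free: try the unqueued cmpxchg shown above */
                    if (try_write_lock_stub(sem)) {
                            taken = true;
                            break;
                    }

                    if (need_resched_stub())        /* be nice, stop spinning */
                            break;

                    cpu_relax_stub();
            }
            osq_unlock_stub(sem);
            return taken;
    }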
396 static bool rwsem_optimistic_spin(struct rw_semaphore *sem) in rwsem_optimistic_spin() argument
(The second definition at line 396 is the stub used when optimistic spinning is compiled out; it simply reports failure so callers fall through to the sleeping path.)
406 struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) in rwsem_down_write_failed() argument
413 count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem); in rwsem_down_write_failed()
416 if (rwsem_optimistic_spin(sem)) in rwsem_down_write_failed()
417 return sem; in rwsem_down_write_failed()
426 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_failed()
429 if (list_empty(&sem->wait_list)) in rwsem_down_write_failed()
432 list_add_tail(&waiter.list, &sem->wait_list); in rwsem_down_write_failed()
436 count = ACCESS_ONCE(sem->count); in rwsem_down_write_failed()
444 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS); in rwsem_down_write_failed()
447 count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); in rwsem_down_write_failed()
452 if (rwsem_try_write_lock(count, sem)) in rwsem_down_write_failed()
454 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_failed()
460 } while ((count = sem->count) & RWSEM_ACTIVE_MASK); in rwsem_down_write_failed()
462 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_failed()
467 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_failed()
469 return sem; in rwsem_down_write_failed()
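rwsem_down_write_failed() (lines 406-469) is entered after the writer fast path added RWSEM_ACTIVE_WRITE_BIAS and found the semaphore was not idle; its first act (line 413) is to subtract that bias back out before queueing. The fast-path decision, as a userspace C11 model (model_* names invented; assumes 64-bit long):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define ACTIVE_BIAS        1L
    #define ACTIVE_MASK        0xffffffffL
    #define WAITING_BIAS       (-ACTIVE_MASK - 1)
    #define ACTIVE_WRITE_BIAS  (WAITING_BIAS + ACTIVE_BIAS)

    static bool model_down_write(atomic_long *count)
    {
            long result = atomic_fetch_add(count, ACTIVE_WRITE_BIAS)
                          + ACTIVE_WRITE_BIAS;

            /* only an exact ACTIVE_WRITE_BIAS result means the count was 0,
             * i.e. no readers, no writer, no waiters: lock acquired */
            if (result == ACTIVE_WRITE_BIAS)
                    return true;

            /* contended: the kernel calls rwsem_down_write_failed(), whose
             * line 413 subtracts ACTIVE_WRITE_BIAS again before queueing */
            atomic_fetch_add(count, -ACTIVE_WRITE_BIAS);
            return false;   /* model only: real code queues and sleeps */
    }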
478 struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) in rwsem_wake() argument
482 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_wake()
485 if (!list_empty(&sem->wait_list)) in rwsem_wake()
486 sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); in rwsem_wake()
488 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_wake()
490 return sem; in rwsem_wake()
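rwsem_wake() runs only when an unlock's xadd result says waiters exist; for instance, the generic __up_write() subtracts RWSEM_ACTIVE_WRITE_BIAS and calls rwsem_wake() if the result went negative, because a negative remainder means WAITING_BIAS is still in the count. As a model (model_* names invented; constants as above):

    #include <stdatomic.h>

    #define ACTIVE_BIAS        1L
    #define ACTIVE_MASK        0xffffffffL
    #define WAITING_BIAS       (-ACTIVE_MASK - 1)
    #define ACTIVE_WRITE_BIAS  (WAITING_BIAS + ACTIVE_BIAS)

    static void model_rwsem_wake(void)      /* invented stand-in for rwsem_wake() */
    {
            /* would take wait_lock and call __rwsem_do_wake(sem, RWSEM_WAKE_ANY) */
    }

    static void model_up_write(atomic_long *count)
    {
            /* drop the writer bias; any WAITING_BIAS in the count survives */
            long result = atomic_fetch_add(count, -ACTIVE_WRITE_BIAS)
                          - ACTIVE_WRITE_BIAS;

            /* negative result == WAITING_BIAS still present == queued waiters */
            if (result < 0)
                    model_rwsem_wake();
    }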
500 struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) in rwsem_downgrade_wake() argument
504 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_downgrade_wake()
507 if (!list_empty(&sem->wait_list)) in rwsem_downgrade_wake()
508 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); in rwsem_downgrade_wake()
510 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_downgrade_wake()
512 return sem; in rwsem_downgrade_wake()
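rwsem_downgrade_wake() is the slow path of downgrade_write(), which turns a held write lock into a read lock without ever releasing it, waking only the readers queued at the head of the list (RWSEM_WAKE_READ_OWNED). A hypothetical kernel-style usage (the my_* names and helpers are invented; downgrade_write() is the real API):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(my_lock);
    static void rebuild_my_table(void);     /* invented helper */
    static void consume_my_table(void);     /* invented helper */

    static void my_rebuild_and_publish(void)
    {
            down_write(&my_lock);           /* exclusive while we rebuild */
            rebuild_my_table();

            /* atomically become a reader: queued readers may now run too,
             * but no writer can slip in between rebuild and read-back */
            downgrade_write(&my_lock);

            consume_my_table();
            up_read(&my_lock);
    }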