Lines Matching refs:sem
Every entry below is a source line from the rwsem implementation that references the struct rw_semaphore argument sem. The leading number is the line number in the source file; the trailing annotation names the enclosing function (or marks sem as a macro/function argument).

70 # define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\  argument
73 #c, atomic_long_read(&(sem)->count), \
74 (unsigned long) sem->magic, \
75 atomic_long_read(&(sem)->owner), (long)current, \
76 list_empty(&(sem)->wait_list) ? "" : "not ")) \
80 # define DEBUG_RWSEMS_WARN_ON(c, sem) argument
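The two definitions above are the config-gated debug assertion: with CONFIG_DEBUG_RWSEMS enabled the macro dumps the semaphore's count, magic, owner word, and wait-list state before warning; otherwise it compiles away. A minimal userspace analogue of the same pattern (struct dbg_sem and MY_WARN_ON are illustrative names, not kernel identifiers):

    #include <stdio.h>
    #include <stdlib.h>

    struct dbg_sem { long count; long owner; };

    /* Evaluate the condition; on failure print the lock words that
     * would explain the state, then abort (the kernel warns instead). */
    #define MY_WARN_ON(c, s)                                          \
        do {                                                          \
            if (c) {                                                  \
                fprintf(stderr, "%s: count=%ld owner=%ld\n",          \
                        #c, (s)->count, (s)->owner);                  \
                abort();                                              \
            }                                                         \
        } while (0)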
142 static inline void rwsem_set_owner(struct rw_semaphore *sem) in rwsem_set_owner() argument
145 atomic_long_set(&sem->owner, (long)current); in rwsem_set_owner()
148 static inline void rwsem_clear_owner(struct rw_semaphore *sem) in rwsem_clear_owner() argument
151 atomic_long_set(&sem->owner, 0); in rwsem_clear_owner()
157 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags) in rwsem_test_oflags() argument
159 return atomic_long_read(&sem->owner) & flags; in rwsem_test_oflags()
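These helpers manage the owner word: a task_struct pointer stored as a long, with the low bits (free because pointers are word-aligned) reused for flags such as RWSEM_READER_OWNED and RWSEM_NONSPINNABLE. A sketch of that encoding in C11 atomics, assuming two flag bits (struct model_sem and the constants are stand-ins, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define READER_OWNED     (1UL << 0)   /* stand-in for RWSEM_READER_OWNED */
    #define NONSPINNABLE     (1UL << 1)   /* stand-in for RWSEM_NONSPINNABLE */
    #define OWNER_FLAGS_MASK (READER_OWNED | NONSPINNABLE)

    struct model_sem { _Atomic long owner; };

    /* Writer acquire: record the owner (an opaque task id here). */
    static inline void set_owner(struct model_sem *s, long task)
    {
        atomic_store(&s->owner, task);
    }

    /* Release: zero the word, flags included. */
    static inline void clear_owner(struct model_sem *s)
    {
        atomic_store(&s->owner, 0);
    }

    static inline bool test_oflags(struct model_sem *s, long flags)
    {
        return atomic_load(&s->owner) & flags;
    }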
172 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem, in __rwsem_set_reader_owned() argument
176 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE); in __rwsem_set_reader_owned()
178 atomic_long_set(&sem->owner, val); in __rwsem_set_reader_owned()
181 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem) in rwsem_set_reader_owned() argument
183 __rwsem_set_reader_owned(sem, current); in rwsem_set_reader_owned()
189 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem) in is_rwsem_reader_owned() argument
195 long count = atomic_long_read(&sem->count); in is_rwsem_reader_owned()
200 return rwsem_test_oflags(sem, RWSEM_READER_OWNED); in is_rwsem_reader_owned()
210 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem) in rwsem_clear_reader_owned() argument
212 unsigned long val = atomic_long_read(&sem->owner); in rwsem_clear_reader_owned()
215 if (atomic_long_try_cmpxchg(&sem->owner, &val, in rwsem_clear_reader_owned()
221 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem) in rwsem_clear_reader_owned() argument
230 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem) in rwsem_set_nonspinnable() argument
232 unsigned long owner = atomic_long_read(&sem->owner); in rwsem_set_nonspinnable()
239 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner, in rwsem_set_nonspinnable()
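rwsem_set_nonspinnable() is a classic try-cmpxchg retry loop: re-read the owner word whenever the compare-exchange fails, bailing out early if the lock is not reader-owned or the bit is already set. The same shape on the model word above:

    static void set_nonspinnable(struct model_sem *s)
    {
        long owner = atomic_load(&s->owner);

        do {
            if (!(owner & READER_OWNED))   /* not reader-owned: don't touch it */
                return;
            if (owner & NONSPINNABLE)      /* already marked: nothing to do */
                return;
        } while (!atomic_compare_exchange_weak(&s->owner, &owner,
                                               owner | NONSPINNABLE));
    }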
243 static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp) in rwsem_read_trylock() argument
245 *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count); in rwsem_read_trylock()
248 rwsem_set_nonspinnable(sem); in rwsem_read_trylock()
251 rwsem_set_reader_owned(sem); in rwsem_read_trylock()
259 static inline bool rwsem_write_trylock(struct rw_semaphore *sem) in rwsem_write_trylock() argument
265 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) { in rwsem_write_trylock()
267 rwsem_set_owner(sem); in rwsem_write_trylock()
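The two trylock fast paths work on the count word rather than the owner word: a reader optimistically adds RWSEM_READER_BIAS and checks that nothing conflicting became visible, while a writer needs a single compare-and-swap from the unlocked value to RWSEM_WRITER_LOCKED. A simplified model, reusing the C11 atomics setup above (only the writer bit is checked on the read side here; the real code fails on the whole RWSEM_READ_FAILED_MASK, which also covers waiter and handoff flags):

    #define WRITER_LOCKED (1UL << 0)   /* stand-in for RWSEM_WRITER_LOCKED */
    #define READER_BIAS   (1UL << 8)   /* stand-in for RWSEM_READER_BIAS */

    struct model_count { _Atomic long count; };

    /* Reader: add a bias first, validate afterwards; on conflict the
     * caller must undo the bias and fall into the slow path. */
    static bool read_trylock(struct model_count *s)
    {
        long c = atomic_fetch_add(&s->count, READER_BIAS) + READER_BIAS;
        return !(c & WRITER_LOCKED);
    }

    /* Writer: all-or-nothing CAS from "no readers, no writer". */
    static bool write_trylock(struct model_count *s)
    {
        long expected = 0;
        return atomic_compare_exchange_strong(&s->count, &expected,
                                              WRITER_LOCKED);
    }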
278 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem) in rwsem_owner() argument
281 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK); in rwsem_owner()
289 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags) in rwsem_owner_flags() argument
291 unsigned long owner = atomic_long_read(&sem->owner); in rwsem_owner_flags()
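rwsem_owner() and rwsem_owner_flags() are the read side of the same encoding: one atomic load, split into pointer and flag bits by masking. On the model word:

    static long owner_task(struct model_sem *s)
    {
        return atomic_load(&s->owner) & ~OWNER_FLAGS_MASK;
    }

    static long owner_flags(struct model_sem *s, long *pflags)
    {
        long owner = atomic_load(&s->owner);

        *pflags = owner & OWNER_FLAGS_MASK;
        return owner & ~OWNER_FLAGS_MASK;
    }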
317 void __init_rwsem(struct rw_semaphore *sem, const char *name, in __init_rwsem() argument
324 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); in __init_rwsem()
325 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP); in __init_rwsem()
328 sem->magic = sem; in __init_rwsem()
330 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE); in __init_rwsem()
331 raw_spin_lock_init(&sem->wait_lock); in __init_rwsem()
332 INIT_LIST_HEAD(&sem->wait_list); in __init_rwsem()
333 atomic_long_set(&sem->owner, 0L); in __init_rwsem()
335 osq_lock_init(&sem->osq); in __init_rwsem()
337 trace_android_vh_rwsem_init(sem); in __init_rwsem()
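__init_rwsem() resets every field (count, wait_lock, wait_list, owner, and the optimistic-spin queue) and wires up lockdep. Callers normally reach it through the init_rwsem() wrapper or the static initializer; a sketch of kernel-side usage (struct my_dev and my_dev_setup are hypothetical):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(static_sem);   /* compile-time initialization */

    struct my_dev {
        struct rw_semaphore lock;
    };

    static void my_dev_setup(struct my_dev *d)
    {
        init_rwsem(&d->lock);           /* runtime path into __init_rwsem() */
    }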
353 #define rwsem_first_waiter(sem) \ argument
354 list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
378 rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) in rwsem_add_waiter() argument
380 lockdep_assert_held(&sem->wait_lock); in rwsem_add_waiter()
381 list_add_tail(&waiter->list, &sem->wait_list); in rwsem_add_waiter()
394 rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) in rwsem_del_waiter() argument
396 lockdep_assert_held(&sem->wait_lock); in rwsem_del_waiter()
398 if (likely(!list_empty(&sem->wait_list))) in rwsem_del_waiter()
401 atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count); in rwsem_del_waiter()
419 static void rwsem_mark_wake(struct rw_semaphore *sem, in rwsem_mark_wake() argument
427 lockdep_assert_held(&sem->wait_lock); in rwsem_mark_wake()
433 waiter = rwsem_first_waiter(sem); in rwsem_mark_wake()
454 if (unlikely(atomic_long_read(&sem->count) < 0)) in rwsem_mark_wake()
466 oldcount = atomic_long_fetch_add(adjustment, &sem->count); in rwsem_mark_wake()
481 atomic_long_add(-adjustment, &sem->count); in rwsem_mark_wake()
491 __rwsem_set_reader_owned(sem, owner); in rwsem_mark_wake()
518 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) { in rwsem_mark_wake()
535 oldcount = atomic_long_read(&sem->count); in rwsem_mark_wake()
536 if (list_empty(&sem->wait_list)) { in rwsem_mark_wake()
554 atomic_long_add(adjustment, &sem->count); in rwsem_mark_wake()
585 rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter, in rwsem_del_wake_waiter() argument
587 __releases(&sem->wait_lock) in rwsem_del_wake_waiter()
589 bool first = rwsem_first_waiter(sem) == waiter; in rwsem_del_wake_waiter()
598 if (rwsem_del_waiter(sem, waiter) && first) in rwsem_del_wake_waiter()
599 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q); in rwsem_del_wake_waiter()
600 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_del_wake_waiter()
612 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, in rwsem_try_write_lock() argument
615 struct rwsem_waiter *first = rwsem_first_waiter(sem); in rwsem_try_write_lock()
618 lockdep_assert_held(&sem->wait_lock); in rwsem_try_write_lock()
620 count = atomic_long_read(&sem->count); in rwsem_try_write_lock()
651 if (list_is_singular(&sem->wait_list)) in rwsem_try_write_lock()
654 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)); in rwsem_try_write_lock()
672 rwsem_set_owner(sem); in rwsem_try_write_lock()
698 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) in rwsem_try_write_lock_unqueued() argument
700 long count = atomic_long_read(&sem->count); in rwsem_try_write_lock_unqueued()
703 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, in rwsem_try_write_lock_unqueued()
705 rwsem_set_owner(sem); in rwsem_try_write_lock_unqueued()
713 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) in rwsem_can_spin_on_owner() argument
729 owner = rwsem_owner_flags(sem, &flags); in rwsem_can_spin_on_owner()
737 trace_android_vh_rwsem_can_spin_on_owner(sem, &ret); in rwsem_can_spin_on_owner()
758 rwsem_spin_on_owner(struct rw_semaphore *sem) in rwsem_spin_on_owner() argument
768 owner = rwsem_owner_flags(sem, &flags); in rwsem_spin_on_owner()
774 trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, true); in rwsem_spin_on_owner()
783 new = rwsem_owner_flags(sem, &new_flags); in rwsem_spin_on_owner()
822 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem) in rwsem_rspin_threshold() argument
824 long count = atomic_long_read(&sem->count); in rwsem_rspin_threshold()
835 static bool rwsem_optimistic_spin(struct rw_semaphore *sem) in rwsem_optimistic_spin() argument
847 if (!osq_lock(&sem->osq)) in rwsem_optimistic_spin()
859 trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, false); in rwsem_optimistic_spin()
862 owner_state = rwsem_spin_on_owner(sem); in rwsem_optimistic_spin()
869 taken = rwsem_try_write_lock_unqueued(sem); in rwsem_optimistic_spin()
886 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)) in rwsem_optimistic_spin()
888 rspin_threshold = rwsem_rspin_threshold(sem); in rwsem_optimistic_spin()
900 rwsem_set_nonspinnable(sem); in rwsem_optimistic_spin()
954 osq_unlock(&sem->osq); in rwsem_optimistic_spin()
955 trace_android_vh_rwsem_opt_spin_finish(sem, taken); in rwsem_optimistic_spin()
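The optimistic-spin loop above first acquires the OSQ (so only one waiter spins on the owner at a time), then alternates between rwsem_spin_on_owner() and unqueued write-trylock attempts, with a time threshold and the NONSPINNABLE bit as exits. The core spin-on-owner idea, reduced to the model word (the kernel additionally checks owner_on_cpu() and need_resched(), omitted here):

    /* Spin while the same writer stays recorded; once the word changes
     * the lock may be free and a trylock is worth attempting. */
    static bool spin_on_owner(struct model_sem *s)
    {
        const long owner = atomic_load(&s->owner) & ~OWNER_FLAGS_MASK;

        if (!owner)
            return true;
        while ((atomic_load(&s->owner) & ~OWNER_FLAGS_MASK) == owner)
            ;                          /* cpu_relax() in the kernel */
        return (atomic_load(&s->owner) & ~OWNER_FLAGS_MASK) == 0;
    }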
966 static inline void clear_nonspinnable(struct rw_semaphore *sem) in clear_nonspinnable() argument
968 if (unlikely(rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))) in clear_nonspinnable()
969 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner); in clear_nonspinnable()
973 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) in rwsem_can_spin_on_owner() argument
978 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem) in rwsem_optimistic_spin() argument
983 static inline void clear_nonspinnable(struct rw_semaphore *sem) { } in clear_nonspinnable() argument
986 rwsem_spin_on_owner(struct rw_semaphore *sem) in rwsem_spin_on_owner() argument
1000 static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count, in rwsem_cond_wake_waiter() argument
1012 clear_nonspinnable(sem); in rwsem_cond_wake_waiter()
1014 rwsem_mark_wake(sem, wake_type, wake_q); in rwsem_cond_wake_waiter()
1021 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state) in rwsem_down_read_slowpath() argument
1034 if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) && in rwsem_down_read_slowpath()
1042 rwsem_set_reader_owned(sem); in rwsem_down_read_slowpath()
1050 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1051 if (!list_empty(&sem->wait_list)) in rwsem_down_read_slowpath()
1052 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, in rwsem_down_read_slowpath()
1054 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1058 return sem; in rwsem_down_read_slowpath()
1067 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1068 if (list_empty(&sem->wait_list)) { in rwsem_down_read_slowpath()
1075 if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) { in rwsem_down_read_slowpath()
1078 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1079 rwsem_set_reader_owned(sem); in rwsem_down_read_slowpath()
1083 return sem; in rwsem_down_read_slowpath()
1089 sem, &already_on_list); in rwsem_down_read_slowpath()
1091 rwsem_add_waiter(sem, &waiter); in rwsem_down_read_slowpath()
1094 count = atomic_long_add_return(adjustment, &sem->count); in rwsem_down_read_slowpath()
1096 rwsem_cond_wake_waiter(sem, count, &wake_q); in rwsem_down_read_slowpath()
1097 trace_android_vh_rwsem_wake(sem); in rwsem_down_read_slowpath()
1098 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1103 trace_contention_begin(sem, LCB_F_READ); in rwsem_down_read_slowpath()
1106 trace_android_vh_rwsem_read_wait_start(sem); in rwsem_down_read_slowpath()
1114 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1117 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_slowpath()
1126 trace_android_vh_rwsem_read_wait_finish(sem); in rwsem_down_read_slowpath()
1128 trace_contention_end(sem, 0); in rwsem_down_read_slowpath()
1130 return sem; in rwsem_down_read_slowpath()
1133 rwsem_del_wake_waiter(sem, &waiter, &wake_q); in rwsem_down_read_slowpath()
1135 trace_android_vh_rwsem_read_wait_finish(sem); in rwsem_down_read_slowpath()
1137 trace_contention_end(sem, -EINTR); in rwsem_down_read_slowpath()
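The read slow path follows the standard sleeping-lock shape: take wait_lock, queue the waiter, adjust the count, then sleep in a loop until the wake side grants the lock (or a signal aborts the wait, the -EINTR exit above). The wait half of that shape, modeled with a pthread condvar (the kernel uses set_current_state() plus schedule() and a wake_q instead; struct waiter here is a stand-in):

    #include <pthread.h>
    #include <stdbool.h>

    struct waiter { bool granted; };

    static void slowpath_wait(pthread_mutex_t *wait_lock,
                              pthread_cond_t *wake, struct waiter *w)
    {
        pthread_mutex_lock(wait_lock);
        w->granted = false;            /* rwsem_add_waiter() equivalent */
        while (!w->granted)            /* loop absorbs spurious wakeups */
            pthread_cond_wait(wake, wait_lock);
        pthread_mutex_unlock(wait_lock);
    }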
1145 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) in rwsem_down_write_slowpath() argument
1152 if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) { in rwsem_down_write_slowpath()
1155 return sem; in rwsem_down_write_slowpath()
1167 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1170 sem, &already_on_list); in rwsem_down_write_slowpath()
1172 rwsem_add_waiter(sem, &waiter); in rwsem_down_write_slowpath()
1175 if (rwsem_first_waiter(sem) != &waiter) { in rwsem_down_write_slowpath()
1176 rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count), in rwsem_down_write_slowpath()
1183 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1185 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1188 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count); in rwsem_down_write_slowpath()
1191 trace_android_vh_rwsem_wake(sem); in rwsem_down_write_slowpath()
1193 trace_android_vh_rwsem_write_wait_start(sem); in rwsem_down_write_slowpath()
1195 trace_contention_begin(sem, LCB_F_WRITE); in rwsem_down_write_slowpath()
1198 if (rwsem_try_write_lock(sem, &waiter)) { in rwsem_down_write_slowpath()
1203 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1220 owner_state = rwsem_spin_on_owner(sem); in rwsem_down_write_slowpath()
1231 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1234 trace_android_vh_rwsem_write_wait_finish(sem); in rwsem_down_write_slowpath()
1235 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1237 trace_contention_end(sem, 0); in rwsem_down_write_slowpath()
1239 return sem; in rwsem_down_write_slowpath()
1243 trace_android_vh_rwsem_write_wait_finish(sem); in rwsem_down_write_slowpath()
1244 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_slowpath()
1245 rwsem_del_wake_waiter(sem, &waiter, &wake_q); in rwsem_down_write_slowpath()
1247 trace_contention_end(sem, -EINTR); in rwsem_down_write_slowpath()
1255 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) in rwsem_wake() argument
1260 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_wake()
1262 if (!list_empty(&sem->wait_list)) in rwsem_wake()
1263 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); in rwsem_wake()
1264 trace_android_vh_rwsem_wake_finish(sem); in rwsem_wake()
1266 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_wake()
1269 return sem; in rwsem_wake()
1277 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) in rwsem_downgrade_wake() argument
1282 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_downgrade_wake()
1284 if (!list_empty(&sem->wait_list)) in rwsem_downgrade_wake()
1285 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q); in rwsem_downgrade_wake()
1287 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_downgrade_wake()
1290 return sem; in rwsem_downgrade_wake()
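rwsem_wake() and rwsem_downgrade_wake() are the matching wake half: waiters are granted the lock under wait_lock, but the actual wakeups are issued from the wake_q only after the spinlock is dropped. The same ordering on the pthread model above:

    static void slowpath_wake(pthread_mutex_t *wait_lock,
                              pthread_cond_t *wake, struct waiter *w)
    {
        pthread_mutex_lock(wait_lock);
        w->granted = true;             /* rwsem_mark_wake() equivalent */
        pthread_mutex_unlock(wait_lock);
        pthread_cond_broadcast(wake);  /* broadcast: waiters share one condvar;
                                          the kernel wakes specific tasks */
    }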
1296 static __always_inline int __down_read_common(struct rw_semaphore *sem, int state) in __down_read_common() argument
1302 if (!rwsem_read_trylock(sem, &count)) { in __down_read_common()
1303 if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) { in __down_read_common()
1307 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem); in __down_read_common()
1314 static __always_inline void __down_read(struct rw_semaphore *sem) in __down_read() argument
1316 __down_read_common(sem, TASK_UNINTERRUPTIBLE); in __down_read()
1319 static __always_inline int __down_read_interruptible(struct rw_semaphore *sem) in __down_read_interruptible() argument
1321 return __down_read_common(sem, TASK_INTERRUPTIBLE); in __down_read_interruptible()
1324 static __always_inline int __down_read_killable(struct rw_semaphore *sem) in __down_read_killable() argument
1326 return __down_read_common(sem, TASK_KILLABLE); in __down_read_killable()
1329 static inline int __down_read_trylock(struct rw_semaphore *sem) in __down_read_trylock() argument
1334 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); in __down_read_trylock()
1337 tmp = atomic_long_read(&sem->count); in __down_read_trylock()
1339 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, in __down_read_trylock()
1341 rwsem_set_reader_owned(sem); in __down_read_trylock()
1354 static inline int __down_write_common(struct rw_semaphore *sem, int state) in __down_write_common() argument
1356 if (unlikely(!rwsem_write_trylock(sem))) { in __down_write_common()
1357 if (IS_ERR(rwsem_down_write_slowpath(sem, state))) in __down_write_common()
1364 static inline void __down_write(struct rw_semaphore *sem) in __down_write() argument
1366 __down_write_common(sem, TASK_UNINTERRUPTIBLE); in __down_write()
1369 static inline int __down_write_killable(struct rw_semaphore *sem) in __down_write_killable() argument
1371 return __down_write_common(sem, TASK_KILLABLE); in __down_write_killable()
1374 static inline int __down_write_trylock(struct rw_semaphore *sem) in __down_write_trylock() argument
1376 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); in __down_write_trylock()
1377 return rwsem_write_trylock(sem); in __down_write_trylock()
1383 static inline void __up_read(struct rw_semaphore *sem) in __up_read() argument
1387 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); in __up_read()
1388 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem); in __up_read()
1391 rwsem_clear_reader_owned(sem); in __up_read()
1392 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count); in __up_read()
1393 DEBUG_RWSEMS_WARN_ON(tmp < 0, sem); in __up_read()
1396 clear_nonspinnable(sem); in __up_read()
1397 rwsem_wake(sem); in __up_read()
1406 static inline void __up_write(struct rw_semaphore *sem) in __up_write() argument
1410 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); in __up_write()
1415 DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) && in __up_write()
1416 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem); in __up_write()
1419 rwsem_clear_owner(sem); in __up_write()
1420 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count); in __up_write()
1423 rwsem_wake(sem); in __up_write()
1430 static inline void __downgrade_write(struct rw_semaphore *sem) in __downgrade_write() argument
1441 DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem); in __downgrade_write()
1443 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count); in __downgrade_write()
1444 rwsem_set_reader_owned(sem); in __downgrade_write()
1446 rwsem_downgrade_wake(sem); in __downgrade_write()
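(From here the listing switches to the PREEMPT_RT build of the same file: the __down_*/__up_* entry points keep their names but simply forward to the rtmutex-based rwbase primitives, so the lockdep-annotated public API further below is shared by both builds.)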
1480 void __init_rwsem(struct rw_semaphore *sem, const char *name, in __init_rwsem() argument
1483 init_rwbase_rt(&(sem)->rwbase); in __init_rwsem()
1486 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); in __init_rwsem()
1487 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP); in __init_rwsem()
1492 static inline void __down_read(struct rw_semaphore *sem) in __down_read() argument
1494 rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE); in __down_read()
1497 static inline int __down_read_interruptible(struct rw_semaphore *sem) in __down_read_interruptible() argument
1499 return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE); in __down_read_interruptible()
1502 static inline int __down_read_killable(struct rw_semaphore *sem) in __down_read_killable() argument
1504 return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE); in __down_read_killable()
1507 static inline int __down_read_trylock(struct rw_semaphore *sem) in __down_read_trylock() argument
1509 return rwbase_read_trylock(&sem->rwbase); in __down_read_trylock()
1512 static inline void __up_read(struct rw_semaphore *sem) in __up_read() argument
1514 rwbase_read_unlock(&sem->rwbase, TASK_NORMAL); in __up_read()
1517 static inline void __sched __down_write(struct rw_semaphore *sem) in __down_write() argument
1519 rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE); in __down_write()
1522 static inline int __sched __down_write_killable(struct rw_semaphore *sem) in __down_write_killable() argument
1524 return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE); in __down_write_killable()
1527 static inline int __down_write_trylock(struct rw_semaphore *sem) in __down_write_trylock() argument
1529 return rwbase_write_trylock(&sem->rwbase); in __down_write_trylock()
1532 static inline void __up_write(struct rw_semaphore *sem) in __up_write() argument
1534 rwbase_write_unlock(&sem->rwbase); in __up_write()
1537 static inline void __downgrade_write(struct rw_semaphore *sem) in __downgrade_write() argument
1539 rwbase_write_downgrade(&sem->rwbase); in __downgrade_write()
1543 #define DEBUG_RWSEMS_WARN_ON(c, sem) argument
1545 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem, in __rwsem_set_reader_owned() argument
1550 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem) in is_rwsem_reader_owned() argument
1552 int count = atomic_read(&sem->rwbase.readers); in is_rwsem_reader_owned()
1562 void __sched down_read(struct rw_semaphore *sem) in down_read() argument
1565 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); in down_read()
1567 LOCK_CONTENDED(sem, __down_read_trylock, __down_read); in down_read()
1571 int __sched down_read_interruptible(struct rw_semaphore *sem) in down_read_interruptible() argument
1574 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); in down_read_interruptible()
1576 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) { in down_read_interruptible()
1577 rwsem_release(&sem->dep_map, _RET_IP_); in down_read_interruptible()
1585 int __sched down_read_killable(struct rw_semaphore *sem) in down_read_killable() argument
1588 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); in down_read_killable()
1590 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { in down_read_killable()
1591 rwsem_release(&sem->dep_map, _RET_IP_); in down_read_killable()
1602 int down_read_trylock(struct rw_semaphore *sem) in down_read_trylock() argument
1604 int ret = __down_read_trylock(sem); in down_read_trylock()
1607 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); in down_read_trylock()
1615 void __sched down_write(struct rw_semaphore *sem) in down_write() argument
1618 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); in down_write()
1619 LOCK_CONTENDED(sem, __down_write_trylock, __down_write); in down_write()
1626 int __sched down_write_killable(struct rw_semaphore *sem) in down_write_killable() argument
1629 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); in down_write_killable()
1631 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, in down_write_killable()
1633 rwsem_release(&sem->dep_map, _RET_IP_); in down_write_killable()
1644 int down_write_trylock(struct rw_semaphore *sem) in down_write_trylock() argument
1646 int ret = __down_write_trylock(sem); in down_write_trylock()
1649 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_); in down_write_trylock()
1658 void up_read(struct rw_semaphore *sem) in up_read() argument
1660 rwsem_release(&sem->dep_map, _RET_IP_); in up_read()
1661 __up_read(sem); in up_read()
1668 void up_write(struct rw_semaphore *sem) in up_write() argument
1670 rwsem_release(&sem->dep_map, _RET_IP_); in up_write()
1671 trace_android_vh_rwsem_write_finished(sem); in up_write()
1672 __up_write(sem); in up_write()
1679 void downgrade_write(struct rw_semaphore *sem) in downgrade_write() argument
1681 lock_downgrade(&sem->dep_map, _RET_IP_); in downgrade_write()
1682 trace_android_vh_rwsem_write_finished(sem); in downgrade_write()
1683 __downgrade_write(sem); in downgrade_write()
1689 void down_read_nested(struct rw_semaphore *sem, int subclass) in down_read_nested() argument
1692 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); in down_read_nested()
1693 LOCK_CONTENDED(sem, __down_read_trylock, __down_read); in down_read_nested()
1697 int down_read_killable_nested(struct rw_semaphore *sem, int subclass) in down_read_killable_nested() argument
1700 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); in down_read_killable_nested()
1702 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { in down_read_killable_nested()
1703 rwsem_release(&sem->dep_map, _RET_IP_); in down_read_killable_nested()
1711 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest) in _down_write_nest_lock() argument
1714 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_); in _down_write_nest_lock()
1715 LOCK_CONTENDED(sem, __down_write_trylock, __down_write); in _down_write_nest_lock()
1719 void down_read_non_owner(struct rw_semaphore *sem) in down_read_non_owner() argument
1722 __down_read(sem); in down_read_non_owner()
1729 __rwsem_set_reader_owned(sem, NULL); in down_read_non_owner()
1733 void down_write_nested(struct rw_semaphore *sem, int subclass) in down_write_nested() argument
1736 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); in down_write_nested()
1737 LOCK_CONTENDED(sem, __down_write_trylock, __down_write); in down_write_nested()
1741 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass) in down_write_killable_nested() argument
1744 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); in down_write_killable_nested()
1746 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, in down_write_killable_nested()
1748 rwsem_release(&sem->dep_map, _RET_IP_); in down_write_killable_nested()
1756 void up_read_non_owner(struct rw_semaphore *sem) in up_read_non_owner() argument
1758 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem); in up_read_non_owner()
1759 __up_read(sem); in up_read_non_owner()
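Taken together, the public entry points give the usual reader/writer pattern; a short hypothetical example of kernel-side use (cfg_rwsem and the cfg_* functions are illustrative):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(cfg_rwsem);

    static void cfg_read(void)
    {
        down_read(&cfg_rwsem);         /* shared: many readers at once */
        /* ... read the protected state ... */
        up_read(&cfg_rwsem);
    }

    static void cfg_update(void)
    {
        down_write(&cfg_rwsem);        /* exclusive access */
        /* ... modify the protected state ... */
        downgrade_write(&cfg_rwsem);   /* keep reading with no unlock window */
        /* ... observe the state just written ... */
        up_read(&cfg_rwsem);           /* a downgraded lock ends as a read lock */
    }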