1 // SPDX-License-Identifier: GPL-2.0
2 /* kernel/rwsem.c: R/W semaphores, public implementation
3 *
4 * Written by David Howells (dhowells@redhat.com).
5 * Derived from asm-i386/semaphore.h
6 *
7 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
8 * and Michel Lespinasse <walken@google.com>
9 *
10 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
11 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
12 *
13 * Rwsem count bit fields re-definition and rwsem rearchitecture by
14 * Waiman Long <longman@redhat.com> and
15 * Peter Zijlstra <peterz@infradead.org>.
16 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/task.h>
23 #include <linux/sched/debug.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/clock.h>
27 #include <linux/export.h>
28 #include <linux/rwsem.h>
29 #include <linux/atomic.h>
30
31 #ifndef CONFIG_PREEMPT_RT
32 #include "lock_events.h"
33 #include <trace/hooks/dtask.h>
34 #include <trace/hooks/rwsem.h>
35
36 /*
37 * The least significant 2 bits of the owner value have the following
38 * meanings when set.
39 * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
40 * - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
41 *
42 * When the rwsem is reader-owned and a spinning writer has timed out,
43 * the nonspinnable bit will be set to disable optimistic spinning.
44 *
45 * When a writer acquires a rwsem, it puts its task_struct pointer
46 * into the owner field. It is cleared after an unlock.
47 *
48 * When a reader acquires a rwsem, it will also put its task_struct
49 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
50 * On unlock, the owner field will largely be left untouched. So
51 * for a free or reader-owned rwsem, the owner value may contain
52 * information about the last reader that acquired the rwsem.
53 *
54 * That information may be helpful in debugging cases where the system
55 * seems to hang on a reader-owned rwsem, especially if only one reader
56 * is involved. Ideally we would like to track all the readers that own
57 * a rwsem, but the overhead is simply too big.
58 *
59 * Fast-path reader optimistic lock stealing is supported when the rwsem
60 * was previously owned by a writer and the following conditions are met:
61 * - OSQ is empty
62 * - rwsem is not currently writer owned
63 * - the handoff isn't set.
64 */
65 #define RWSEM_READER_OWNED (1UL << 0)
66 #define RWSEM_NONSPINNABLE (1UL << 1)
67 #define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
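/*
 * Illustrative sketch only (not part of this file): how the owner word
 * decomposes into a task pointer plus the two flag bits defined above.
 * The helper name below is made up for illustration; the real accessors
 * are rwsem_owner() and rwsem_owner_flags() further down.
 */
#if 0
static inline void rwsem_owner_sketch(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);
	struct task_struct *task = (struct task_struct *)
				   (owner & ~RWSEM_OWNER_FLAGS_MASK);
	bool reader_owned = owner & RWSEM_READER_OWNED;
	bool nonspinnable = owner & RWSEM_NONSPINNABLE;

	/* task, reader_owned and nonspinnable now mirror the comment above */
	(void)task; (void)reader_owned; (void)nonspinnable;
}
#endif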
68
69 #ifdef CONFIG_DEBUG_RWSEMS
70 # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
71 if (!debug_locks_silent && \
72 WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
73 #c, atomic_long_read(&(sem)->count), \
74 (unsigned long) sem->magic, \
75 atomic_long_read(&(sem)->owner), (long)current, \
76 list_empty(&(sem)->wait_list) ? "" : "not ")) \
77 debug_locks_off(); \
78 } while (0)
79 #else
80 # define DEBUG_RWSEMS_WARN_ON(c, sem)
81 #endif
82
83 /*
84 * On 64-bit architectures, the bit definitions of the count are:
85 *
86 * Bit 0 - writer locked bit
87 * Bit 1 - waiters present bit
88 * Bit 2 - lock handoff bit
89 * Bits 3-7 - reserved
90 * Bits 8-62 - 55-bit reader count
91 * Bit 63 - read fail bit
92 *
93 * On 32-bit architectures, the bit definitions of the count are:
94 *
95 * Bit 0 - writer locked bit
96 * Bit 1 - waiters present bit
97 * Bit 2 - lock handoff bit
98 * Bits 3-7 - reserved
99 * Bits 8-30 - 23-bit reader count
100 * Bit 31 - read fail bit
101 *
102 * It is not likely that the most significant bit (read fail bit) will ever
103 * be set. This guard bit is still checked anyway in the down_read() fastpath
104 * just in case we need to use up more of the reader bits for other purposes
105 * in the future.
106 *
107 * atomic_long_fetch_add() is used to obtain the reader lock, whereas
108 * atomic_long_cmpxchg() will be used to obtain the writer lock.
109 *
110 * There are three places where the lock handoff bit may be set or cleared.
111 * 1) rwsem_mark_wake() for readers -- set, clear
112 * 2) rwsem_try_write_lock() for writers -- set, clear
113 * 3) rwsem_del_waiter() -- clear
114 *
115 * For all the above cases, wait_lock will be held. A writer must also
116 * be the first one in the wait_list to be eligible for setting the handoff
117 * bit. So concurrent setting/clearing of handoff bit is not possible.
118 */
119 #define RWSEM_WRITER_LOCKED (1UL << 0)
120 #define RWSEM_FLAG_WAITERS (1UL << 1)
121 #define RWSEM_FLAG_HANDOFF (1UL << 2)
122 #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
123
124 #define RWSEM_READER_SHIFT 8
125 #define RWSEM_READER_BIAS (1UL << RWSEM_READER_SHIFT)
126 #define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1))
127 #define RWSEM_WRITER_MASK RWSEM_WRITER_LOCKED
128 #define RWSEM_LOCK_MASK (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
129 #define RWSEM_READ_FAILED_MASK (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
130 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
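/*
 * Illustrative sketch only: extracting the reader count from the count
 * word using the masks above. For example, a count of 0x301 means the
 * writer locked bit is set and three reader bias units are present (a
 * transient state, e.g. readers that added their bias while a writer
 * still holds the lock and will back it out in the slowpath).
 */
#if 0
static inline long rwsem_reader_count_sketch(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	/* The reader count lives in bits 8 and up. */
	return (count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT;
}
#endif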
131
132 /*
133 * All writes to owner are protected by WRITE_ONCE() to make sure that
134 * store tearing can't happen as optimistic spinners may read and use
135 * the owner value concurrently without lock. Read from owner, however,
136 * may not need READ_ONCE() as long as the pointer value is only used
137 * for comparison and isn't being dereferenced.
138 */
139 static inline void rwsem_set_owner(struct rw_semaphore *sem)
140 {
141 atomic_long_set(&sem->owner, (long)current);
142 }
143
144 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
145 {
146 atomic_long_set(&sem->owner, 0);
147 }
148
149 /*
150 * Test the flags in the owner field.
151 */
152 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
153 {
154 return atomic_long_read(&sem->owner) & flags;
155 }
156
157 /*
158 * The task_struct pointer of the last owning reader will be left in
159 * the owner field.
160 *
161 * Note that the owner value just indicates that the task has owned the
162 * rwsem previously; it may not be the real owner or one of the real owners
163 * anymore when that field is examined, so take it with a grain of salt.
164 *
165 * The reader non-spinnable bit is preserved.
166 */
167 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
168 struct task_struct *owner)
169 {
170 unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
171 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
172
173 atomic_long_set(&sem->owner, val);
174 }
175
176 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
177 {
178 __rwsem_set_reader_owned(sem, current);
179 }
180
181 /*
182 * Return true if the rwsem is owned by a reader.
183 */
184 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
185 {
186 #ifdef CONFIG_DEBUG_RWSEMS
187 /*
188 * Check the count to see if it is write-locked.
189 */
190 long count = atomic_long_read(&sem->count);
191
192 if (count & RWSEM_WRITER_MASK)
193 return false;
194 #endif
195 return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
196 }
197
198 #ifdef CONFIG_DEBUG_RWSEMS
199 /*
200 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
201 * is a task pointer in the owner field of a reader-owned rwsem, it will be
202 * the real owner or one of the real owners. The only exception is when the
203 * unlock is done by up_read_non_owner().
204 */
205 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
206 {
207 unsigned long val = atomic_long_read(&sem->owner);
208
209 while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
210 if (atomic_long_try_cmpxchg(&sem->owner, &val,
211 val & RWSEM_OWNER_FLAGS_MASK))
212 return;
213 }
214 }
215 #else
216 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
217 {
218 }
219 #endif
220
221 /*
222 * Set the RWSEM_NONSPINNABLE bit if the RWSEM_READER_OWNED flag
223 * remains set. Otherwise, the operation will be aborted.
224 */
225 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
226 {
227 unsigned long owner = atomic_long_read(&sem->owner);
228
229 do {
230 if (!(owner & RWSEM_READER_OWNED))
231 break;
232 if (owner & RWSEM_NONSPINNABLE)
233 break;
234 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
235 owner | RWSEM_NONSPINNABLE));
236 }
237
238 static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
239 {
240 *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
241
242 if (WARN_ON_ONCE(*cntp < 0))
243 rwsem_set_nonspinnable(sem);
244
245 if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
246 rwsem_set_reader_owned(sem);
247 trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
248 return true;
249 }
250
251 return false;
252 }
253
254 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
255 {
256 long tmp = RWSEM_UNLOCKED_VALUE;
257
258 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
259 trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
260 rwsem_set_owner(sem);
261 return true;
262 }
263
264 return false;
265 }
266
267 /*
268 * Return just the real task structure pointer of the owner
269 */
270 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
271 {
272 return (struct task_struct *)
273 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
274 }
275
276 /*
277 * Return the real task structure pointer of the owner and the embedded
278 * flags in the owner. pflags must be non-NULL.
279 */
280 static inline struct task_struct *
281 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
282 {
283 unsigned long owner = atomic_long_read(&sem->owner);
284
285 *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
286 return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
287 }
288
289 /*
290 * Guide to the rw_semaphore's count field.
291 *
292 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
293 * by a writer.
294 *
295 * The lock is owned by readers when
296 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
297 * (2) some of the reader bits are set in count, and
298 * (3) the owner field has the RWSEM_READER_OWNED bit set.
299 *
300 * Having some reader bits set is not enough to guarantee a reader-owned
301 * lock as the readers may be in the process of backing out from the count
302 * and a writer has just released the lock. So another writer may steal
303 * the lock immediately after that.
304 */
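/*
 * Illustrative sketch only: the three reader-owned conditions above as a
 * single check (a hint rather than a guarantee, per the caveat above).
 * The helper name is made up; the debug-only check actually used is
 * is_rwsem_reader_owned() earlier in this file.
 */
#if 0
static inline bool rwsem_reader_owned_sketch(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);
	long owner = atomic_long_read(&sem->owner);

	return !(count & RWSEM_WRITER_LOCKED) &&
	       (count & RWSEM_READER_MASK) &&
	       (owner & RWSEM_READER_OWNED);
}
#endif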
305
306 /*
307 * Initialize an rwsem:
308 */
309 void __init_rwsem(struct rw_semaphore *sem, const char *name,
310 struct lock_class_key *key)
311 {
312 #ifdef CONFIG_DEBUG_LOCK_ALLOC
313 /*
314 * Make sure we are not reinitializing a held semaphore:
315 */
316 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
317 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
318 #endif
319 #ifdef CONFIG_DEBUG_RWSEMS
320 sem->magic = sem;
321 #endif
322 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
323 raw_spin_lock_init(&sem->wait_lock);
324 INIT_LIST_HEAD(&sem->wait_list);
325 atomic_long_set(&sem->owner, 0L);
326 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
327 osq_lock_init(&sem->osq);
328 #endif
329 trace_android_vh_rwsem_init(sem);
330 }
331 EXPORT_SYMBOL(__init_rwsem);
332
333 #define rwsem_first_waiter(sem) \
334 list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
335
336 enum rwsem_wake_type {
337 RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
338 RWSEM_WAKE_READERS, /* Wake readers only */
339 RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
340 };
341
342 /*
343 * The typical HZ value is either 250 or 1000. So set the minimum waiting
344 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
345 * queue before initiating the handoff protocol.
346 */
347 #define RWSEM_WAIT_TIMEOUT DIV_ROUND_UP(HZ, 250)
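/*
 * Worked example of the timeout above: with HZ=1000 this is
 * DIV_ROUND_UP(1000, 250) = 4 jiffies = 4ms; with HZ=250 it is
 * 1 jiffy = 4ms; with HZ=100 it rounds up to 1 jiffy = 10ms.
 */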
348
349 /*
350 * Magic number to batch-wakeup waiting readers, even when writers are
351 * also present in the queue. This both limits the amount of work the
352 * waking thread must do and also prevents any potential counter overflow,
353 * however unlikely.
354 */
355 #define MAX_READERS_WAKEUP 0x100
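/*
 * Worked bound for the batch size above: waking at most 0x100 readers
 * adds at most 0x100 * RWSEM_READER_BIAS = 0x10000 to the count in one
 * go, comfortably below even the 23-bit reader field of 32-bit kernels.
 */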
356
357 static inline void
358 rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
359 {
360 lockdep_assert_held(&sem->wait_lock);
361 list_add_tail(&waiter->list, &sem->wait_list);
362 /* caller will set RWSEM_FLAG_WAITERS */
363 }
364
365 /*
366 * Remove a waiter from the wait_list and clear flags.
367 *
368 * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
369 * this function. Modify with care.
370 */
371 static inline void
372 rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
373 {
374 lockdep_assert_held(&sem->wait_lock);
375 list_del(&waiter->list);
376 if (likely(!list_empty(&sem->wait_list)))
377 return;
378
379 atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
380 }
381
382 /*
383 * handle the lock release when there are processes blocked on it that can now run
384 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
385 * have been set.
386 * - there must be someone on the queue
387 * - the wait_lock must be held by the caller
388 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
389 * to actually wakeup the blocked task(s) and drop the reference count,
390 * preferably when the wait_lock is released
391 * - woken process blocks are discarded from the list after having task zeroed
392 * - writers are only marked woken if downgrading is false
393 *
394 * Implies rwsem_del_waiter() for all woken readers.
395 */
396 static void rwsem_mark_wake(struct rw_semaphore *sem,
397 enum rwsem_wake_type wake_type,
398 struct wake_q_head *wake_q)
399 {
400 struct rwsem_waiter *waiter, *tmp;
401 long oldcount, woken = 0, adjustment = 0;
402 struct list_head wlist;
403
404 lockdep_assert_held(&sem->wait_lock);
405
406 /*
407 * Take a peek at the queue head waiter such that we can determine
408 * the wakeup(s) to perform.
409 */
410 waiter = rwsem_first_waiter(sem);
411
412 if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
413 if (wake_type == RWSEM_WAKE_ANY) {
414 /*
415 * Mark the writer at the front of the queue for wakeup.
416 * Until the task is actually awoken later by
417 * the caller, other writers are able to steal it.
418 * Readers, on the other hand, will block as they
419 * will notice the queued writer.
420 */
421 wake_q_add(wake_q, waiter->task);
422 lockevent_inc(rwsem_wake_writer);
423 }
424
425 return;
426 }
427
428 /*
429 * No reader wakeup if there are too many of them already.
430 */
431 if (unlikely(atomic_long_read(&sem->count) < 0))
432 return;
433
434 /*
435 * Writers might steal the lock before we grant it to the next reader.
436 * We prefer to do the first reader grant before counting readers
437 * so we can bail out early if a writer stole the lock.
438 */
439 if (wake_type != RWSEM_WAKE_READ_OWNED) {
440 struct task_struct *owner;
441
442 adjustment = RWSEM_READER_BIAS;
443 oldcount = atomic_long_fetch_add(adjustment, &sem->count);
444 if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
445 /*
446 * When we've been waiting "too" long (for writers
447 * to give up the lock), request a HANDOFF to
448 * force the issue.
449 */
450 if (time_after(jiffies, waiter->timeout)) {
451 if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
452 adjustment -= RWSEM_FLAG_HANDOFF;
453 lockevent_inc(rwsem_rlock_handoff);
454 }
455 waiter->handoff_set = true;
456 }
457
458 atomic_long_add(-adjustment, &sem->count);
459 return;
460 }
461 /*
462 * Set it to reader-owned to give spinners an early
463 * indication that readers now have the lock.
464 * The reader nonspinnable bit seen at slowpath entry of
465 * the reader is copied over.
466 */
467 owner = waiter->task;
468 __rwsem_set_reader_owned(sem, owner);
469 }
470
471 /*
472 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
473 * queue. We know that at least one will be woken, as accounted
474 * for above. Note we increment the 'active part' of the count by the
475 * number of readers before waking any processes up.
476 *
477 * This is an adaptation of the phase-fair R/W locks where at the
478 * reader phase (first waiter is a reader), all readers are eligible
479 * to acquire the lock at the same time irrespective of their order
480 * in the queue. The writers acquire the lock according to their
481 * order in the queue.
482 *
483 * We have to do wakeup in 2 passes to prevent the possibility that
484 * the reader count may be decremented before it is incremented. It
485 * is because the to-be-woken waiter may not have slept yet. So it
486 * may see waiter->task cleared, finish its critical section and
487 * do an unlock before the reader count is incremented.
488 *
489 * 1) Collect the read-waiters in a separate list, count them and
490 * fully increment the reader count in rwsem.
491 * 2) For each waiter in the new list, clear waiter->task and
492 * put them into wake_q to be woken up later.
493 */
494 INIT_LIST_HEAD(&wlist);
495 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
496 if (waiter->type == RWSEM_WAITING_FOR_WRITE)
497 continue;
498
499 woken++;
500 list_move_tail(&waiter->list, &wlist);
501
502 /*
503 * Limit # of readers that can be woken up per wakeup call.
504 */
505 if (woken >= MAX_READERS_WAKEUP)
506 break;
507 }
508
509 adjustment = woken * RWSEM_READER_BIAS - adjustment;
510 lockevent_cond_inc(rwsem_wake_reader, woken);
511
512 oldcount = atomic_long_read(&sem->count);
513 if (list_empty(&sem->wait_list)) {
514 /*
515 * Combined with list_move_tail() above, this implies
516 * rwsem_del_waiter().
517 */
518 adjustment -= RWSEM_FLAG_WAITERS;
519 if (oldcount & RWSEM_FLAG_HANDOFF)
520 adjustment -= RWSEM_FLAG_HANDOFF;
521 } else if (woken) {
522 /*
523 * When we've woken a reader, we no longer need to force
524 * writers to give up the lock and we can clear HANDOFF.
525 */
526 if (oldcount & RWSEM_FLAG_HANDOFF)
527 adjustment -= RWSEM_FLAG_HANDOFF;
528 }
529
530 if (adjustment)
531 atomic_long_add(adjustment, &sem->count);
532
533 /* 2nd pass */
534 list_for_each_entry_safe(waiter, tmp, &wlist, list) {
535 struct task_struct *tsk;
536
537 tsk = waiter->task;
538 get_task_struct(tsk);
539
540 /*
541 * Ensure calling get_task_struct() before setting the reader
542 * waiter to nil such that rwsem_down_read_slowpath() cannot
543 * race with do_exit() by always holding a reference count
544 * to the task to wakeup.
545 */
546 smp_store_release(&waiter->task, NULL);
547 /*
548 * Ensure issuing the wakeup (either by us or someone else)
549 * after setting the reader waiter to nil.
550 */
551 wake_q_add_safe(wake_q, tsk);
552 }
553 }
554
555 /*
556 * This function must be called with the sem->wait_lock held to prevent
557 * race conditions between checking the rwsem wait list and setting the
558 * sem->count accordingly.
559 *
560 * Implies rwsem_del_waiter() on success.
561 */
562 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
563 struct rwsem_waiter *waiter)
564 {
565 struct rwsem_waiter *first = rwsem_first_waiter(sem);
566 long count, new;
567
568 lockdep_assert_held(&sem->wait_lock);
569
570 count = atomic_long_read(&sem->count);
571 do {
572 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
573
574 if (has_handoff) {
575 /*
576 * Honor handoff bit and yield only when the first
577 * waiter is the one that set it. Otherwise, we
578 * still try to acquire the rwsem.
579 */
580 if (first->handoff_set && (waiter != first))
581 return false;
582 }
583
584 new = count;
585
586 if (count & RWSEM_LOCK_MASK) {
587 /*
588 * A waiter (first or not) can set the handoff bit
589 * if it is an RT task or has waited in the wait queue
590 * for too long.
591 */
592 if (has_handoff || (!rt_task(waiter->task) &&
593 !time_after(jiffies, waiter->timeout)))
594 return false;
595
596 new |= RWSEM_FLAG_HANDOFF;
597 } else {
598 new |= RWSEM_WRITER_LOCKED;
599 new &= ~RWSEM_FLAG_HANDOFF;
600
601 if (list_is_singular(&sem->wait_list))
602 new &= ~RWSEM_FLAG_WAITERS;
603 }
604 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
605
606 /*
607 * We have either acquired the lock with handoff bit cleared or set
608 * the handoff bit. Only the first waiter can have its handoff_set
609 * set here to enable optimistic spinning in slowpath loop.
610 */
611 if (new & RWSEM_FLAG_HANDOFF) {
612 first->handoff_set = true;
613 lockevent_inc(rwsem_wlock_handoff);
614 return false;
615 }
616
617 /*
618 * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
619 * success.
620 */
621 list_del(&waiter->list);
622 rwsem_set_owner(sem);
623 return true;
624 }
625
626 /*
627 * The rwsem_spin_on_owner() function returns the following 4 values
628 * depending on the lock owner state.
629 * OWNER_NULL : owner is currently NULL
630 * OWNER_WRITER: when owner changes and is a writer
631 * OWNER_READER: when owner changes and the new owner may be a reader.
632 * OWNER_NONSPINNABLE:
633 * when optimistic spinning has to stop because either the
634 * owner stops running, is unknown, or its timeslice has
635 * been used up.
636 */
637 enum owner_state {
638 OWNER_NULL = 1 << 0,
639 OWNER_WRITER = 1 << 1,
640 OWNER_READER = 1 << 2,
641 OWNER_NONSPINNABLE = 1 << 3,
642 };
643
644 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
645 /*
646 * Try to acquire write lock before the writer has been put on wait queue.
647 */
648 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
649 {
650 long count = atomic_long_read(&sem->count);
651
652 while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
653 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
654 count | RWSEM_WRITER_LOCKED)) {
655 rwsem_set_owner(sem);
656 lockevent_inc(rwsem_opt_lock);
657 return true;
658 }
659 }
660 return false;
661 }
662
663 static inline bool owner_on_cpu(struct task_struct *owner)
664 {
665 /*
666 * Due to lock holder preemption, we skip spinning if the
667 * task is not on a cpu or its cpu is preempted.
668 */
669 return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
670 }
671
672 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
673 {
674 struct task_struct *owner;
675 unsigned long flags;
676 bool ret = true;
677
678 if (need_resched()) {
679 lockevent_inc(rwsem_opt_fail);
680 return false;
681 }
682
683 preempt_disable();
684 rcu_read_lock();
685 owner = rwsem_owner_flags(sem, &flags);
686 /*
687 * Don't check the read-owner as the entry may be stale.
688 */
689 if ((flags & RWSEM_NONSPINNABLE) ||
690 (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
691 ret = false;
692 rcu_read_unlock();
693 preempt_enable();
694 trace_android_vh_rwsem_can_spin_on_owner(sem, &ret);
695
696 lockevent_cond_inc(rwsem_opt_fail, !ret);
697 return ret;
698 }
699
700 #define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER)
701
702 static inline enum owner_state
703 rwsem_owner_state(struct task_struct *owner, unsigned long flags)
704 {
705 if (flags & RWSEM_NONSPINNABLE)
706 return OWNER_NONSPINNABLE;
707
708 if (flags & RWSEM_READER_OWNED)
709 return OWNER_READER;
710
711 return owner ? OWNER_WRITER : OWNER_NULL;
712 }
713
714 static noinline enum owner_state
715 rwsem_spin_on_owner(struct rw_semaphore *sem)
716 {
717 struct task_struct *new, *owner;
718 unsigned long flags, new_flags;
719 enum owner_state state;
720 int cnt = 0;
721 bool time_out = false;
722
723 owner = rwsem_owner_flags(sem, &flags);
724 state = rwsem_owner_state(owner, flags);
725 if (state != OWNER_WRITER)
726 return state;
727
728 rcu_read_lock();
729 for (;;) {
730 trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, true);
731 if (time_out)
732 break;
733 /*
734 * When a waiting writer sets the handoff flag, it may spin
735 * on the owner as well. Once that writer acquires the lock,
736 * we can spin on it. So we don't need to quit even when the
737 * handoff bit is set.
738 */
739 new = rwsem_owner_flags(sem, &new_flags);
740 if ((new != owner) || (new_flags != flags)) {
741 state = rwsem_owner_state(new, new_flags);
742 break;
743 }
744
745 /*
746 * Ensure we emit the owner->on_cpu dereference _after_
747 * checking sem->owner still matches owner; if that fails,
748 * owner might point to free()d memory; if it still matches,
749 * the rcu_read_lock() ensures the memory stays valid.
750 */
751 barrier();
752
753 if (need_resched() || !owner_on_cpu(owner)) {
754 state = OWNER_NONSPINNABLE;
755 break;
756 }
757
758 cpu_relax();
759 }
760 rcu_read_unlock();
761
762 return state;
763 }
764
765 /*
766 * Calculate reader-owned rwsem spinning threshold for writer
767 *
768 * The more readers own the rwsem, the longer it will take for them to
769 * wind down and free the rwsem. So the empirical formula used to
770 * determine the actual spinning time limit here is:
771 *
772 * Spinning threshold = (10 + nr_readers/2)us
773 *
774 * The limit is capped to a maximum of 25us (30 readers). This is just
775 * a heuristic and is subject to change in the future.
776 */
777 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
778 {
779 long count = atomic_long_read(&sem->count);
780 int readers = count >> RWSEM_READER_SHIFT;
781 u64 delta;
782
783 if (readers > 30)
784 readers = 30;
785 delta = (20 + readers) * NSEC_PER_USEC / 2;
786
787 return sched_clock() + delta;
788 }
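/*
 * Worked example of the threshold above (illustrative): with 8 readers,
 * delta = (20 + 8) * NSEC_PER_USEC / 2 = 14us; with 30 or more readers
 * the clamp gives (20 + 30) / 2 = 25us, matching the (10 + nr_readers/2)us
 * formula in the comment.
 */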
789
790 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
791 {
792 bool taken = false;
793 int prev_owner_state = OWNER_NULL;
794 int loop = 0;
795 u64 rspin_threshold = 0;
796 int cnt = 0;
797 bool time_out = false;
798
799 preempt_disable();
800
801 /* sem->wait_lock should not be held when doing optimistic spinning */
802 if (!osq_lock(&sem->osq))
803 goto done;
804
805 /*
806 * Optimistically spin on the owner field and attempt to acquire the
807 * lock whenever the owner changes. Spinning will be stopped when:
808 * 1) the owning writer isn't running; or
809 * 2) readers own the lock and spinning time has exceeded limit.
810 */
811 for (;;) {
812 enum owner_state owner_state;
813
814 trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, false);
815 if (time_out)
816 break;
817 owner_state = rwsem_spin_on_owner(sem);
818 if (!(owner_state & OWNER_SPINNABLE))
819 break;
820
821 /*
822 * Try to acquire the lock
823 */
824 taken = rwsem_try_write_lock_unqueued(sem);
825
826 if (taken)
827 break;
828
829 /*
830 * Time-based reader-owned rwsem optimistic spinning
831 */
832 if (owner_state == OWNER_READER) {
833 /*
834 * Re-initialize rspin_threshold every time when
835 * the owner state changes from non-reader to reader.
836 * This allows a writer to steal the lock in between
837 * 2 reader phases and have the threshold reset at
838 * the beginning of the 2nd reader phase.
839 */
840 if (prev_owner_state != OWNER_READER) {
841 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
842 break;
843 rspin_threshold = rwsem_rspin_threshold(sem);
844 loop = 0;
845 }
846
847 /*
848 * Check time threshold once every 16 iterations to
849 * avoid calling sched_clock() too frequently so
850 * as to reduce the average latency between the times
851 * when the lock becomes free and when the spinner
852 * is ready to do a trylock.
853 */
854 else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
855 rwsem_set_nonspinnable(sem);
856 lockevent_inc(rwsem_opt_nospin);
857 break;
858 }
859 }
860
861 /*
862 * An RT task cannot do optimistic spinning if it cannot
863 * be sure the lock holder is running or live-lock may
864 * happen if the current task and the lock holder happen
865 * to run in the same CPU. However, aborting optimistic
866 * spinning while a NULL owner is detected may miss some
867 * opportunity where spinning can continue without causing
868 * problem.
869 *
870 * There are 2 possible cases where an RT task may be able
871 * to continue spinning.
872 *
873 * 1) The lock owner is in the process of releasing the
874 * lock, sem->owner is cleared but the lock has not
875 * been released yet.
876 * 2) The lock was free and owner cleared, but another
877 * task just came in and acquired the lock before
878 * we try to get it. The new owner may be a spinnable
879 * writer.
880 *
881 * To take advantage of the two scenarios listed above, the RT
882 * task is made to retry one more time to see if it can
883 * acquire the lock or continue spinning on the new owning
884 * writer. Of course, if the time lag is long enough or the
885 * new owner is not a writer or spinnable, the RT task will
886 * quit spinning.
887 *
888 * If the owner is a writer, the need_resched() check is
889 * done inside rwsem_spin_on_owner(). If the owner is not
890 * a writer, need_resched() check needs to be done here.
891 */
892 if (owner_state != OWNER_WRITER) {
893 if (need_resched())
894 break;
895 if (rt_task(current) &&
896 (prev_owner_state != OWNER_WRITER))
897 break;
898 }
899 prev_owner_state = owner_state;
900
901 /*
902 * The cpu_relax() call is a compiler barrier which forces
903 * everything in this loop to be re-loaded. We don't need
904 * memory barriers as we'll eventually observe the right
905 * values at the cost of a few extra spins.
906 */
907 cpu_relax();
908 }
909 osq_unlock(&sem->osq);
910 trace_android_vh_rwsem_opt_spin_finish(sem, taken);
911 done:
912 preempt_enable();
913 lockevent_cond_inc(rwsem_opt_fail, !taken);
914 return taken;
915 }
916
917 /*
918 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
919 * only be called when the reader count reaches 0.
920 */
921 static inline void clear_nonspinnable(struct rw_semaphore *sem)
922 {
923 if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
924 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
925 }
926
927 #else
928 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
929 {
930 return false;
931 }
932
933 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
934 {
935 return false;
936 }
937
938 static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
939
940 static inline enum owner_state
941 rwsem_spin_on_owner(struct rw_semaphore *sem)
942 {
943 return OWNER_NONSPINNABLE;
944 }
945 #endif
946
947 /*
948 * Wait for the read lock to be granted
949 */
950 static struct rw_semaphore __sched *
951 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
952 {
953 long adjustment = -RWSEM_READER_BIAS;
954 long rcnt = (count >> RWSEM_READER_SHIFT);
955 struct rwsem_waiter waiter;
956 DEFINE_WAKE_Q(wake_q);
957 bool wake = false;
958 bool already_on_list = false;
959
960 /*
961 * To prevent a constant stream of readers from starving a sleeping
962 * waiter, don't attempt optimistic lock stealing if the lock is
963 * currently owned by readers.
964 */
965 if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
966 (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
967 goto queue;
968
969 /*
970 * Reader optimistic lock stealing.
971 */
972 if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
973 rwsem_set_reader_owned(sem);
974 lockevent_inc(rwsem_rlock_steal);
975
976 /*
977 * Wake up other readers in the wait queue if it is
978 * the first reader.
979 */
980 if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
981 raw_spin_lock_irq(&sem->wait_lock);
982 if (!list_empty(&sem->wait_list))
983 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
984 &wake_q);
985 raw_spin_unlock_irq(&sem->wait_lock);
986 wake_up_q(&wake_q);
987 }
988 trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
989 return sem;
990 }
991
992 queue:
993 waiter.task = current;
994 waiter.type = RWSEM_WAITING_FOR_READ;
995 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
996 waiter.handoff_set = false;
997
998 raw_spin_lock_irq(&sem->wait_lock);
999 if (list_empty(&sem->wait_list)) {
1000 /*
1001 * In case the wait queue is empty and the lock isn't owned
1002 * by a writer and doesn't have the handoff bit set, this reader
1003 * can exit the slowpath and return immediately as its
1004 * RWSEM_READER_BIAS has already been set in the count.
1005 */
1006 if (!(atomic_long_read(&sem->count) &
1007 (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
1008 /* Provide lock ACQUIRE */
1009 smp_acquire__after_ctrl_dep();
1010 raw_spin_unlock_irq(&sem->wait_lock);
1011 rwsem_set_reader_owned(sem);
1012 lockevent_inc(rwsem_rlock_fast);
1013 return sem;
1014 }
1015 adjustment += RWSEM_FLAG_WAITERS;
1016 }
1017
1018 trace_android_vh_alter_rwsem_list_add(
1019 &waiter,
1020 sem, &already_on_list);
1021 if (!already_on_list)
1022 rwsem_add_waiter(sem, &waiter);
1023
1024 /* we're now waiting on the lock, but no longer actively locking */
1025 count = atomic_long_add_return(adjustment, &sem->count);
1026
1027 /*
1028 * If there are no active locks, wake the front queued process(es).
1029 *
1030 * If there are no writers and we are first in the queue,
1031 * wake our own waiter to join the existing active readers !
1032 */
1033 if (!(count & RWSEM_LOCK_MASK)) {
1034 clear_nonspinnable(sem);
1035 wake = true;
1036 }
1037 if (wake || (!(count & RWSEM_WRITER_MASK) &&
1038 (adjustment & RWSEM_FLAG_WAITERS)))
1039 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1040
1041 trace_android_vh_rwsem_wake(sem);
1042 raw_spin_unlock_irq(&sem->wait_lock);
1043 wake_up_q(&wake_q);
1044
1045 /* wait to be given the lock */
1046 trace_android_vh_rwsem_read_wait_start(sem);
1047 for (;;) {
1048 set_current_state(state);
1049 if (!smp_load_acquire(&waiter.task)) {
1050 /* Matches rwsem_mark_wake()'s smp_store_release(). */
1051 break;
1052 }
1053 if (signal_pending_state(state, current)) {
1054 raw_spin_lock_irq(&sem->wait_lock);
1055 if (waiter.task)
1056 goto out_nolock;
1057 raw_spin_unlock_irq(&sem->wait_lock);
1058 /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
1059 break;
1060 }
1061 schedule_preempt_disabled();
1062 lockevent_inc(rwsem_sleep_reader);
1063 }
1064
1065 __set_current_state(TASK_RUNNING);
1066 trace_android_vh_rwsem_read_wait_finish(sem);
1067 lockevent_inc(rwsem_rlock);
1068 trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
1069 return sem;
1070
1071 out_nolock:
1072 rwsem_del_waiter(sem, &waiter);
1073 raw_spin_unlock_irq(&sem->wait_lock);
1074 __set_current_state(TASK_RUNNING);
1075 trace_android_vh_rwsem_read_wait_finish(sem);
1076 lockevent_inc(rwsem_rlock_fail);
1077 return ERR_PTR(-EINTR);
1078 }
1079
1080 /*
1081 * Wait until we successfully acquire the write lock
1082 */
1083 static struct rw_semaphore __sched *
1084 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1085 {
1086 long count;
1087 struct rwsem_waiter waiter;
1088 DEFINE_WAKE_Q(wake_q);
1089 bool already_on_list = false;
1090
1091 /* do optimistic spinning and steal lock if possible */
1092 if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
1093 /* rwsem_optimistic_spin() implies ACQUIRE on success */
1094 trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
1095 return sem;
1096 }
1097
1098 /*
1099 * Optimistic spinning failed, proceed to the slowpath
1100 * and block until we can acquire the sem.
1101 */
1102 waiter.task = current;
1103 waiter.type = RWSEM_WAITING_FOR_WRITE;
1104 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1105 waiter.handoff_set = false;
1106
1107 raw_spin_lock_irq(&sem->wait_lock);
1108
1109 trace_android_vh_alter_rwsem_list_add(
1110 &waiter,
1111 sem, &already_on_list);
1112 if (!already_on_list)
1113 rwsem_add_waiter(sem, &waiter);
1114
1115 /* we're now waiting on the lock */
1116 if (rwsem_first_waiter(sem) != &waiter) {
1117 count = atomic_long_read(&sem->count);
1118
1119 /*
1120 * If there were already threads queued before us and:
1121 * 1) there are no active locks, wake the front
1122 * queued process(es) as the handoff bit might be set.
1123 * 2) there are no active writers and some readers, the lock
1124 * must be read owned; so we try to wake any read lock
1125 * waiters that were queued ahead of us.
1126 */
1127 if (count & RWSEM_WRITER_MASK)
1128 goto wait;
1129
1130 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1131 ? RWSEM_WAKE_READERS
1132 : RWSEM_WAKE_ANY, &wake_q);
1133
1134 if (!wake_q_empty(&wake_q)) {
1135 /*
1136 * We want to minimize wait_lock hold time especially
1137 * when a large number of readers are to be woken up.
1138 */
1139 raw_spin_unlock_irq(&sem->wait_lock);
1140 wake_up_q(&wake_q);
1141 wake_q_init(&wake_q); /* Used again, reinit */
1142 raw_spin_lock_irq(&sem->wait_lock);
1143 }
1144 } else {
1145 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1146 }
1147
1148 wait:
1149 trace_android_vh_rwsem_wake(sem);
1150 /* wait until we successfully acquire the lock */
1151 trace_android_vh_rwsem_write_wait_start(sem);
1152 set_current_state(state);
1153 for (;;) {
1154 if (rwsem_try_write_lock(sem, &waiter)) {
1155 /* rwsem_try_write_lock() implies ACQUIRE on success */
1156 break;
1157 }
1158
1159 raw_spin_unlock_irq(&sem->wait_lock);
1160
1161 if (signal_pending_state(state, current))
1162 goto out_nolock;
1163
1164 /*
1165 * After setting the handoff bit and failing to acquire
1166 * the lock, attempt to spin on owner to accelerate lock
1167 * transfer. If the previous owner is an on-cpu writer and it
1168 * has just released the lock, OWNER_NULL will be returned.
1169 * In this case, we attempt to acquire the lock again
1170 * without sleeping.
1171 */
1172 if (waiter.handoff_set) {
1173 enum owner_state owner_state;
1174
1175 preempt_disable();
1176 owner_state = rwsem_spin_on_owner(sem);
1177 preempt_enable();
1178
1179 if (owner_state == OWNER_NULL)
1180 goto trylock_again;
1181 }
1182
1183 schedule();
1184 lockevent_inc(rwsem_sleep_writer);
1185 set_current_state(state);
1186 trylock_again:
1187 raw_spin_lock_irq(&sem->wait_lock);
1188 }
1189 __set_current_state(TASK_RUNNING);
1190 trace_android_vh_rwsem_write_wait_finish(sem);
1191 raw_spin_unlock_irq(&sem->wait_lock);
1192 lockevent_inc(rwsem_wlock);
1193 trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
1194 return sem;
1195
1196 out_nolock:
1197 __set_current_state(TASK_RUNNING);
1198 trace_android_vh_rwsem_write_wait_finish(sem);
1199 raw_spin_lock_irq(&sem->wait_lock);
1200 rwsem_del_waiter(sem, &waiter);
1201 if (!list_empty(&sem->wait_list))
1202 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1203 raw_spin_unlock_irq(&sem->wait_lock);
1204 wake_up_q(&wake_q);
1205 lockevent_inc(rwsem_wlock_fail);
1206 return ERR_PTR(-EINTR);
1207 }
1208
1209 /*
1210 * handle waking up a waiter on the semaphore
1211 * - up_read/up_write has decremented the active part of count if we come here
1212 */
1213 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
1214 {
1215 unsigned long flags;
1216 DEFINE_WAKE_Q(wake_q);
1217
1218 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1219
1220 if (!list_empty(&sem->wait_list))
1221 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1222 trace_android_vh_rwsem_wake_finish(sem);
1223
1224 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1225 wake_up_q(&wake_q);
1226
1227 return sem;
1228 }
1229
1230 /*
1231 * downgrade a write lock into a read lock
1232 * - caller incremented waiting part of count and discovered it still negative
1233 * - just wake up any readers at the front of the queue
1234 */
1235 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1236 {
1237 unsigned long flags;
1238 DEFINE_WAKE_Q(wake_q);
1239
1240 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1241
1242 if (!list_empty(&sem->wait_list))
1243 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1244
1245 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1246 wake_up_q(&wake_q);
1247
1248 return sem;
1249 }
1250
1251 /*
1252 * lock for reading
1253 */
1254 static __always_inline int __down_read_common(struct rw_semaphore *sem, int state)
1255 {
1256 int ret = 0;
1257 long count;
1258
1259 preempt_disable();
1260 if (!rwsem_read_trylock(sem, &count)) {
1261 if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
1262 ret = -EINTR;
1263 goto out;
1264 }
1265 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1266 }
1267 out:
1268 preempt_enable();
1269 return ret;
1270 }
1271
1272 static __always_inline void __down_read(struct rw_semaphore *sem)
1273 {
1274 __down_read_common(sem, TASK_UNINTERRUPTIBLE);
1275 }
1276
1277 static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
1278 {
1279 return __down_read_common(sem, TASK_INTERRUPTIBLE);
1280 }
1281
1282 static __always_inline int __down_read_killable(struct rw_semaphore *sem)
1283 {
1284 return __down_read_common(sem, TASK_KILLABLE);
1285 }
1286
1287 static inline int __down_read_trylock(struct rw_semaphore *sem)
1288 {
1289 int ret = 0;
1290 long tmp;
1291
1292 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1293
1294 preempt_disable();
1295 tmp = atomic_long_read(&sem->count);
1296 while (!(tmp & RWSEM_READ_FAILED_MASK)) {
1297 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1298 tmp + RWSEM_READER_BIAS)) {
1299 rwsem_set_reader_owned(sem);
1300 ret = 1;
1301 trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
1302 break;
1303 }
1304 }
1305 preempt_enable();
1306 return ret;
1307 }
1308
1309 /*
1310 * lock for writing
1311 */
1312 static inline int __down_write_common(struct rw_semaphore *sem, int state)
1313 {
1314 if (unlikely(!rwsem_write_trylock(sem))) {
1315 if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
1316 return -EINTR;
1317 }
1318
1319 return 0;
1320 }
1321
1322 static inline void __down_write(struct rw_semaphore *sem)
1323 {
1324 __down_write_common(sem, TASK_UNINTERRUPTIBLE);
1325 }
1326
1327 static inline int __down_write_killable(struct rw_semaphore *sem)
1328 {
1329 return __down_write_common(sem, TASK_KILLABLE);
1330 }
1331
1332 static inline int __down_write_trylock(struct rw_semaphore *sem)
1333 {
1334 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1335 return rwsem_write_trylock(sem);
1336 }
1337
1338 /*
1339 * unlock after reading
1340 */
1341 static inline void __up_read(struct rw_semaphore *sem)
1342 {
1343 long tmp;
1344
1345 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1346 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1347
1348 preempt_disable();
1349 trace_android_vh_record_rwsem_lock_starttime(current, 0);
1350 rwsem_clear_reader_owned(sem);
1351 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1352 DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1353 if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
1354 RWSEM_FLAG_WAITERS)) {
1355 clear_nonspinnable(sem);
1356 rwsem_wake(sem);
1357 }
1358 preempt_enable();
1359 }
1360
1361 /*
1362 * unlock after writing
1363 */
1364 static inline void __up_write(struct rw_semaphore *sem)
1365 {
1366 long tmp;
1367
1368 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1369 /*
1370 * sem->owner may differ from current if the ownership is transferred
1371 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
1372 */
1373 DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1374 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1375
1376 trace_android_vh_record_rwsem_lock_starttime(current, 0);
1377 rwsem_clear_owner(sem);
1378 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1379 if (unlikely(tmp & RWSEM_FLAG_WAITERS))
1380 rwsem_wake(sem);
1381 }
1382
1383 /*
1384 * downgrade write lock to read lock
1385 */
1386 static inline void __downgrade_write(struct rw_semaphore *sem)
1387 {
1388 long tmp;
1389
1390 /*
1391 * When downgrading from exclusive to shared ownership,
1392 * anything inside the write-locked region cannot leak
1393 * into the read side. In contrast, anything in the
1394 * read-locked region is ok to be re-ordered into the
1395 * write side. As such, rely on RELEASE semantics.
1396 */
1397 DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1398 tmp = atomic_long_fetch_add_release(
1399 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1400 rwsem_set_reader_owned(sem);
1401 if (tmp & RWSEM_FLAG_WAITERS)
1402 rwsem_downgrade_wake(sem);
1403 }
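/*
 * Worked example of the adjustment above: starting from a bare write
 * lock (count == 0x1), the single atomic add of -RWSEM_WRITER_LOCKED +
 * RWSEM_READER_BIAS (-0x1 + 0x100) leaves count == 0x100, i.e. one
 * reader and no writer, with no window in which the lock appears free.
 */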
1404
1405 #else /* !CONFIG_PREEMPT_RT */
1406
1407 #define RT_MUTEX_BUILD_MUTEX
1408 #include "rtmutex.c"
1409
1410 #define rwbase_set_and_save_current_state(state) \
1411 set_current_state(state)
1412
1413 #define rwbase_restore_current_state() \
1414 __set_current_state(TASK_RUNNING)
1415
1416 #define rwbase_rtmutex_lock_state(rtm, state) \
1417 __rt_mutex_lock(rtm, state)
1418
1419 #define rwbase_rtmutex_slowlock_locked(rtm, state) \
1420 __rt_mutex_slowlock_locked(rtm, NULL, state)
1421
1422 #define rwbase_rtmutex_unlock(rtm) \
1423 __rt_mutex_unlock(rtm)
1424
1425 #define rwbase_rtmutex_trylock(rtm) \
1426 __rt_mutex_trylock(rtm)
1427
1428 #define rwbase_signal_pending_state(state, current) \
1429 signal_pending_state(state, current)
1430
1431 #define rwbase_schedule() \
1432 schedule()
1433
1434 #include "rwbase_rt.c"
1435
1436 void __init_rwsem(struct rw_semaphore *sem, const char *name,
1437 struct lock_class_key *key)
1438 {
1439 init_rwbase_rt(&(sem)->rwbase);
1440
1441 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1442 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
1443 lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
1444 #endif
1445 }
1446 EXPORT_SYMBOL(__init_rwsem);
1447
1448 static inline void __down_read(struct rw_semaphore *sem)
1449 {
1450 rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1451 }
1452
1453 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1454 {
1455 return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
1456 }
1457
1458 static inline int __down_read_killable(struct rw_semaphore *sem)
1459 {
1460 return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
1461 }
1462
1463 static inline int __down_read_trylock(struct rw_semaphore *sem)
1464 {
1465 return rwbase_read_trylock(&sem->rwbase);
1466 }
1467
1468 static inline void __up_read(struct rw_semaphore *sem)
1469 {
1470 rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
1471 }
1472
1473 static inline void __sched __down_write(struct rw_semaphore *sem)
1474 {
1475 rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
1476 }
1477
1478 static inline int __sched __down_write_killable(struct rw_semaphore *sem)
1479 {
1480 return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
1481 }
1482
1483 static inline int __down_write_trylock(struct rw_semaphore *sem)
1484 {
1485 return rwbase_write_trylock(&sem->rwbase);
1486 }
1487
1488 static inline void __up_write(struct rw_semaphore *sem)
1489 {
1490 rwbase_write_unlock(&sem->rwbase);
1491 }
1492
1493 static inline void __downgrade_write(struct rw_semaphore *sem)
1494 {
1495 rwbase_write_downgrade(&sem->rwbase);
1496 }
1497
1498 /* Debug stubs for the common API */
1499 #define DEBUG_RWSEMS_WARN_ON(c, sem)
1500
1501 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
1502 struct task_struct *owner)
1503 {
1504 }
1505
1506 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
1507 {
1508 int count = atomic_read(&sem->rwbase.readers);
1509
1510 return count < 0 && count != READER_BIAS;
1511 }
1512
1513 #endif /* CONFIG_PREEMPT_RT */
1514
1515 /*
1516 * lock for reading
1517 */
1518 void __sched down_read(struct rw_semaphore *sem)
1519 {
1520 might_sleep();
1521 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1522
1523 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1524 }
1525 EXPORT_SYMBOL(down_read);
1526
1527 int __sched down_read_interruptible(struct rw_semaphore *sem)
1528 {
1529 might_sleep();
1530 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1531
1532 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1533 rwsem_release(&sem->dep_map, _RET_IP_);
1534 return -EINTR;
1535 }
1536
1537 return 0;
1538 }
1539 EXPORT_SYMBOL(down_read_interruptible);
1540
1541 int __sched down_read_killable(struct rw_semaphore *sem)
1542 {
1543 might_sleep();
1544 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1545
1546 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1547 rwsem_release(&sem->dep_map, _RET_IP_);
1548 return -EINTR;
1549 }
1550
1551 return 0;
1552 }
1553 EXPORT_SYMBOL(down_read_killable);
1554
1555 /*
1556 * trylock for reading -- returns 1 if successful, 0 if contention
1557 */
1558 int down_read_trylock(struct rw_semaphore *sem)
1559 {
1560 int ret = __down_read_trylock(sem);
1561
1562 if (ret == 1)
1563 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1564 return ret;
1565 }
1566 EXPORT_SYMBOL(down_read_trylock);
1567
1568 /*
1569 * lock for writing
1570 */
1571 void __sched down_write(struct rw_semaphore *sem)
1572 {
1573 might_sleep();
1574 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1575 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1576 }
1577 EXPORT_SYMBOL(down_write);
1578
1579 /*
1580 * lock for writing
1581 */
1582 int __sched down_write_killable(struct rw_semaphore *sem)
1583 {
1584 might_sleep();
1585 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1586
1587 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1588 __down_write_killable)) {
1589 rwsem_release(&sem->dep_map, _RET_IP_);
1590 return -EINTR;
1591 }
1592
1593 return 0;
1594 }
1595 EXPORT_SYMBOL(down_write_killable);
1596
1597 /*
1598 * trylock for writing -- returns 1 if successful, 0 if contention
1599 */
1600 int down_write_trylock(struct rw_semaphore *sem)
1601 {
1602 int ret = __down_write_trylock(sem);
1603
1604 if (ret == 1)
1605 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1606
1607 return ret;
1608 }
1609 EXPORT_SYMBOL(down_write_trylock);
1610
1611 /*
1612 * release a read lock
1613 */
1614 void up_read(struct rw_semaphore *sem)
1615 {
1616 rwsem_release(&sem->dep_map, _RET_IP_);
1617 __up_read(sem);
1618 }
1619 EXPORT_SYMBOL(up_read);
1620
1621 /*
1622 * release a write lock
1623 */
1624 void up_write(struct rw_semaphore *sem)
1625 {
1626 rwsem_release(&sem->dep_map, _RET_IP_);
1627 trace_android_vh_rwsem_write_finished(sem);
1628 __up_write(sem);
1629 }
1630 EXPORT_SYMBOL(up_write);
1631
1632 /*
1633 * downgrade write lock to read lock
1634 */
1635 void downgrade_write(struct rw_semaphore *sem)
1636 {
1637 lock_downgrade(&sem->dep_map, _RET_IP_);
1638 trace_android_vh_rwsem_write_finished(sem);
1639 __downgrade_write(sem);
1640 }
1641 EXPORT_SYMBOL(downgrade_write);
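/*
 * Illustrative usage sketch of the public API above (not part of this
 * file's implementation); "example_sem", "example_data" and the two
 * functions are made-up names.
 */
#if 0
static DECLARE_RWSEM(example_sem);
static int example_data;

static int example_reader(void)
{
	int val;

	down_read(&example_sem);	/* shared: many readers may hold it */
	val = example_data;
	up_read(&example_sem);
	return val;
}

static void example_writer(int val)
{
	down_write(&example_sem);	/* exclusive: excludes readers and writers */
	example_data = val;
	downgrade_write(&example_sem);	/* keep reading with no unlocked window */
	/* example_data may still be read safely here */
	up_read(&example_sem);
}
#endif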
1642
1643 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1644
1645 void down_read_nested(struct rw_semaphore *sem, int subclass)
1646 {
1647 might_sleep();
1648 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1649 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1650 }
1651 EXPORT_SYMBOL(down_read_nested);
1652
1653 int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1654 {
1655 might_sleep();
1656 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1657
1658 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1659 rwsem_release(&sem->dep_map, _RET_IP_);
1660 return -EINTR;
1661 }
1662
1663 return 0;
1664 }
1665 EXPORT_SYMBOL(down_read_killable_nested);
1666
1667 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1668 {
1669 might_sleep();
1670 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1671 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1672 }
1673 EXPORT_SYMBOL(_down_write_nest_lock);
1674
1675 void down_read_non_owner(struct rw_semaphore *sem)
1676 {
1677 might_sleep();
1678 __down_read(sem);
1679 /*
1680 * The owner value for a reader-owned lock is mostly for debugging
1681 * purposes only and is not critical to the correct functioning of
1682 * rwsem. So it is perfectly fine to set it in a preempt-enabled
1683 * context here.
1684 */
1685 __rwsem_set_reader_owned(sem, NULL);
1686 }
1687 EXPORT_SYMBOL(down_read_non_owner);
1688
1689 void down_write_nested(struct rw_semaphore *sem, int subclass)
1690 {
1691 might_sleep();
1692 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1693 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1694 }
1695 EXPORT_SYMBOL(down_write_nested);
1696
1697 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1698 {
1699 might_sleep();
1700 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1701
1702 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1703 __down_write_killable)) {
1704 rwsem_release(&sem->dep_map, _RET_IP_);
1705 return -EINTR;
1706 }
1707
1708 return 0;
1709 }
1710 EXPORT_SYMBOL(down_write_killable_nested);
1711
1712 void up_read_non_owner(struct rw_semaphore *sem)
1713 {
1714 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1715 __up_read(sem);
1716 }
1717 EXPORT_SYMBOL(up_read_non_owner);
1718
1719 #endif
1720