1 // SPDX-License-Identifier: GPL-2.0
2 /* kernel/rwsem.c: R/W semaphores, public implementation
3 *
4 * Written by David Howells (dhowells@redhat.com).
5 * Derived from asm-i386/semaphore.h
6 *
7 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
8 * and Michel Lespinasse <walken@google.com>
9 *
10 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
11 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
12 *
13 * Rwsem count bit fields re-definition and rwsem rearchitecture by
14 * Waiman Long <longman@redhat.com> and
15 * Peter Zijlstra <peterz@infradead.org>.
16 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/task.h>
23 #include <linux/sched/debug.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/clock.h>
27 #include <linux/export.h>
28 #include <linux/rwsem.h>
29 #include <linux/atomic.h>
30
31 #include "rwsem.h"
32 #include "lock_events.h"
33
34 #include <trace/hooks/dtask.h>
35 #include <trace/hooks/rwsem.h>
36
/*
 * The least significant 3 bits of the owner value have the following
 * meanings when set.
 * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 * - Bit 1: RWSEM_RD_NONSPINNABLE - Readers cannot spin on this lock.
 * - Bit 2: RWSEM_WR_NONSPINNABLE - Writers cannot spin on this lock.
 *
 * When the rwsem is either owned by an anonymous writer, or it is
 * reader-owned, but a spinning writer has timed out, both nonspinnable
 * bits will be set to disable optimistic spinning by readers and writers.
 * In the latter case, the last unlocking reader should then check the
 * writer nonspinnable bit and clear it only to give writers preference
 * to acquire the lock via optimistic spinning, but not readers. Similar
 * action is also done in the reader slowpath.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field will largely be left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
60 *
61 * That information may be helpful in debugging cases where the system
62 * seems to hang on a reader owned rwsem especially if only one reader
63 * is involved. Ideally we would like to track all the readers that own
64 * a rwsem, but the overhead is simply too big.
65 *
 * Reader optimistic spinning is helpful when the reader critical section
 * is short and there aren't that many readers around. It makes readers
 * relatively more preferred than writers. When a writer times out spinning
 * on a reader-owned lock and sets the nonspinnable bits, there are two main
 * reasons for that.
 *
 * 1) The reader critical section is long, perhaps the task sleeps after
 *    acquiring the read lock.
 * 2) There are just too many readers contending the lock causing it to
 *    take a while to service all of them.
 *
 * In the former case, a long reader critical section will impede the progress
 * of writers which is usually more important for system performance. In
 * the latter case, reader optimistic spinning tends to make the reader
 * groups that contain readers that acquire the lock together smaller
 * leading to more of them. That may hurt performance in some cases. In
 * other words, the setting of nonspinnable bits indicates that reader
 * optimistic spinning may not be helpful for those workloads that cause
 * it.
85 *
86 * Therefore, any writers that had observed the setting of the writer
87 * nonspinnable bit for a given rwsem after they fail to acquire the lock
88 * via optimistic spinning will set the reader nonspinnable bit once they
89 * acquire the write lock. Similarly, readers that observe the setting
90 * of reader nonspinnable bit at slowpath entry will set the reader
91 * nonspinnable bits when they acquire the read lock via the wakeup path.
92 *
 * Once the reader nonspinnable bit is on, it will only be reset when
 * a writer is able to acquire the rwsem in the fast path or somehow a
 * reader or writer in the slowpath doesn't observe the nonspinnable bit.
 *
 * This is to discourage reader optimistic spinning on that particular
 * rwsem and make writers more preferred. This adaptive disabling of reader
 * optimistic spinning will alleviate the negative side effect of this
 * feature.
101 */
102 #define RWSEM_READER_OWNED (1UL << 0)
103 #define RWSEM_RD_NONSPINNABLE (1UL << 1)
104 #define RWSEM_WR_NONSPINNABLE (1UL << 2)
105 #define RWSEM_NONSPINNABLE (RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
106 #define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
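
/*
 * Illustrative example (a sketch added for clarity, not part of the
 * original code): for a reader-owned rwsem whose readers have been marked
 * non-spinnable, the owner word is composed as
 *
 *	owner = (unsigned long)last_reader_task |
 *		RWSEM_READER_OWNED | RWSEM_RD_NONSPINNABLE
 *
 * i.e. a task_struct pointer with bits 0 and 1 set in its low bits,
 * which is how __rwsem_set_reader_owned() below builds the value.
 */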
107
108 #ifdef CONFIG_DEBUG_RWSEMS
109 # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
110 if (!debug_locks_silent && \
111 WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
112 #c, atomic_long_read(&(sem)->count), \
113 (unsigned long) sem->magic, \
114 atomic_long_read(&(sem)->owner), (long)current, \
115 list_empty(&(sem)->wait_list) ? "" : "not ")) \
116 debug_locks_off(); \
117 } while (0)
118 #else
119 # define DEBUG_RWSEMS_WARN_ON(c, sem)
120 #endif
121
122 /*
123 * On 64-bit architectures, the bit definitions of the count are:
124 *
125 * Bit 0 - writer locked bit
126 * Bit 1 - waiters present bit
127 * Bit 2 - lock handoff bit
128 * Bits 3-7 - reserved
129 * Bits 8-62 - 55-bit reader count
130 * Bit 63 - read fail bit
131 *
132 * On 32-bit architectures, the bit definitions of the count are:
133 *
134 * Bit 0 - writer locked bit
135 * Bit 1 - waiters present bit
136 * Bit 2 - lock handoff bit
137 * Bits 3-7 - reserved
138 * Bits 8-30 - 23-bit reader count
139 * Bit 31 - read fail bit
140 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set. This guard bit is still checked anyway in the down_read() fastpath
 * just in case we need to use up more of the reader bits for other purposes
 * in the future.
 *
 * atomic_long_fetch_add() is used to obtain the reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain the writer lock.
148 *
149 * There are three places where the lock handoff bit may be set or cleared.
150 * 1) rwsem_mark_wake() for readers.
151 * 2) rwsem_try_write_lock() for writers.
152 * 3) Error path of rwsem_down_write_slowpath().
153 *
154 * For all the above cases, wait_lock will be held. A writer must also
155 * be the first one in the wait_list to be eligible for setting the handoff
156 * bit. So concurrent setting/clearing of handoff bit is not possible.
157 */
158 #define RWSEM_WRITER_LOCKED (1UL << 0)
159 #define RWSEM_FLAG_WAITERS (1UL << 1)
160 #define RWSEM_FLAG_HANDOFF (1UL << 2)
161 #define RWSEM_FLAG_READFAIL (1UL << (BITS_PER_LONG - 1))
162
163 #define RWSEM_READER_SHIFT 8
164 #define RWSEM_READER_BIAS (1UL << RWSEM_READER_SHIFT)
165 #define RWSEM_READER_MASK (~(RWSEM_READER_BIAS - 1))
166 #define RWSEM_WRITER_MASK RWSEM_WRITER_LOCKED
167 #define RWSEM_LOCK_MASK (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
168 #define RWSEM_READ_FAILED_MASK (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
169 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
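
/*
 * Worked example derived from the bit definitions above (illustrative
 * only): a rwsem held by two readers with waiters queued has
 *
 *	count = 2 * RWSEM_READER_BIAS + RWSEM_FLAG_WAITERS = 0x202
 *
 * while a write-locked rwsem with waiters and a pending handoff has
 *
 *	count = RWSEM_WRITER_LOCKED | RWSEM_FLAG_WAITERS |
 *		RWSEM_FLAG_HANDOFF = 0x7
 */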
170
171 /*
172 * All writes to owner are protected by WRITE_ONCE() to make sure that
173 * store tearing can't happen as optimistic spinners may read and use
174 * the owner value concurrently without lock. Read from owner, however,
175 * may not need READ_ONCE() as long as the pointer value is only used
176 * for comparison and isn't being dereferenced.
177 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
179 {
180 atomic_long_set(&sem->owner, (long)current);
181 }
182
static inline void rwsem_clear_owner(struct rw_semaphore *sem)
184 {
185 atomic_long_set(&sem->owner, 0);
186 }
187
188 /*
189 * Test the flags in the owner field.
190 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
192 {
193 return atomic_long_read(&sem->owner) & flags;
194 }
195
196 /*
197 * The task_struct pointer of the last owning reader will be left in
198 * the owner field.
199 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
203 *
204 * The reader non-spinnable bit is preserved.
205 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
207 struct task_struct *owner)
208 {
209 unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
210 (atomic_long_read(&sem->owner) & RWSEM_RD_NONSPINNABLE);
211
212 atomic_long_set(&sem->owner, val);
213 }
214
static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
216 {
217 __rwsem_set_reader_owned(sem, current);
218 }
219
220 /*
221 * Return true if the rwsem is owned by a reader.
222 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
224 {
225 #ifdef CONFIG_DEBUG_RWSEMS
226 /*
227 * Check the count to see if it is write-locked.
228 */
229 long count = atomic_long_read(&sem->count);
230
231 if (count & RWSEM_WRITER_MASK)
232 return false;
233 #endif
234 return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
235 }
236
237 #ifdef CONFIG_DEBUG_RWSEMS
238 /*
239 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
240 * is a task pointer in owner of a reader-owned rwsem, it will be the
241 * real owner or one of the real owners. The only exception is when the
242 * unlock is done by up_read_non_owner().
243 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
245 {
246 unsigned long val = atomic_long_read(&sem->owner);
247
248 while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
249 if (atomic_long_try_cmpxchg(&sem->owner, &val,
250 val & RWSEM_OWNER_FLAGS_MASK))
251 return;
252 }
253 }
254 #else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
256 {
257 }
258 #endif
259
260 /*
261 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
262 * remains set. Otherwise, the operation will be aborted.
263 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
265 {
266 unsigned long owner = atomic_long_read(&sem->owner);
267
268 do {
269 if (!(owner & RWSEM_READER_OWNED))
270 break;
271 if (owner & RWSEM_NONSPINNABLE)
272 break;
273 } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
274 owner | RWSEM_NONSPINNABLE));
275 }
276
static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
278 {
279 long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
280 if (WARN_ON_ONCE(cnt < 0))
281 rwsem_set_nonspinnable(sem);
282 return !(cnt & RWSEM_READ_FAILED_MASK);
283 }
284
285 /*
286 * Return just the real task structure pointer of the owner
287 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
289 {
290 return (struct task_struct *)
291 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
292 }
293
294 /*
295 * Return the real task structure pointer of the owner and the embedded
296 * flags in the owner. pflags must be non-NULL.
297 */
298 static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
300 {
301 unsigned long owner = atomic_long_read(&sem->owner);
302
303 *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
304 return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
305 }
306
307 /*
308 * Guide to the rw_semaphore's count field.
309 *
310 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
311 * by a writer.
312 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
322 */
323
324 /*
325 * Initialize an rwsem:
326 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
328 struct lock_class_key *key)
329 {
330 #ifdef CONFIG_DEBUG_LOCK_ALLOC
331 /*
332 * Make sure we are not reinitializing a held semaphore:
333 */
334 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
335 lockdep_init_map(&sem->dep_map, name, key, 0);
336 #endif
337 #ifdef CONFIG_DEBUG_RWSEMS
338 sem->magic = sem;
339 #endif
340 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
341 raw_spin_lock_init(&sem->wait_lock);
342 INIT_LIST_HEAD(&sem->wait_list);
343 atomic_long_set(&sem->owner, 0L);
344 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
345 osq_lock_init(&sem->osq);
346 #endif
347 trace_android_vh_rwsem_init(sem);
348 }
349 EXPORT_SYMBOL(__init_rwsem);
350
351 enum rwsem_waiter_type {
352 RWSEM_WAITING_FOR_WRITE,
353 RWSEM_WAITING_FOR_READ
354 };
355
356 struct rwsem_waiter {
357 struct list_head list;
358 struct task_struct *task;
359 enum rwsem_waiter_type type;
360 unsigned long timeout;
361 unsigned long last_rowner;
362 };
363 #define rwsem_first_waiter(sem) \
364 list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
365
366 enum rwsem_wake_type {
367 RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
368 RWSEM_WAKE_READERS, /* Wake readers only */
369 RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
370 };
371
372 enum writer_wait_state {
373 WRITER_NOT_FIRST, /* Writer is not first in wait list */
374 WRITER_FIRST, /* Writer is first in wait list */
375 WRITER_HANDOFF /* Writer is first & handoff needed */
376 };
377
378 /*
379 * The typical HZ value is either 250 or 1000. So set the minimum waiting
380 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
381 * queue before initiating the handoff protocol.
382 */
383 #define RWSEM_WAIT_TIMEOUT DIV_ROUND_UP(HZ, 250)
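
/*
 * For example (derived from the definition above): HZ=1000 gives
 * DIV_ROUND_UP(1000, 250) = 4 jiffies (~4ms), while HZ=250 gives
 * 1 jiffy (also ~4ms).
 */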
384
385 /*
386 * Magic number to batch-wakeup waiting readers, even when writers are
387 * also present in the queue. This both limits the amount of work the
388 * waking thread must do and also prevents any potential counter overflow,
389 * however unlikely.
390 */
391 #define MAX_READERS_WAKEUP 0x100
392
393 /*
 * handle the lock release when processes blocked on it can now run
395 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
396 * have been set.
397 * - there must be someone on the queue
398 * - the wait_lock must be held by the caller
399 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
400 * to actually wakeup the blocked task(s) and drop the reference count,
401 * preferably when the wait_lock is released
402 * - woken process blocks are discarded from the list after having task zeroed
403 * - writers are only marked woken if downgrading is false
404 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
406 enum rwsem_wake_type wake_type,
407 struct wake_q_head *wake_q)
408 {
409 struct rwsem_waiter *waiter, *tmp;
410 long oldcount, woken = 0, adjustment = 0;
411 struct list_head wlist;
412
413 lockdep_assert_held(&sem->wait_lock);
414
415 /*
416 * Take a peek at the queue head waiter such that we can determine
417 * the wakeup(s) to perform.
418 */
419 waiter = rwsem_first_waiter(sem);
420
421 if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
422 if (wake_type == RWSEM_WAKE_ANY) {
423 /*
424 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by
426 * the caller, other writers are able to steal it.
427 * Readers, on the other hand, will block as they
428 * will notice the queued writer.
429 */
430 wake_q_add(wake_q, waiter->task);
431 lockevent_inc(rwsem_wake_writer);
432 }
433
434 return;
435 }
436
437 /*
438 * No reader wakeup if there are too many of them already.
439 */
440 if (unlikely(atomic_long_read(&sem->count) < 0))
441 return;
442
443 /*
444 * Writers might steal the lock before we grant it to the next reader.
445 * We prefer to do the first reader grant before counting readers
446 * so we can bail out early if a writer stole the lock.
447 */
448 if (wake_type != RWSEM_WAKE_READ_OWNED) {
449 struct task_struct *owner;
450
451 adjustment = RWSEM_READER_BIAS;
452 oldcount = atomic_long_fetch_add(adjustment, &sem->count);
453 if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
454 /*
455 * When we've been waiting "too" long (for writers
456 * to give up the lock), request a HANDOFF to
457 * force the issue.
458 */
459 if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
460 time_after(jiffies, waiter->timeout)) {
461 adjustment -= RWSEM_FLAG_HANDOFF;
462 lockevent_inc(rwsem_rlock_handoff);
463 }
464
465 atomic_long_add(-adjustment, &sem->count);
466 return;
467 }
468 /*
469 * Set it to reader-owned to give spinners an early
470 * indication that readers now have the lock.
471 * The reader nonspinnable bit seen at slowpath entry of
472 * the reader is copied over.
473 */
474 owner = waiter->task;
475 if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) {
476 owner = (void *)((unsigned long)owner | RWSEM_RD_NONSPINNABLE);
477 lockevent_inc(rwsem_opt_norspin);
478 }
479 __rwsem_set_reader_owned(sem, owner);
480 }
481
482 /*
483 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
484 * queue. We know that the woken will be at least 1 as we accounted
485 * for above. Note we increment the 'active part' of the count by the
486 * number of readers before waking any processes up.
487 *
488 * This is an adaptation of the phase-fair R/W locks where at the
489 * reader phase (first waiter is a reader), all readers are eligible
490 * to acquire the lock at the same time irrespective of their order
491 * in the queue. The writers acquire the lock according to their
492 * order in the queue.
493 *
494 * We have to do wakeup in 2 passes to prevent the possibility that
495 * the reader count may be decremented before it is incremented. It
496 * is because the to-be-woken waiter may not have slept yet. So it
497 * may see waiter->task got cleared, finish its critical section and
498 * do an unlock before the reader count increment.
499 *
500 * 1) Collect the read-waiters in a separate list, count them and
501 * fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
503 * put them into wake_q to be woken up later.
504 */
505 INIT_LIST_HEAD(&wlist);
506 list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
507 if (waiter->type == RWSEM_WAITING_FOR_WRITE)
508 continue;
509
510 woken++;
511 list_move_tail(&waiter->list, &wlist);
512
513 /*
514 * Limit # of readers that can be woken up per wakeup call.
515 */
516 if (woken >= MAX_READERS_WAKEUP)
517 break;
518 }
519
520 adjustment = woken * RWSEM_READER_BIAS - adjustment;
521 lockevent_cond_inc(rwsem_wake_reader, woken);
522 if (list_empty(&sem->wait_list)) {
523 /* hit end of list above */
524 adjustment -= RWSEM_FLAG_WAITERS;
525 }
526
527 /*
528 * When we've woken a reader, we no longer need to force writers
529 * to give up the lock and we can clear HANDOFF.
530 */
531 if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
532 adjustment -= RWSEM_FLAG_HANDOFF;
533
534 if (adjustment)
535 atomic_long_add(adjustment, &sem->count);
536
537 /* 2nd pass */
538 list_for_each_entry_safe(waiter, tmp, &wlist, list) {
539 struct task_struct *tsk;
540
541 tsk = waiter->task;
542 get_task_struct(tsk);
543
544 /*
545 * Ensure calling get_task_struct() before setting the reader
546 * waiter to nil such that rwsem_down_read_slowpath() cannot
547 * race with do_exit() by always holding a reference count
548 * to the task to wakeup.
549 */
550 smp_store_release(&waiter->task, NULL);
551 /*
552 * Ensure issuing the wakeup (either by us or someone else)
553 * after setting the reader waiter to nil.
554 */
555 wake_q_add_safe(wake_q, tsk);
556 }
557 }
558
559 /*
560 * This function must be called with the sem->wait_lock held to prevent
561 * race conditions between checking the rwsem wait list and setting the
562 * sem->count accordingly.
563 *
564 * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
565 * bit is set or the lock is acquired with handoff bit cleared.
566 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
568 enum writer_wait_state wstate)
569 {
570 long count, new;
571
572 lockdep_assert_held(&sem->wait_lock);
573
574 count = atomic_long_read(&sem->count);
575 do {
576 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
577
578 if (has_handoff && wstate == WRITER_NOT_FIRST)
579 return false;
580
581 new = count;
582
583 if (count & RWSEM_LOCK_MASK) {
584 if (has_handoff || (wstate != WRITER_HANDOFF))
585 return false;
586
587 new |= RWSEM_FLAG_HANDOFF;
588 } else {
589 new |= RWSEM_WRITER_LOCKED;
590 new &= ~RWSEM_FLAG_HANDOFF;
591
592 if (list_is_singular(&sem->wait_list))
593 new &= ~RWSEM_FLAG_WAITERS;
594 }
595 } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
596
597 /*
598 * We have either acquired the lock with handoff bit cleared or
599 * set the handoff bit.
600 */
601 if (new & RWSEM_FLAG_HANDOFF)
602 return false;
603
604 rwsem_set_owner(sem);
605 return true;
606 }
607
608 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
609 /*
610 * Try to acquire read lock before the reader is put on wait queue.
611 * Lock acquisition isn't allowed if the rwsem is locked or a writer handoff
612 * is ongoing.
613 */
static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
615 {
616 long count = atomic_long_read(&sem->count);
617
618 if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))
619 return false;
620
621 count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
622 if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
623 rwsem_set_reader_owned(sem);
624 lockevent_inc(rwsem_opt_rlock);
625 return true;
626 }
627
628 /* Back out the change */
629 atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
630 return false;
631 }
632
633 /*
634 * Try to acquire write lock before the writer has been put on wait queue.
635 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
637 {
638 long count = atomic_long_read(&sem->count);
639
640 while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
641 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
642 count | RWSEM_WRITER_LOCKED)) {
643 rwsem_set_owner(sem);
644 lockevent_inc(rwsem_opt_wlock);
645 return true;
646 }
647 }
648 return false;
649 }
650
static inline bool owner_on_cpu(struct task_struct *owner)
652 {
	/*
	 * Due to lock holder preemption, we skip spinning if the
	 * task is not on a CPU or its CPU is preempted.
	 */
657 return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
658 }
659
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
661 unsigned long nonspinnable)
662 {
663 struct task_struct *owner;
664 unsigned long flags;
665 bool ret = true;
666
667 BUILD_BUG_ON(!(RWSEM_OWNER_UNKNOWN & RWSEM_NONSPINNABLE));
668
669 if (need_resched()) {
670 lockevent_inc(rwsem_opt_fail);
671 return false;
672 }
673
674 preempt_disable();
675 rcu_read_lock();
676 owner = rwsem_owner_flags(sem, &flags);
677 /*
678 * Don't check the read-owner as the entry may be stale.
679 */
680 if ((flags & nonspinnable) ||
681 (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
682 ret = false;
683 rcu_read_unlock();
684 preempt_enable();
685
686 lockevent_cond_inc(rwsem_opt_fail, !ret);
687 return ret;
688 }
689
690 /*
 * The rwsem_spin_on_owner() function returns the following 4 values
692 * depending on the lock owner state.
693 * OWNER_NULL : owner is currently NULL
694 * OWNER_WRITER: when owner changes and is a writer
695 * OWNER_READER: when owner changes and the new owner may be a reader.
696 * OWNER_NONSPINNABLE:
697 * when optimistic spinning has to stop because either the
698 * owner stops running, is unknown, or its timeslice has
699 * been used up.
700 */
701 enum owner_state {
702 OWNER_NULL = 1 << 0,
703 OWNER_WRITER = 1 << 1,
704 OWNER_READER = 1 << 2,
705 OWNER_NONSPINNABLE = 1 << 3,
706 };
707 #define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER | OWNER_READER)
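
/*
 * Illustrative note (added for clarity): a return value of OWNER_READER
 * from rwsem_spin_on_owner() still satisfies (state & OWNER_SPINNABLE),
 * so a spinning writer keeps going subject to the time-based threshold
 * below, whereas OWNER_NONSPINNABLE terminates the spin loop immediately.
 */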
708
709 static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long nonspinnable)
711 {
712 if (flags & nonspinnable)
713 return OWNER_NONSPINNABLE;
714
715 if (flags & RWSEM_READER_OWNED)
716 return OWNER_READER;
717
718 return owner ? OWNER_WRITER : OWNER_NULL;
719 }
720
721 static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
723 {
724 struct task_struct *new, *owner;
725 unsigned long flags, new_flags;
726 enum owner_state state;
727
728 owner = rwsem_owner_flags(sem, &flags);
729 state = rwsem_owner_state(owner, flags, nonspinnable);
730 if (state != OWNER_WRITER)
731 return state;
732
733 rcu_read_lock();
734 for (;;) {
735 /*
736 * When a waiting writer set the handoff flag, it may spin
737 * on the owner as well. Once that writer acquires the lock,
738 * we can spin on it. So we don't need to quit even when the
739 * handoff bit is set.
740 */
741 new = rwsem_owner_flags(sem, &new_flags);
742 if ((new != owner) || (new_flags != flags)) {
743 state = rwsem_owner_state(new, new_flags, nonspinnable);
744 break;
745 }
746
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to free()d memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
753 barrier();
754
755 if (need_resched() || !owner_on_cpu(owner)) {
756 state = OWNER_NONSPINNABLE;
757 break;
758 }
759
760 cpu_relax();
761 }
762 rcu_read_unlock();
763
764 return state;
765 }
766
767 /*
768 * Calculate reader-owned rwsem spinning threshold for writer
769 *
770 * The more readers own the rwsem, the longer it will take for them to
771 * wind down and free the rwsem. So the empirical formula used to
772 * determine the actual spinning time limit here is:
773 *
774 * Spinning threshold = (10 + nr_readers/2)us
775 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
778 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
780 {
781 long count = atomic_long_read(&sem->count);
782 int readers = count >> RWSEM_READER_SHIFT;
783 u64 delta;
784
785 if (readers > 30)
786 readers = 30;
787 delta = (20 + readers) * NSEC_PER_USEC / 2;
788
789 return sched_clock() + delta;
790 }
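
/*
 * Worked example (illustrative only): with 10 readers holding the lock,
 * delta = (20 + 10) * NSEC_PER_USEC / 2 = 15000ns, i.e. the
 * (10 + 10/2)us = 15us threshold given by the formula above.
 */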
791
static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
793 {
794 bool taken = false;
795 int prev_owner_state = OWNER_NULL;
796 int loop = 0;
797 u64 rspin_threshold = 0;
798 unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
799 : RWSEM_RD_NONSPINNABLE;
800
801 preempt_disable();
802
803 /* sem->wait_lock should not be held when doing optimistic spinning */
804 if (!osq_lock(&sem->osq))
805 goto done;
806
807 /*
808 * Optimistically spin on the owner field and attempt to acquire the
809 * lock whenever the owner changes. Spinning will be stopped when:
810 * 1) the owning writer isn't running; or
811 * 2) readers own the lock and spinning time has exceeded limit.
812 */
813 for (;;) {
814 enum owner_state owner_state;
815
816 owner_state = rwsem_spin_on_owner(sem, nonspinnable);
817 if (!(owner_state & OWNER_SPINNABLE))
818 break;
819
820 /*
821 * Try to acquire the lock
822 */
823 taken = wlock ? rwsem_try_write_lock_unqueued(sem)
824 : rwsem_try_read_lock_unqueued(sem);
825
826 if (taken)
827 break;
828
829 /*
830 * Time-based reader-owned rwsem optimistic spinning
831 */
832 if (wlock && (owner_state == OWNER_READER)) {
833 /*
834 * Re-initialize rspin_threshold every time when
835 * the owner state changes from non-reader to reader.
836 * This allows a writer to steal the lock in between
837 * 2 reader phases and have the threshold reset at
838 * the beginning of the 2nd reader phase.
839 */
840 if (prev_owner_state != OWNER_READER) {
841 if (rwsem_test_oflags(sem, nonspinnable))
842 break;
843 rspin_threshold = rwsem_rspin_threshold(sem);
844 loop = 0;
845 }
846
847 /*
848 * Check time threshold once every 16 iterations to
849 * avoid calling sched_clock() too frequently so
850 * as to reduce the average latency between the times
851 * when the lock becomes free and when the spinner
852 * is ready to do a trylock.
853 */
854 else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
855 rwsem_set_nonspinnable(sem);
856 lockevent_inc(rwsem_opt_nospin);
857 break;
858 }
859 }
860
861 /*
862 * An RT task cannot do optimistic spinning if it cannot
863 * be sure the lock holder is running or live-lock may
864 * happen if the current task and the lock holder happen
865 * to run in the same CPU. However, aborting optimistic
866 * spinning while a NULL owner is detected may miss some
867 * opportunity where spinning can continue without causing
868 * problem.
869 *
870 * There are 2 possible cases where an RT task may be able
871 * to continue spinning.
872 *
873 * 1) The lock owner is in the process of releasing the
874 * lock, sem->owner is cleared but the lock has not
875 * been released yet.
		 * 2) The lock was free and owner cleared, but another
		 *    task just comes in and acquires the lock before
		 *    we try to get it. The new owner may be a spinnable
		 *    writer.
		 *
		 * To take advantage of the two scenarios listed above, the RT
882 * task is made to retry one more time to see if it can
883 * acquire the lock or continue spinning on the new owning
884 * writer. Of course, if the time lag is long enough or the
885 * new owner is not a writer or spinnable, the RT task will
886 * quit spinning.
887 *
888 * If the owner is a writer, the need_resched() check is
889 * done inside rwsem_spin_on_owner(). If the owner is not
890 * a writer, need_resched() check needs to be done here.
891 */
892 if (owner_state != OWNER_WRITER) {
893 if (need_resched())
894 break;
895 if (rt_task(current) &&
896 (prev_owner_state != OWNER_WRITER))
897 break;
898 }
899 prev_owner_state = owner_state;
900
901 /*
902 * The cpu_relax() call is a compiler barrier which forces
903 * everything in this loop to be re-loaded. We don't need
904 * memory barriers as we'll eventually observe the right
905 * values at the cost of a few extra spins.
906 */
907 cpu_relax();
908 }
909 osq_unlock(&sem->osq);
910 done:
911 preempt_enable();
912 lockevent_cond_inc(rwsem_opt_fail, !taken);
913 return taken;
914 }
915
916 /*
917 * Clear the owner's RWSEM_WR_NONSPINNABLE bit if it is set. This should
918 * only be called when the reader count reaches 0.
919 *
 * This gives writers a better chance to acquire the rwsem first before
 * readers when the rwsem was being held by readers for a relatively long
922 * period of time. Race can happen that an optimistic spinner may have
923 * just stolen the rwsem and set the owner, but just clearing the
924 * RWSEM_WR_NONSPINNABLE bit will do no harm anyway.
925 */
static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
927 {
928 if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
929 atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
930 }
931
932 /*
933 * This function is called when the reader fails to acquire the lock via
934 * optimistic spinning. In this case we will still attempt to do a trylock
935 * when comparing the rwsem state right now with the state when entering
936 * the slowpath indicates that the reader is still in a valid reader phase.
937 * This happens when the following conditions are true:
938 *
939 * 1) The lock is currently reader owned, and
940 * 2) The lock is previously not reader-owned or the last read owner changes.
941 *
942 * In the former case, we have transitioned from a writer phase to a
943 * reader-phase while spinning. In the latter case, it means the reader
944 * phase hasn't ended when we entered the optimistic spinning loop. In
945 * both cases, the reader is eligible to acquire the lock. This is the
946 * secondary path where a read lock is acquired optimistically.
947 *
948 * The reader non-spinnable bit wasn't set at time of entry or it will
949 * not be here at all.
950 */
static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
952 unsigned long last_rowner)
953 {
954 unsigned long owner = atomic_long_read(&sem->owner);
955
956 if (!(owner & RWSEM_READER_OWNED))
957 return false;
958
959 if (((owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK) &&
960 rwsem_try_read_lock_unqueued(sem)) {
961 lockevent_inc(rwsem_opt_rlock2);
962 lockevent_add(rwsem_opt_fail, -1);
963 return true;
964 }
965 return false;
966 }
967 #else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
969 unsigned long nonspinnable)
970 {
971 return false;
972 }
973
static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
975 {
976 return false;
977 }
978
static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }
980
static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
982 unsigned long last_rowner)
983 {
984 return false;
985 }
986
987 static inline int
rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
989 {
990 return 0;
991 }
992 #define OWNER_NULL 1
993 #endif
994
995 /*
996 * Wait for the read lock to be granted
997 */
998 static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
1000 {
1001 long count, adjustment = -RWSEM_READER_BIAS;
1002 struct rwsem_waiter waiter;
1003 DEFINE_WAKE_Q(wake_q);
1004 bool wake = false;
1005 bool already_on_list = false;
1006
1007 /*
1008 * Save the current read-owner of rwsem, if available, and the
1009 * reader nonspinnable bit.
1010 */
1011 waiter.last_rowner = atomic_long_read(&sem->owner);
1012 if (!(waiter.last_rowner & RWSEM_READER_OWNED))
1013 waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;
1014
1015 if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
1016 goto queue;
1017
1018 /*
1019 * Undo read bias from down_read() and do optimistic spinning.
1020 */
1021 atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
1022 adjustment = 0;
1023 if (rwsem_optimistic_spin(sem, false)) {
1024 /* rwsem_optimistic_spin() implies ACQUIRE on success */
1025 /*
1026 * Wake up other readers in the wait list if the front
1027 * waiter is a reader.
1028 */
1029 if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
1030 raw_spin_lock_irq(&sem->wait_lock);
1031 if (!list_empty(&sem->wait_list))
1032 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
1033 &wake_q);
1034 raw_spin_unlock_irq(&sem->wait_lock);
1035 wake_up_q(&wake_q);
1036 }
1037 return sem;
1038 } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
1039 /* rwsem_reader_phase_trylock() implies ACQUIRE on success */
1040 return sem;
1041 }
1042
1043 queue:
1044 waiter.task = current;
1045 waiter.type = RWSEM_WAITING_FOR_READ;
1046 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1047
1048 raw_spin_lock_irq(&sem->wait_lock);
1049 if (list_empty(&sem->wait_list)) {
1050 /*
1051 * In case the wait queue is empty and the lock isn't owned
1052 * by a writer or has the handoff bit set, this reader can
1053 * exit the slowpath and return immediately as its
1054 * RWSEM_READER_BIAS has already been set in the count.
1055 */
1056 if (adjustment && !(atomic_long_read(&sem->count) &
1057 (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
1058 /* Provide lock ACQUIRE */
1059 smp_acquire__after_ctrl_dep();
1060 raw_spin_unlock_irq(&sem->wait_lock);
1061 rwsem_set_reader_owned(sem);
1062 lockevent_inc(rwsem_rlock_fast);
1063 return sem;
1064 }
1065 adjustment += RWSEM_FLAG_WAITERS;
1066 }
1067 trace_android_vh_alter_rwsem_list_add(
1068 &waiter,
1069 sem, &already_on_list);
1070 if (!already_on_list)
1071 list_add_tail(&waiter.list, &sem->wait_list);
1072
1073 /* we're now waiting on the lock, but no longer actively locking */
1074 if (adjustment)
1075 count = atomic_long_add_return(adjustment, &sem->count);
1076 else
1077 count = atomic_long_read(&sem->count);
1078
1079 /*
1080 * If there are no active locks, wake the front queued process(es).
1081 *
1082 * If there are no writers and we are first in the queue,
1083 * wake our own waiter to join the existing active readers !
1084 */
1085 if (!(count & RWSEM_LOCK_MASK)) {
1086 clear_wr_nonspinnable(sem);
1087 wake = true;
1088 }
1089 if (wake || (!(count & RWSEM_WRITER_MASK) &&
1090 (adjustment & RWSEM_FLAG_WAITERS)))
1091 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1092
1093 trace_android_vh_rwsem_wake(sem);
1094 raw_spin_unlock_irq(&sem->wait_lock);
1095 wake_up_q(&wake_q);
1096
1097 /* wait to be given the lock */
1098 trace_android_vh_rwsem_read_wait_start(sem);
1099 for (;;) {
1100 set_current_state(state);
1101 if (!smp_load_acquire(&waiter.task)) {
1102 /* Matches rwsem_mark_wake()'s smp_store_release(). */
1103 break;
1104 }
1105 if (signal_pending_state(state, current)) {
1106 raw_spin_lock_irq(&sem->wait_lock);
1107 if (waiter.task)
1108 goto out_nolock;
1109 raw_spin_unlock_irq(&sem->wait_lock);
1110 /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
1111 break;
1112 }
1113 schedule();
1114 lockevent_inc(rwsem_sleep_reader);
1115 }
1116
1117 __set_current_state(TASK_RUNNING);
1118 trace_android_vh_rwsem_read_wait_finish(sem);
1119 lockevent_inc(rwsem_rlock);
1120 return sem;
1121
1122 out_nolock:
1123 list_del(&waiter.list);
1124 if (list_empty(&sem->wait_list)) {
1125 atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
1126 &sem->count);
1127 }
1128 raw_spin_unlock_irq(&sem->wait_lock);
1129 __set_current_state(TASK_RUNNING);
1130 trace_android_vh_rwsem_read_wait_finish(sem);
1131 lockevent_inc(rwsem_rlock_fail);
1132 return ERR_PTR(-EINTR);
1133 }
1134
1135 /*
 * This function is called by a write lock owner. So the owner value
 * won't get changed by others.
1138 */
static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
1140 bool disable)
1141 {
1142 if (unlikely(disable)) {
1143 atomic_long_or(RWSEM_RD_NONSPINNABLE, &sem->owner);
1144 lockevent_inc(rwsem_opt_norspin);
1145 }
1146 }
1147
1148 /*
1149 * Wait until we successfully acquire the write lock
1150 */
1151 static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1153 {
1154 long count;
1155 bool disable_rspin;
1156 enum writer_wait_state wstate;
1157 struct rwsem_waiter waiter;
1158 struct rw_semaphore *ret = sem;
1159 DEFINE_WAKE_Q(wake_q);
1160 bool already_on_list = false;
1161
1162 /* do optimistic spinning and steal lock if possible */
1163 if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
1164 rwsem_optimistic_spin(sem, true)) {
1165 /* rwsem_optimistic_spin() implies ACQUIRE on success */
1166 return sem;
1167 }
1168
1169 /*
1170 * Disable reader optimistic spinning for this rwsem after
1171 * acquiring the write lock when the setting of the nonspinnable
1172 * bits are observed.
1173 */
1174 disable_rspin = atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE;
1175
1176 /*
1177 * Optimistic spinning failed, proceed to the slowpath
1178 * and block until we can acquire the sem.
1179 */
1180 waiter.task = current;
1181 waiter.type = RWSEM_WAITING_FOR_WRITE;
1182 waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1183
1184 raw_spin_lock_irq(&sem->wait_lock);
1185
1186 /* account for this before adding a new element to the list */
1187 wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
1188
1189 trace_android_vh_alter_rwsem_list_add(
1190 &waiter,
1191 sem, &already_on_list);
1192 if (!already_on_list)
1193 list_add_tail(&waiter.list, &sem->wait_list);
1194
1195 /* we're now waiting on the lock */
1196 if (wstate == WRITER_NOT_FIRST) {
1197 count = atomic_long_read(&sem->count);
1198
1199 /*
1200 * If there were already threads queued before us and:
		 * 1) there are no active locks, wake the front
1202 * queued process(es) as the handoff bit might be set.
1203 * 2) there are no active writers and some readers, the lock
1204 * must be read owned; so we try to wake any read lock
1205 * waiters that were queued ahead of us.
1206 */
1207 if (count & RWSEM_WRITER_MASK)
1208 goto wait;
1209
1210 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1211 ? RWSEM_WAKE_READERS
1212 : RWSEM_WAKE_ANY, &wake_q);
1213
1214 if (!wake_q_empty(&wake_q)) {
1215 /*
1216 * We want to minimize wait_lock hold time especially
1217 * when a large number of readers are to be woken up.
1218 */
1219 raw_spin_unlock_irq(&sem->wait_lock);
1220 wake_up_q(&wake_q);
1221 wake_q_init(&wake_q); /* Used again, reinit */
1222 raw_spin_lock_irq(&sem->wait_lock);
1223 }
1224 } else {
1225 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1226 }
1227
1228 wait:
1229 trace_android_vh_rwsem_wake(sem);
1230 /* wait until we successfully acquire the lock */
1231 trace_android_vh_rwsem_write_wait_start(sem);
1232 set_current_state(state);
1233 for (;;) {
1234 if (rwsem_try_write_lock(sem, wstate)) {
1235 /* rwsem_try_write_lock() implies ACQUIRE on success */
1236 break;
1237 }
1238
1239 raw_spin_unlock_irq(&sem->wait_lock);
1240
1241 /*
1242 * After setting the handoff bit and failing to acquire
1243 * the lock, attempt to spin on owner to accelerate lock
		 * transfer. If the previous owner is an on-cpu writer and it
1245 * has just released the lock, OWNER_NULL will be returned.
1246 * In this case, we attempt to acquire the lock again
1247 * without sleeping.
1248 */
1249 if (wstate == WRITER_HANDOFF &&
1250 rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
1251 goto trylock_again;
1252
1253 /* Block until there are no active lockers. */
1254 for (;;) {
1255 if (signal_pending_state(state, current))
1256 goto out_nolock;
1257
1258 schedule();
1259 lockevent_inc(rwsem_sleep_writer);
1260 set_current_state(state);
1261 /*
1262 * If HANDOFF bit is set, unconditionally do
1263 * a trylock.
1264 */
1265 if (wstate == WRITER_HANDOFF)
1266 break;
1267
1268 if ((wstate == WRITER_NOT_FIRST) &&
1269 (rwsem_first_waiter(sem) == &waiter))
1270 wstate = WRITER_FIRST;
1271
1272 count = atomic_long_read(&sem->count);
1273 if (!(count & RWSEM_LOCK_MASK))
1274 break;
1275
1276 /*
1277 * The setting of the handoff bit is deferred
1278 * until rwsem_try_write_lock() is called.
1279 */
1280 if ((wstate == WRITER_FIRST) && (rt_task(current) ||
1281 time_after(jiffies, waiter.timeout))) {
1282 wstate = WRITER_HANDOFF;
1283 lockevent_inc(rwsem_wlock_handoff);
1284 break;
1285 }
1286 }
1287 trylock_again:
1288 raw_spin_lock_irq(&sem->wait_lock);
1289 }
1290 __set_current_state(TASK_RUNNING);
1291 trace_android_vh_rwsem_write_wait_finish(sem);
1292 list_del(&waiter.list);
1293 rwsem_disable_reader_optspin(sem, disable_rspin);
1294 raw_spin_unlock_irq(&sem->wait_lock);
1295 lockevent_inc(rwsem_wlock);
1296
1297 return ret;
1298
1299 out_nolock:
1300 __set_current_state(TASK_RUNNING);
1301 trace_android_vh_rwsem_write_wait_finish(sem);
1302 raw_spin_lock_irq(&sem->wait_lock);
1303 list_del(&waiter.list);
1304
1305 if (unlikely(wstate == WRITER_HANDOFF))
1306 atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
1307
1308 if (list_empty(&sem->wait_list))
1309 atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
1310 else
1311 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1312 raw_spin_unlock_irq(&sem->wait_lock);
1313 wake_up_q(&wake_q);
1314 lockevent_inc(rwsem_wlock_fail);
1315
1316 return ERR_PTR(-EINTR);
1317 }
1318
1319 /*
1320 * handle waking up a waiter on the semaphore
1321 * - up_read/up_write has decremented the active part of count if we come here
1322 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
1324 {
1325 unsigned long flags;
1326 DEFINE_WAKE_Q(wake_q);
1327
1328 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1329
1330 if (!list_empty(&sem->wait_list))
1331 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1332
1333 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1334 wake_up_q(&wake_q);
1335
1336 return sem;
1337 }
1338
1339 /*
1340 * downgrade a write lock into a read lock
1341 * - caller incremented waiting part of count and discovered it still negative
1342 * - just wake up any readers at the front of the queue
1343 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1345 {
1346 unsigned long flags;
1347 DEFINE_WAKE_Q(wake_q);
1348
1349 raw_spin_lock_irqsave(&sem->wait_lock, flags);
1350
1351 if (!list_empty(&sem->wait_list))
1352 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1353
1354 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1355 wake_up_q(&wake_q);
1356
1357 return sem;
1358 }
1359
1360 /*
1361 * lock for reading
1362 */
inline void __down_read(struct rw_semaphore *sem)
1364 {
1365 if (!rwsem_read_trylock(sem)) {
1366 rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
1367 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1368 } else {
1369 rwsem_set_reader_owned(sem);
1370 }
1371 }
1372
static inline int __down_read_killable(struct rw_semaphore *sem)
1374 {
1375 if (!rwsem_read_trylock(sem)) {
1376 if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
1377 return -EINTR;
1378 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1379 } else {
1380 rwsem_set_reader_owned(sem);
1381 }
1382 return 0;
1383 }
1384
static inline int __down_read_trylock(struct rw_semaphore *sem)
1386 {
1387 long tmp;
1388
1389 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1390
1391 /*
1392 * Optimize for the case when the rwsem is not locked at all.
1393 */
1394 tmp = RWSEM_UNLOCKED_VALUE;
1395 do {
1396 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1397 tmp + RWSEM_READER_BIAS)) {
1398 rwsem_set_reader_owned(sem);
1399 return 1;
1400 }
1401 } while (!(tmp & RWSEM_READ_FAILED_MASK));
1402 return 0;
1403 }
1404
1405 /*
1406 * lock for writing
1407 */
static inline void __down_write(struct rw_semaphore *sem)
1409 {
1410 long tmp = RWSEM_UNLOCKED_VALUE;
1411
1412 if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1413 RWSEM_WRITER_LOCKED)))
1414 rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
1415 else
1416 rwsem_set_owner(sem);
1417 }
1418
static inline int __down_write_killable(struct rw_semaphore *sem)
1420 {
1421 long tmp = RWSEM_UNLOCKED_VALUE;
1422
1423 if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1424 RWSEM_WRITER_LOCKED))) {
1425 if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
1426 return -EINTR;
1427 } else {
1428 rwsem_set_owner(sem);
1429 }
1430 return 0;
1431 }
1432
static inline int __down_write_trylock(struct rw_semaphore *sem)
1434 {
1435 long tmp;
1436
1437 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1438
1439 tmp = RWSEM_UNLOCKED_VALUE;
1440 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1441 RWSEM_WRITER_LOCKED)) {
1442 rwsem_set_owner(sem);
1443 return true;
1444 }
1445 return false;
1446 }
1447
1448 /*
1449 * unlock after reading
1450 */
inline void __up_read(struct rw_semaphore *sem)
1452 {
1453 long tmp;
1454
1455 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1456 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1457
1458 rwsem_clear_reader_owned(sem);
1459 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1460 DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1461 if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
1462 RWSEM_FLAG_WAITERS)) {
1463 clear_wr_nonspinnable(sem);
1464 rwsem_wake(sem, tmp);
1465 }
1466 }
1467
1468 /*
1469 * unlock after writing
1470 */
static inline void __up_write(struct rw_semaphore *sem)
1472 {
1473 long tmp;
1474
1475 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1476 /*
1477 * sem->owner may differ from current if the ownership is transferred
1478 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
1479 */
1480 DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1481 !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1482
1483 rwsem_clear_owner(sem);
1484 tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1485 if (unlikely(tmp & RWSEM_FLAG_WAITERS))
1486 rwsem_wake(sem, tmp);
1487 }
1488
1489 /*
1490 * downgrade write lock to read lock
1491 */
static inline void __downgrade_write(struct rw_semaphore *sem)
1493 {
1494 long tmp;
1495
1496 /*
1497 * When downgrading from exclusive to shared ownership,
1498 * anything inside the write-locked region cannot leak
1499 * into the read side. In contrast, anything in the
1500 * read-locked region is ok to be re-ordered into the
1501 * write side. As such, rely on RELEASE semantics.
1502 */
1503 DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1504 tmp = atomic_long_fetch_add_release(
1505 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1506 rwsem_set_reader_owned(sem);
1507 if (tmp & RWSEM_FLAG_WAITERS)
1508 rwsem_downgrade_wake(sem);
1509 }
1510
1511 /*
1512 * lock for reading
1513 */
void __sched down_read(struct rw_semaphore *sem)
1515 {
1516 might_sleep();
1517 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1518
1519 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1520 }
1521 EXPORT_SYMBOL(down_read);
1522
int __sched down_read_killable(struct rw_semaphore *sem)
1524 {
1525 might_sleep();
1526 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1527
1528 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1529 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1530 return -EINTR;
1531 }
1532
1533 return 0;
1534 }
1535 EXPORT_SYMBOL(down_read_killable);
1536
1537 /*
1538 * trylock for reading -- returns 1 if successful, 0 if contention
1539 */
int down_read_trylock(struct rw_semaphore *sem)
1541 {
1542 int ret = __down_read_trylock(sem);
1543
1544 if (ret == 1)
1545 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1546 return ret;
1547 }
1548 EXPORT_SYMBOL(down_read_trylock);
1549
1550 /*
1551 * lock for writing
1552 */
void __sched down_write(struct rw_semaphore *sem)
1554 {
1555 might_sleep();
1556 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1557 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1558 }
1559 EXPORT_SYMBOL(down_write);
1560
1561 /*
1562 * lock for writing
1563 */
int __sched down_write_killable(struct rw_semaphore *sem)
1565 {
1566 might_sleep();
1567 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1568
1569 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1570 __down_write_killable)) {
1571 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1572 return -EINTR;
1573 }
1574
1575 return 0;
1576 }
1577 EXPORT_SYMBOL(down_write_killable);
1578
1579 /*
1580 * trylock for writing -- returns 1 if successful, 0 if contention
1581 */
int down_write_trylock(struct rw_semaphore *sem)
1583 {
1584 int ret = __down_write_trylock(sem);
1585
1586 if (ret == 1)
1587 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1588
1589 return ret;
1590 }
1591 EXPORT_SYMBOL(down_write_trylock);
1592
1593 /*
1594 * release a read lock
1595 */
void up_read(struct rw_semaphore *sem)
1597 {
1598 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1599 __up_read(sem);
1600 }
1601 EXPORT_SYMBOL(up_read);
1602
1603 /*
1604 * release a write lock
1605 */
void up_write(struct rw_semaphore *sem)
1607 {
1608 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1609 trace_android_vh_rwsem_write_finished(sem);
1610 __up_write(sem);
1611 }
1612 EXPORT_SYMBOL(up_write);
1613
1614 /*
1615 * downgrade write lock to read lock
1616 */
void downgrade_write(struct rw_semaphore *sem)
1618 {
1619 lock_downgrade(&sem->dep_map, _RET_IP_);
1620 trace_android_vh_rwsem_write_finished(sem);
1621 __downgrade_write(sem);
1622 }
1623 EXPORT_SYMBOL(downgrade_write);
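
/*
 * Minimal usage sketch of the public API above (illustrative only;
 * my_rwsem is a hypothetical lock protecting some shared data):
 *
 *	static DECLARE_RWSEM(my_rwsem);
 *
 *	down_read(&my_rwsem);
 *	... read the shared data ...
 *	up_read(&my_rwsem);
 *
 *	down_write(&my_rwsem);
 *	... modify the shared data ...
 *	downgrade_write(&my_rwsem);
 *	... keep reading without releasing and re-acquiring ...
 *	up_read(&my_rwsem);
 */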
1624
1625 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1626
void down_read_nested(struct rw_semaphore *sem, int subclass)
1628 {
1629 might_sleep();
1630 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1631 LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1632 }
1633 EXPORT_SYMBOL(down_read_nested);
1634
void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1636 {
1637 might_sleep();
1638 rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1639 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1640 }
1641 EXPORT_SYMBOL(_down_write_nest_lock);
1642
void down_read_non_owner(struct rw_semaphore *sem)
1644 {
1645 might_sleep();
1646 __down_read(sem);
1647 __rwsem_set_reader_owned(sem, NULL);
1648 }
1649 EXPORT_SYMBOL(down_read_non_owner);
1650
void down_write_nested(struct rw_semaphore *sem, int subclass)
1652 {
1653 might_sleep();
1654 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1655 LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1656 }
1657 EXPORT_SYMBOL(down_write_nested);
1658
int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1660 {
1661 might_sleep();
1662 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1663
1664 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1665 __down_write_killable)) {
1666 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1667 return -EINTR;
1668 }
1669
1670 return 0;
1671 }
1672 EXPORT_SYMBOL(down_write_killable_nested);
1673
void up_read_non_owner(struct rw_semaphore *sem)
1675 {
1676 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1677 __up_read(sem);
1678 }
1679 EXPORT_SYMBOL(up_read_non_owner);
1680
1681 #endif
1682