// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com> and
 * Peter Zijlstra <peterz@infradead.org>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "lock_events.h"
#include <trace/hooks/dtask.h>
#include <trace/hooks/rwsem.h>

/*
 * The least significant 2 bits of the owner value have the following
 * meanings when set.
 *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 *  - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
 *
 * When the rwsem is reader-owned and a spinning writer has timed out,
 * the nonspinnable bit will be set to disable optimistic spinning.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field will largely be left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 *
 * A fast path reader optimistic lock stealing is supported when the rwsem
 * is previously owned by a writer and the following conditions are met:
 *  - rwsem is not currently writer owned
 *  - the handoff isn't set.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_NONSPINNABLE	(1UL << 1)
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(unsigned long) sem->magic,			\
		atomic_long_read(&(sem)->owner), (long)current,	\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif

/*
 * On 64-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-62 - 55-bit reader count
 * Bit  63   - read fail bit
 *
 * On 32-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-30 - 23-bit reader count
 * Bit  31   - read fail bit
 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set. This guard bit is still checked anyway in the down_read() fastpath
 * just in case we need to use up more of the reader bits for other purposes
 * in the future.
 *
 * atomic_long_fetch_add() is used to obtain the reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain the writer lock.
 *
 * There are three places where the lock handoff bit may be set or cleared.
 * 1) rwsem_mark_wake() for readers		-- set, clear
 * 2) rwsem_try_write_lock() for writers	-- set, clear
 * 3) rwsem_del_waiter()			-- clear
 *
 * For all the above cases, wait_lock will be held. A writer must also
 * be the first one in the wait_list to be eligible for setting the handoff
 * bit. So concurrent setting/clearing of the handoff bit is not possible.
 */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
				 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
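
/*
 * Illustrative example (not part of the kernel build): decoding a count
 * value with the masks above. On a 64-bit system, a count of 0x302 means
 * 3 readers hold the lock (0x302 >> 8 == 3), the waiters bit is set
 * (bit 1) and the writer bit is clear (bit 0):
 *
 *	long count = atomic_long_read(&sem->count);
 *	long readers = (count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT;
 *	bool write_locked = count & RWSEM_WRITER_LOCKED;
 *	bool has_waiters = count & RWSEM_FLAG_WAITERS;
 */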

/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Read from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 *
 * Both rwsem_{set,clear}_owner() functions should be in the same
 * preempt disable section as the atomic op that changes sem->count.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	lockdep_assert_preemption_disabled();
	atomic_long_set(&sem->owner, (long)current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	lockdep_assert_preemption_disabled();
	atomic_long_set(&sem->owner, 0);
}

/*
 * Test the flags in the owner field.
 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
	return atomic_long_read(&sem->owner) & flags;
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously, it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 *
 * The reader non-spinnable bit is preserved.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
		(atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);

	atomic_long_set(&sem->owner, val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
	/*
	 * Check the count to see if it is write-locked.
	 */
	long count = atomic_long_read(&sem->count);

	if (count & RWSEM_WRITER_MASK)
		return false;
#endif
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in owner of a reader-owned rwsem, it will be the
 * real owner or one of the real owners. The only exception is when the
 * unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

/*
 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))
			break;
		if (owner & RWSEM_NONSPINNABLE)
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}

static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
{
	*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);

	if (WARN_ON_ONCE(*cntp < 0))
		rwsem_set_nonspinnable(sem);

	if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
		rwsem_set_reader_owned(sem);
		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
		return true;
	}

	return false;
}

static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;
	bool ret = false;

	preempt_disable();
	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
		rwsem_set_owner(sem);
		ret = true;
	}

	preempt_enable();
	return ret;
}

/*
 * Return just the real task structure pointer of the owner
 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
	return (struct task_struct *)
		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Return the real task structure pointer of the owner and the embedded
 * flags in the owner. pflags must be non-NULL.
 */
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}
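
/*
 * Illustrative example (not part of the kernel build): splitting an
 * owner word that encodes the last reader. If owner was set to
 * ((unsigned long)task | RWSEM_READER_OWNED), then:
 *
 *	unsigned long flags;
 *	struct task_struct *p = rwsem_owner_flags(sem, &flags);
 *
 * returns p == task with flags == RWSEM_READER_OWNED. Remember that a
 * reader-owned value may be stale, as documented above.
 */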

/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
	sem->magic = sem;
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
	trace_android_vh_rwsem_init(sem);
}
EXPORT_SYMBOL(__init_rwsem);
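
/*
 * Typical usage of the API implemented in this file (illustrative
 * sketch only; my_rwsem and the data it protects are hypothetical):
 *
 *	static DECLARE_RWSEM(my_rwsem);
 *
 *	void reader_side(void)
 *	{
 *		down_read(&my_rwsem);
 *		...read shared data; many readers may hold the lock...
 *		up_read(&my_rwsem);
 *	}
 *
 *	void writer_side(void)
 *	{
 *		down_write(&my_rwsem);
 *		...modify shared data; access is exclusive...
 *		up_write(&my_rwsem);
 *	}
 */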

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
	unsigned long timeout;
	bool handoff_set;
};
#define rwsem_first_waiter(sem) \
	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * The typical HZ value is either 250 or 1000. So set the minimum waiting
 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
 * queue before initiating the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)
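
/*
 * For example, with HZ=250 this is DIV_ROUND_UP(250, 250) = 1 jiffy
 * (4 ms); with HZ=1000 it is DIV_ROUND_UP(1000, 250) = 4 jiffies
 * (also 4 ms); with HZ=100 it rounds up to 1 jiffy (10 ms).
 */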

/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP	0x100

static inline void
rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
	lockdep_assert_held(&sem->wait_lock);
	list_add_tail(&waiter->list, &sem->wait_list);
	/* caller will set RWSEM_FLAG_WAITERS */
}

/*
 * Remove a waiter from the wait_list and clear flags.
 *
 * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
 * this function. Modify with care.
 *
 * Return: true if wait_list isn't empty and false otherwise
 */
static inline bool
rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
{
	lockdep_assert_held(&sem->wait_lock);
	list_del(&waiter->list);
	if (likely(!list_empty(&sem->wait_list)))
		return true;

	atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
	return false;
}

/*
 * Handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 *
 * Implies rwsem_del_waiter() for all woken readers.
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	lockdep_assert_held(&sem->wait_lock);

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = rwsem_first_waiter(sem);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * No reader wakeup if there are too many of them already.
	 */
	if (unlikely(atomic_long_read(&sem->count) < 0))
		return;

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		struct task_struct *owner;

		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			/*
			 * When we've been waiting "too" long (for writers
			 * to give up the lock), request a HANDOFF to
			 * force the issue.
			 */
			if (time_after(jiffies, waiter->timeout)) {
				if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
					adjustment -= RWSEM_FLAG_HANDOFF;
					lockevent_inc(rwsem_rlock_handoff);
				}
				waiter->handoff_set = true;
			}

			atomic_long_add(-adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 * The reader nonspinnable bit seen at slowpath entry of
		 * the reader is copied over.
		 */
		owner = waiter->task;
		__rwsem_set_reader_owned(sem, owner);
	}

	/*
	 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
	 * queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 *
	 * This is an adaptation of the phase-fair R/W locks where at the
	 * reader phase (first waiter is a reader), all readers are eligible
	 * to acquire the lock at the same time irrespective of their order
	 * in the queue. The writers acquire the lock according to their
	 * order in the queue.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. It
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
	INIT_LIST_HEAD(&wlist);
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;

		woken++;
		list_move_tail(&waiter->list, &wlist);

		/*
		 * Limit # of readers that can be woken up per wakeup call.
		 */
		if (unlikely(woken >= MAX_READERS_WAKEUP))
			break;
	}

	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);

	oldcount = atomic_long_read(&sem->count);
	if (list_empty(&sem->wait_list)) {
		/*
		 * Combined with list_move_tail() above, this implies
		 * rwsem_del_waiter().
		 */
		adjustment -= RWSEM_FLAG_WAITERS;
		if (oldcount & RWSEM_FLAG_HANDOFF)
			adjustment -= RWSEM_FLAG_HANDOFF;
	} else if (woken) {
		/*
		 * When we've woken a reader, we no longer need to force
		 * writers to give up the lock and we can clear HANDOFF.
		 */
		if (oldcount & RWSEM_FLAG_HANDOFF)
			adjustment -= RWSEM_FLAG_HANDOFF;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	/* 2nd pass */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_slowpath() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}

/*
 * Remove a waiter and try to wake up other waiters in the wait queue.
 * This function is called from the out_nolock path of both the reader and
 * writer slowpaths with wait_lock held. It releases the wait_lock and
 * optionally wakes up waiters before it returns.
 */
static inline void
rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
		      struct wake_q_head *wake_q)
		      __releases(&sem->wait_lock)
{
	bool first = rwsem_first_waiter(sem) == waiter;

	wake_q_init(wake_q);

	/*
	 * If the wait_list isn't empty and the waiter to be deleted is
	 * the first waiter, we wake up the remaining waiters as they may
	 * be eligible to acquire or spin on the lock.
	 */
	if (rwsem_del_waiter(sem, waiter) && first)
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	if (!wake_q_empty(wake_q))
		wake_up_q(wake_q);
}

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 *
 * Implies rwsem_del_waiter() on success.
 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
					struct rwsem_waiter *waiter)
{
	struct rwsem_waiter *first = rwsem_first_waiter(sem);
	long count, new;

	lockdep_assert_held(&sem->wait_lock);

	count = atomic_long_read(&sem->count);
	do {
		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

		if (has_handoff) {
			/*
			 * Honor handoff bit and yield only when the first
			 * waiter is the one that set it. Otherwise, we
			 * still try to acquire the rwsem.
			 */
			if (first->handoff_set && (waiter != first))
				return false;
		}

		new = count;

		if (count & RWSEM_LOCK_MASK) {
			/*
			 * A waiter (first or not) can set the handoff bit
			 * if it is an RT task or has waited in the wait
			 * queue for too long.
			 */
			if (has_handoff || (!rt_task(waiter->task) &&
					    !time_after(jiffies, waiter->timeout)))
				return false;

			new |= RWSEM_FLAG_HANDOFF;
		} else {
			new |= RWSEM_WRITER_LOCKED;
			new &= ~RWSEM_FLAG_HANDOFF;

			if (list_is_singular(&sem->wait_list))
				new &= ~RWSEM_FLAG_WAITERS;
		}
	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

	/*
	 * We have either acquired the lock with handoff bit cleared or set
	 * the handoff bit. Only the first waiter can have its handoff_set
	 * set here to enable optimistic spinning in slowpath loop.
	 */
	if (new & RWSEM_FLAG_HANDOFF) {
		first->handoff_set = true;
		lockevent_inc(rwsem_wlock_handoff);
		return false;
	}

	/*
	 * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
	 * success.
	 */
	list_del(&waiter->list);
	rwsem_set_owner(sem);
	return true;
}

/*
 * The rwsem_spin_on_owner() function returns the following 4 values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count | RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_lock);
			return true;
		}
	}
	return false;
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	unsigned long flags;
	bool ret = true;

	if (need_resched()) {
		lockevent_inc(rwsem_opt_fail);
		return false;
	}

	preempt_disable();
	/*
	 * Disabling preemption is equivalent to an RCU read-side critical
	 * section, thus the task_struct structure won't go away.
	 */
	owner = rwsem_owner_flags(sem, &flags);
	/*
	 * Don't check the read-owner as the entry may be stale.
	 */
	if ((flags & RWSEM_NONSPINNABLE) ||
	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
		ret = false;
	preempt_enable();
	trace_android_vh_rwsem_can_spin_on_owner(sem, &ret);

	lockevent_cond_inc(rwsem_opt_fail, !ret);
	return ret;
}

#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)

static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags)
{
	if (flags & RWSEM_NONSPINNABLE)
		return OWNER_NONSPINNABLE;

	if (flags & RWSEM_READER_OWNED)
		return OWNER_READER;

	return owner ? OWNER_WRITER : OWNER_NULL;
}

static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *new, *owner;
	unsigned long flags, new_flags;
	enum owner_state state;
	int cnt = 0;
	bool time_out = false;

	lockdep_assert_preemption_disabled();

	owner = rwsem_owner_flags(sem, &flags);
	state = rwsem_owner_state(owner, flags);
	if (state != OWNER_WRITER)
		return state;

	for (;;) {
		trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, true);
		if (time_out)
			break;
		/*
		 * When a waiting writer set the handoff flag, it may spin
		 * on the owner as well. Once that writer acquires the lock,
		 * we can spin on it. So we don't need to quit even when the
		 * handoff bit is set.
		 */
		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags);
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to free()d memory. If it still
		 * matches, our spinning context has already disabled
		 * preemption, which is equivalent to an RCU read-side
		 * critical section and ensures the memory stays valid.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}

	return state;
}

/*
 * Calculate reader-owned rwsem spinning threshold for writer
 *
 * The more readers own the rwsem, the longer it will take for them to
 * wind down and free the rwsem. So the empirical formula used to
 * determine the actual spinning time limit here is:
 *
 *   Spinning threshold = (10 + nr_readers/2)us
 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);
	int readers = count >> RWSEM_READER_SHIFT;
	u64 delta;

	if (readers > 30)
		readers = 30;
	delta = (20 + readers) * NSEC_PER_USEC / 2;

	return sched_clock() + delta;
}
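
/*
 * Worked example of the threshold above: with 20 readers holding the
 * lock, delta = (20 + 20) * NSEC_PER_USEC / 2 = 20us; with 30 or more
 * readers it caps at (20 + 30) / 2 = 25us, matching the formula
 * (10 + nr_readers/2)us.
 */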

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;
	int prev_owner_state = OWNER_NULL;
	int loop = 0;
	u64 rspin_threshold = 0;
	int cnt = 0;
	bool time_out = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock and spinning time has exceeded limit.
	 */
	for (;;) {
		enum owner_state owner_state;

		trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, false);
		if (time_out)
			break;
		owner_state = rwsem_spin_on_owner(sem);
		if (!(owner_state & OWNER_SPINNABLE))
			break;

		/*
		 * Try to acquire the lock
		 */
		taken = rwsem_try_write_lock_unqueued(sem);

		if (taken)
			break;

		/*
		 * Time-based reader-owned rwsem optimistic spinning
		 */
		if (owner_state == OWNER_READER) {
			/*
			 * Re-initialize rspin_threshold every time when
			 * the owner state changes from non-reader to reader.
			 * This allows a writer to steal the lock in between
			 * 2 reader phases and have the threshold reset at
			 * the beginning of the 2nd reader phase.
			 */
			if (prev_owner_state != OWNER_READER) {
				if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
					break;
				rspin_threshold = rwsem_rspin_threshold(sem);
				loop = 0;
			}

			/*
			 * Check time threshold once every 16 iterations to
			 * avoid calling sched_clock() too frequently so
			 * as to reduce the average latency between the times
			 * when the lock becomes free and when the spinner
			 * is ready to do a trylock.
			 */
			else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
				rwsem_set_nonspinnable(sem);
				lockevent_inc(rwsem_opt_nospin);
				break;
			}
		}

		/*
		 * An RT task cannot do optimistic spinning if it cannot
		 * be sure the lock holder is running or live-lock may
		 * happen if the current task and the lock holder happen
		 * to run on the same CPU. However, aborting optimistic
		 * spinning while a NULL owner is detected may miss some
		 * opportunity where spinning can continue without causing
		 * problem.
		 *
		 * There are 2 possible cases where an RT task may be able
		 * to continue spinning.
		 *
		 * 1) The lock owner is in the process of releasing the
		 *    lock, sem->owner is cleared but the lock has not
		 *    been released yet.
		 * 2) The lock was free and owner cleared, but another
		 *    task just came in and acquired the lock before
		 *    we try to get it. The new owner may be a spinnable
		 *    writer.
		 *
		 * To take advantage of the two scenarios listed above, the
		 * RT task is made to retry one more time to see if it can
		 * acquire the lock or continue spinning on the new owning
		 * writer. Of course, if the time lag is long enough or the
		 * new owner is not a writer or spinnable, the RT task will
		 * quit spinning.
		 *
		 * If the owner is a writer, the need_resched() check is
		 * done inside rwsem_spin_on_owner(). If the owner is not
		 * a writer, the need_resched() check needs to be done here.
		 */
		if (owner_state != OWNER_WRITER) {
			if (need_resched())
				break;
			if (rt_task(current) &&
			   (prev_owner_state != OWNER_WRITER))
				break;
		}
		prev_owner_state = owner_state;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
	trace_android_vh_rwsem_opt_spin_finish(sem, taken);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}

/*
 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
 * only be called when the reader count reaches 0.
 */
static inline void clear_nonspinnable(struct rw_semaphore *sem)
{
	if (unlikely(rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)))
		atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
}

#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline void clear_nonspinnable(struct rw_semaphore *sem) { }

static inline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	return OWNER_NONSPINNABLE;
}
#endif

/*
 * Prepare to wake up waiter(s) in the wait queue by putting them into the
 * given wake_q if the rwsem lock owner isn't a writer. If rwsem is likely
 * reader-owned, wake up read lock waiters in queue front or wake up any
 * front waiter otherwise.
 *
 * This is being called from both reader and writer slow paths.
 */
static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count,
					  struct wake_q_head *wake_q)
{
	enum rwsem_wake_type wake_type;

	if (count & RWSEM_WRITER_MASK)
		return;

	if (count & RWSEM_READER_MASK) {
		wake_type = RWSEM_WAKE_READERS;
	} else {
		wake_type = RWSEM_WAKE_ANY;
		clear_nonspinnable(sem);
	}
	rwsem_mark_wake(sem, wake_type, wake_q);
}

/*
 * Wait for the read lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
{
	long adjustment = -RWSEM_READER_BIAS;
	long rcnt = (count >> RWSEM_READER_SHIFT);
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);
	bool already_on_list = false;

	/*
	 * To prevent a constant stream of readers from starving a sleeping
	 * waiter, don't attempt optimistic lock stealing if the lock is
	 * currently owned by readers.
	 */
	if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
	    (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
		goto queue;

	/*
	 * Reader optimistic lock stealing.
	 */
	if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
		rwsem_set_reader_owned(sem);
		lockevent_inc(rwsem_rlock_steal);

		/*
		 * Wake up other readers in the wait queue if it is
		 * the first reader.
		 */
		if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (!list_empty(&sem->wait_list))
				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
						&wake_q);
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
		}
		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
		return sem;
	}

queue:
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
	waiter.handoff_set = false;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer, this reader can exit the slowpath and return
		 * immediately as its RWSEM_READER_BIAS has already been set
		 * in the count.
		 */
		if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
			/* Provide lock ACQUIRE */
			smp_acquire__after_ctrl_dep();
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			trace_android_vh_record_rwsem_lock_starttime(
							current, jiffies);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
	trace_android_vh_alter_rwsem_list_add(
					&waiter,
					sem, &already_on_list);
	if (!already_on_list)
		rwsem_add_waiter(sem, &waiter);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	rwsem_cond_wake_waiter(sem, count, &wake_q);
	trace_android_vh_rwsem_wake(sem);
	raw_spin_unlock_irq(&sem->wait_lock);

	if (!wake_q_empty(&wake_q))
		wake_up_q(&wake_q);

	trace_contention_begin(sem, LCB_F_READ);

	/* wait to be given the lock */
	trace_android_vh_rwsem_read_wait_start(sem);
	for (;;) {
		set_current_state(state);
		if (!smp_load_acquire(&waiter.task)) {
			/* Matches rwsem_mark_wake()'s smp_store_release(). */
			break;
		}
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
			break;
		}
		schedule_preempt_disabled();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	trace_android_vh_rwsem_read_wait_finish(sem);
	lockevent_inc(rwsem_rlock);
	trace_contention_end(sem, 0);
	trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
	return sem;

out_nolock:
	rwsem_del_wake_waiter(sem, &waiter, &wake_q);
	__set_current_state(TASK_RUNNING);
	trace_android_vh_rwsem_read_wait_finish(sem);
	lockevent_inc(rwsem_rlock_fail);
	trace_contention_end(sem, -EINTR);
	return ERR_PTR(-EINTR);
}

/*
 * Wait until we successfully acquire the write lock
 */
static struct rw_semaphore __sched *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);
	bool already_on_list = false;

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
		/* rwsem_optimistic_spin() implies ACQUIRE on success */
		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
		return sem;
	}

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
	waiter.handoff_set = false;

	raw_spin_lock_irq(&sem->wait_lock);
	trace_android_vh_alter_rwsem_list_add(
					&waiter,
					sem, &already_on_list);
	if (!already_on_list)
		rwsem_add_waiter(sem, &waiter);

	/* we're now waiting on the lock */
	if (rwsem_first_waiter(sem) != &waiter) {
		rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count),
				       &wake_q);
		if (!wake_q_empty(&wake_q)) {
			/*
			 * We want to minimize wait_lock hold time especially
			 * when a large number of readers are to be woken up.
			 */
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
			raw_spin_lock_irq(&sem->wait_lock);
		}
	} else {
		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
	}

	trace_android_vh_rwsem_wake(sem);
	/* wait until we successfully acquire the lock */
	trace_android_vh_rwsem_write_wait_start(sem);
	set_current_state(state);
	trace_contention_begin(sem, LCB_F_WRITE);

	for (;;) {
		if (rwsem_try_write_lock(sem, &waiter)) {
			/* rwsem_try_write_lock() implies ACQUIRE on success */
			break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		if (signal_pending_state(state, current))
			goto out_nolock;

		/*
		 * After setting the handoff bit and failing to acquire
		 * the lock, attempt to spin on owner to accelerate lock
		 * transfer. If the previous owner is an on-cpu writer and it
		 * has just released the lock, OWNER_NULL will be returned.
		 * In this case, we attempt to acquire the lock again
		 * without sleeping.
		 */
		if (waiter.handoff_set) {
			enum owner_state owner_state;

			preempt_disable();
			owner_state = rwsem_spin_on_owner(sem);
			preempt_enable();

			if (owner_state == OWNER_NULL)
				goto trylock_again;
		}

		schedule();
		lockevent_inc(rwsem_sleep_writer);
		set_current_state(state);
trylock_again:
		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	trace_android_vh_rwsem_write_wait_finish(sem);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);
	trace_contention_end(sem, 0);
	trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
	return sem;

out_nolock:
	__set_current_state(TASK_RUNNING);
	trace_android_vh_rwsem_write_wait_finish(sem);
	raw_spin_lock_irq(&sem->wait_lock);
	rwsem_del_wake_waiter(sem, &waiter, &wake_q);
	lockevent_inc(rwsem_wlock_fail);
	trace_contention_end(sem, -EINTR);
	return ERR_PTR(-EINTR);
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	trace_android_vh_rwsem_wake_finish(sem);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * lock for reading
 */
static __always_inline int __down_read_common(struct rw_semaphore *sem, int state)
{
	int ret = 0;
	long count;

	preempt_disable();
	if (!rwsem_read_trylock(sem, &count)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
			ret = -EINTR;
			goto out;
		}
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	}
out:
	preempt_enable();
	return ret;
}

static __always_inline void __down_read(struct rw_semaphore *sem)
{
	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
}

static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_INTERRUPTIBLE);
}

static __always_inline int __down_read_killable(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int ret = 0;
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);

	preempt_disable();
	tmp = atomic_long_read(&sem->count);
	while (!(tmp & RWSEM_READ_FAILED_MASK)) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						    tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			ret = 1;
			trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
			break;
		}
	}
	preempt_enable();
	return ret;
}

/*
 * lock for writing
 */
static inline int __down_write_common(struct rw_semaphore *sem, int state)
{
	if (unlikely(!rwsem_write_trylock(sem))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
			return -EINTR;
	}

	return 0;
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	return __down_write_common(sem, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	return rwsem_write_trylock(sem);
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);

	preempt_disable();
	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
		      RWSEM_FLAG_WAITERS)) {
		clear_nonspinnable(sem);
		rwsem_wake(sem);
	}
	trace_android_vh_record_rwsem_lock_starttime(current, 0);
	preempt_enable();
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	/*
	 * sem->owner may differ from current if the ownership is transferred
	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
	 */
	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);

	preempt_disable();
	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	preempt_enable();
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem);
	trace_android_vh_record_rwsem_lock_starttime(current, 0);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}
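
/*
 * Typical downgrade pattern (illustrative sketch; my_rwsem is
 * hypothetical): publish an update under the write lock, then keep
 * reading alongside other readers without a release/re-acquire window:
 *
 *	down_write(&my_rwsem);
 *	...modify shared data...
 *	downgrade_write(&my_rwsem);
 *	...keep reading; other readers may now enter...
 *	up_read(&my_rwsem);
 */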

#else /* !CONFIG_PREEMPT_RT */

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

#define rwbase_set_and_save_current_state(state)	\
	set_current_state(state)

#define rwbase_restore_current_state()			\
	__set_current_state(TASK_RUNNING)

#define rwbase_rtmutex_lock_state(rtm, state)		\
	__rt_mutex_lock(rtm, state)

#define rwbase_rtmutex_slowlock_locked(rtm, state)	\
	__rt_mutex_slowlock_locked(rtm, NULL, state)

#define rwbase_rtmutex_unlock(rtm)			\
	__rt_mutex_unlock(rtm)

#define rwbase_rtmutex_trylock(rtm)			\
	__rt_mutex_trylock(rtm)

#define rwbase_signal_pending_state(state, current)	\
	signal_pending_state(state, current)

#define rwbase_schedule()				\
	schedule()

#include "rwbase_rt.c"

void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
	init_rwbase_rt(&(sem)->rwbase);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

static inline void __down_read(struct rw_semaphore *sem)
{
	rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
	return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	return rwbase_read_trylock(&sem->rwbase);
}

static inline void __up_read(struct rw_semaphore *sem)
{
	rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
}

static inline void __sched __down_write(struct rw_semaphore *sem)
{
	rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __sched __down_write_killable(struct rw_semaphore *sem)
{
	return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	return rwbase_write_trylock(&sem->rwbase);
}

static inline void __up_write(struct rw_semaphore *sem)
{
	rwbase_write_unlock(&sem->rwbase);
}

static inline void __downgrade_write(struct rw_semaphore *sem)
{
	rwbase_write_downgrade(&sem->rwbase);
}

/* Debug stubs for the common API */
#define DEBUG_RWSEMS_WARN_ON(c, sem)

static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
}

static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
	int count = atomic_read(&sem->rwbase.readers);

	return count < 0 && count != READER_BIAS;
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);

int __sched down_read_interruptible(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_interruptible);

int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);
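
/*
 * Illustrative sketch of the trylock convention (1 on success, 0 on
 * contention), e.g. from a path that must not sleep (my_rwsem is
 * hypothetical):
 *
 *	if (down_read_trylock(&my_rwsem)) {
 *		...read shared data...
 *		up_read(&my_rwsem);
 *	} else {
 *		...contended: fall back or retry later...
 *	}
 */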

/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);

/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);

/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	trace_android_vh_rwsem_write_finished(sem);
	__up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);
	trace_android_vh_rwsem_write_finished(sem);
	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);

int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);

void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	/*
	 * The owner value for a reader-owned lock is mostly for debugging
	 * purpose only and is not critical to the correct functioning of
	 * rwsem. So it is perfectly fine to set it in a preempt-enabled
	 * context here.
	 */
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);

void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif
1764