/*
 * Copyright (C) 2010 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <limits.h>  // For INT_MAX, used by pthread_rwlock_unlock.
#include <stdatomic.h>
#include <string.h>

#include "pthread_internal.h"
#include "private/bionic_futex.h"
#include "private/bionic_lock.h"
#include "private/bionic_time_conversions.h"

/* Technical note:
 *
 * Possible states of a read/write lock:
 *
 *  - no readers and no writer (unlocked)
 *  - one or more readers sharing the lock at the same time (read-locked)
 *  - one writer holding the lock (write-locked)
 *
 * Additionally:
 *  - trying to get the write-lock while there are any readers blocks
 *  - trying to get the read-lock while there is a writer blocks
 *  - a single thread can acquire the lock multiple times in read mode
 *
 *  - POSIX states that behavior is undefined (may deadlock) if a thread tries
 *    to acquire the lock
 *      - in write mode while already holding the lock (whether in read or write mode)
 *      - in read mode while already holding the lock in write mode.
 *  - This implementation returns EDEADLK in the "write after write" and "read after
 *    write" cases, and deadlocks in the "write after read" case.
 */

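// Illustrative example (not part of the original source): how the deadlock
// detection described above behaves for a thread that already holds the write
// lock.
//
//   pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
//   pthread_rwlock_wrlock(&lock);  // returns 0, lock is write-held
//   pthread_rwlock_wrlock(&lock);  // returns EDEADLK ("write after write")
//   pthread_rwlock_rdlock(&lock);  // returns EDEADLK ("read after write")
//   pthread_rwlock_unlock(&lock);  // returns 0
//
// "Write after read" is not detected because readers are anonymous (no tid is
// recorded for them), so that case deadlocks as the note above says.
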
// A rwlockattr is implemented as a 32-bit integer with the following fields:
//  bits    name              description
//   1      rwlock_kind       the rwlock kind, e.g. PTHREAD_RWLOCK_PREFER_READER_NP.
//   0      process_shared    set to 1 if the rwlock is shared between processes.

#define RWLOCKATTR_PSHARED_SHIFT 0
#define RWLOCKATTR_KIND_SHIFT    1

#define RWLOCKATTR_PSHARED_MASK  1
#define RWLOCKATTR_KIND_MASK     2
#define RWLOCKATTR_RESERVED_MASK (~3)

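// Worked example (a sketch, assuming bionic's kind values
// PTHREAD_RWLOCK_PREFER_READER_NP == 0 and
// PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP == 1, which fit the one-bit
// kind field): building a process-shared, writer-preferring attribute.
//
//   pthread_rwlockattr_t attr;
//   pthread_rwlockattr_init(&attr);                                // attr == 0b00
//   pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);  // attr == 0b01
//   pthread_rwlockattr_setkind_np(&attr,
//       PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);             // attr == 0b11
//
// Any bit set outside the low two bits makes pthread_rwlock_init return EINVAL
// (see the RWLOCKATTR_RESERVED_MASK check there).
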
static inline __always_inline bool __rwlockattr_getpshared(const pthread_rwlockattr_t* attr) {
  return (*attr & RWLOCKATTR_PSHARED_MASK) >> RWLOCKATTR_PSHARED_SHIFT;
}

static inline __always_inline void __rwlockattr_setpshared(pthread_rwlockattr_t* attr,
                                                           int pshared) {
  *attr = (*attr & ~RWLOCKATTR_PSHARED_MASK) | (pshared << RWLOCKATTR_PSHARED_SHIFT);
}

static inline __always_inline int __rwlockattr_getkind(const pthread_rwlockattr_t* attr) {
  return (*attr & RWLOCKATTR_KIND_MASK) >> RWLOCKATTR_KIND_SHIFT;
}

static inline __always_inline void __rwlockattr_setkind(pthread_rwlockattr_t* attr, int kind) {
  *attr = (*attr & ~RWLOCKATTR_KIND_MASK) | (kind << RWLOCKATTR_KIND_SHIFT);
}


int pthread_rwlockattr_init(pthread_rwlockattr_t* attr) {
  *attr = 0;
  return 0;
}

int pthread_rwlockattr_destroy(pthread_rwlockattr_t* attr) {
  *attr = -1;
  return 0;
}

int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
  if (__rwlockattr_getpshared(attr)) {
    *pshared = PTHREAD_PROCESS_SHARED;
  } else {
    *pshared = PTHREAD_PROCESS_PRIVATE;
  }
  return 0;
}

int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* attr, int pshared) {
  switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
      __rwlockattr_setpshared(attr, 0);
      return 0;
    case PTHREAD_PROCESS_SHARED:
      __rwlockattr_setpshared(attr, 1);
      return 0;
    default:
      return EINVAL;
  }
}

int pthread_rwlockattr_getkind_np(const pthread_rwlockattr_t* attr, int* pref) {
  *pref = __rwlockattr_getkind(attr);
  return 0;
}

int pthread_rwlockattr_setkind_np(pthread_rwlockattr_t* attr, int pref) {
  switch (pref) {
    case PTHREAD_RWLOCK_PREFER_READER_NP:   // Fall through.
    case PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP:
      __rwlockattr_setkind(attr, pref);
      return 0;
    default:
      return EINVAL;
  }
}

// A rwlock state is implemented as a 32-bit integer with the following fields:
//  bits      name                              description
//   31       owned_by_writer_flag              set to 1 if the lock is owned by a writer now.
//  30-2      reader_count                      the count of readers holding the lock.
//   1        have_pending_writers              set to 1 if there are pending writers.
//   0        have_pending_readers              set to 1 if there are pending readers.

#define STATE_HAVE_PENDING_READERS_SHIFT    0
#define STATE_HAVE_PENDING_WRITERS_SHIFT    1
#define STATE_READER_COUNT_SHIFT            2
#define STATE_OWNED_BY_WRITER_SHIFT        31

#define STATE_HAVE_PENDING_READERS_FLAG     (1 << STATE_HAVE_PENDING_READERS_SHIFT)
#define STATE_HAVE_PENDING_WRITERS_FLAG     (1 << STATE_HAVE_PENDING_WRITERS_SHIFT)
#define STATE_READER_COUNT_CHANGE_STEP      (1 << STATE_READER_COUNT_SHIFT)
#define STATE_OWNED_BY_WRITER_FLAG          (1 << STATE_OWNED_BY_WRITER_SHIFT)

#define STATE_HAVE_PENDING_READERS_OR_WRITERS_FLAG \
          (STATE_HAVE_PENDING_READERS_FLAG | STATE_HAVE_PENDING_WRITERS_FLAG)

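// Worked example (illustrative): two readers holding the lock while one writer
// is queued is encoded as
//   state = 2 * STATE_READER_COUNT_CHANGE_STEP | STATE_HAVE_PENDING_WRITERS_FLAG
//         = (2 << 2) | (1 << 1) = 10 (binary 1010)
// and a write-locked rwlock with queued readers is
//   state = STATE_OWNED_BY_WRITER_FLAG | STATE_HAVE_PENDING_READERS_FLAG
//         = (1 << 31) | (1 << 0),
// which is negative when read as a signed int; the helpers below exploit that.
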
struct pthread_rwlock_internal_t {
  atomic_int state;
  atomic_int writer_tid;

  bool pshared;
  bool writer_nonrecursive_preferred;
  uint16_t __pad;

// When a reader thread plans to suspend on the rwlock, it will add STATE_HAVE_PENDING_READERS_FLAG
// to state, increase pending_reader_count, and wait on pending_reader_wakeup_serial. After being
// woken up, the reader thread decreases pending_reader_count, and the last pending reader thread
// should remove STATE_HAVE_PENDING_READERS_FLAG from state. A pending writer thread works in a
// similar way, except that it uses the flag and members for writer threads.

  Lock pending_lock;  // All pending members below are protected by pending_lock.
  uint32_t pending_reader_count;  // Count of pending reader threads.
  uint32_t pending_writer_count;  // Count of pending writer threads.
  uint32_t pending_reader_wakeup_serial;  // Pending reader threads wait on this address by futex_wait.
  uint32_t pending_writer_wakeup_serial;  // Pending writer threads wait on this address by futex_wait.

#if defined(__LP64__)
  char __reserved[20];
#else
  char __reserved[4];
#endif
};

static inline __always_inline bool __state_owned_by_writer(int state) {
  return state < 0;
}

static inline __always_inline bool __state_owned_by_readers(int state) {
  // If state >= 0, the owned_by_writer_flag is not set.
  // And if state >= STATE_READER_COUNT_CHANGE_STEP, the reader_count field is not empty.
  return state >= STATE_READER_COUNT_CHANGE_STEP;
}

static inline __always_inline bool __state_owned_by_readers_or_writer(int state) {
  return state < 0 || state >= STATE_READER_COUNT_CHANGE_STEP;
}

static inline __always_inline int __state_add_writer_flag(int state) {
  return state | STATE_OWNED_BY_WRITER_FLAG;
}

static inline __always_inline bool __state_is_last_reader(int state) {
  return (state >> STATE_READER_COUNT_SHIFT) == 1;
}

static inline __always_inline bool __state_have_pending_writers(int state) {
  return state & STATE_HAVE_PENDING_WRITERS_FLAG;
}

static inline __always_inline bool __state_have_pending_readers_or_writers(int state) {
  return state & STATE_HAVE_PENDING_READERS_OR_WRITERS_FLAG;
}

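// Illustrative check of the helpers above against the example states from the
// layout comment:
//   state == 10 ((2 << 2) | (1 << 1)):
//     __state_owned_by_readers(10)                   -> true  (10 >= 4)
//     __state_have_pending_writers(10)               -> true
//     __state_is_last_reader(10)                     -> false (10 >> 2 == 2)
//   state == STATE_OWNED_BY_WRITER_FLAG | STATE_HAVE_PENDING_READERS_FLAG:
//     __state_owned_by_writer(state)                 -> true  (state < 0)
//     __state_have_pending_readers_or_writers(state) -> true
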
static_assert(sizeof(pthread_rwlock_t) == sizeof(pthread_rwlock_internal_t),
              "pthread_rwlock_t should actually be pthread_rwlock_internal_t in implementation.");

// For binary compatibility with old versions of pthread_rwlock_t, we can't use stricter
// alignment than 4-byte alignment.
static_assert(alignof(pthread_rwlock_t) == 4,
              "pthread_rwlock_t should fulfill the alignment requirement of pthread_rwlock_internal_t.");

static inline __always_inline pthread_rwlock_internal_t* __get_internal_rwlock(pthread_rwlock_t* rwlock_interface) {
  return reinterpret_cast<pthread_rwlock_internal_t*>(rwlock_interface);
}

int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  memset(rwlock, 0, sizeof(pthread_rwlock_internal_t));

  if (__predict_false(attr != nullptr)) {
    rwlock->pshared = __rwlockattr_getpshared(attr);
    int kind = __rwlockattr_getkind(attr);
    switch (kind) {
      case PTHREAD_RWLOCK_PREFER_READER_NP:
        rwlock->writer_nonrecursive_preferred = false;
        break;
      case PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP:
        rwlock->writer_nonrecursive_preferred = true;
        break;
      default:
        return EINVAL;
    }
    if ((*attr & RWLOCKATTR_RESERVED_MASK) != 0) {
      return EINVAL;
    }
  }

  atomic_init(&rwlock->state, 0);
  rwlock->pending_lock.init(rwlock->pshared);
  return 0;
}

int pthread_rwlock_destroy(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (atomic_load_explicit(&rwlock->state, memory_order_relaxed) != 0) {
    return EBUSY;
  }
  return 0;
}

static inline __always_inline bool __can_acquire_read_lock(int old_state,
                                                           bool writer_nonrecursive_preferred) {
  // In writer-nonrecursive-preferred mode, we prevent further readers from acquiring the
  // lock when there are writers waiting for it.
  bool cannot_apply = __state_owned_by_writer(old_state) ||
                      (writer_nonrecursive_preferred && __state_have_pending_writers(old_state));
  return !cannot_apply;
}

static inline __always_inline int __pthread_rwlock_tryrdlock(pthread_rwlock_internal_t* rwlock) {
  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (__predict_true(__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred))) {
    int new_state = old_state + STATE_READER_COUNT_CHANGE_STEP;
    if (__predict_false(!__state_owned_by_readers(new_state))) {  // Happens when reader count overflows.
      return EAGAIN;
    }
    if (__predict_true(atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, new_state,
                                                             memory_order_acquire, memory_order_relaxed))) {
      return 0;
    }
  }
  return EBUSY;
}

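// Note on the overflow check above (illustrative arithmetic): reader_count
// occupies bits 30-2, i.e. 29 bits, so at most 2^29 - 1 = 536870911 read locks
// can be held at once. Incrementing past that carries into bit 31, the
// owned_by_writer_flag, which makes new_state negative, fails the
// __state_owned_by_readers(new_state) test, and yields EAGAIN.
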
static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock, bool use_realtime_clock,
                                        const timespec* abs_timeout_or_null) {
  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }

  while (true) {
    int result = __pthread_rwlock_tryrdlock(rwlock);
    if (result == 0 || result == EAGAIN) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null, true);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count++;

    // We rely on the fact that all atomic read-modify-write operations on the same object (here
    // rwlock->state) appear to occur in a single total order. If the pending flag is added before
    // unlocking, the unlocking thread will wake up the waiter. Otherwise, we will see that the
    // state is unlocked and will not wait. (See the summary sketch after this function.)
    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_READERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_reader_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      futex_result = __futex_wait_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared,
                                     old_serial, use_realtime_clock, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count--;
    if (rwlock->pending_reader_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_READERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}

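// Summary sketch of the sleep/wakeup handshake used above (descriptive only;
// the code above is the actual implementation):
//
//   waiter:                                 unlocker:
//     pending_lock.lock()                     state -= lock bits (fetch_sub/and)
//     pending_reader_count++                  pending_lock.lock()
//     state |= pending flag (fetch_or)        if pending waiters: serial++
//     old_serial = wakeup_serial              pending_lock.unlock()
//     pending_lock.unlock()                   futex_wake(&wakeup_serial)
//     if state was still locked:
//       futex_wait(&wakeup_serial, old_serial)
//
// Because the read-modify-write operations on rwlock->state occur in a single
// total order, either the unlocker's release comes first (the waiter then sees
// an unlocked state and skips futex_wait), or the waiter's fetch_or comes first
// (the unlocker then sees the pending flag and bumps the serial, so a
// futex_wait against the stale old_serial returns immediately).
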
static inline __always_inline bool __can_acquire_write_lock(int old_state) {
  return !__state_owned_by_readers_or_writer(old_state);
}

static inline __always_inline int __pthread_rwlock_trywrlock(pthread_rwlock_internal_t* rwlock) {
  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (__predict_true(__can_acquire_write_lock(old_state))) {
    if (__predict_true(atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
          __state_add_writer_flag(old_state), memory_order_acquire, memory_order_relaxed))) {
      atomic_store_explicit(&rwlock->writer_tid, __get_thread()->tid, memory_order_relaxed);
      return 0;
    }
  }
  return EBUSY;
}

static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock, bool use_realtime_clock,
                                        const timespec* abs_timeout_or_null) {
  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }
  while (true) {
    int result = __pthread_rwlock_trywrlock(rwlock);
    if (result == 0) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null, true);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_write_lock(old_state)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count++;

    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_WRITERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_writer_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_write_lock(old_state)) {
      futex_result = __futex_wait_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared,
                                     old_serial, use_realtime_clock, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count--;
    if (rwlock->pending_writer_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_WRITERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}

int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
  // Avoid slowing down the fast path of rdlock.
  if (__predict_true(__pthread_rwlock_tryrdlock(rwlock) == 0)) {
    return 0;
  }
  return __pthread_rwlock_timedrdlock(rwlock, false, nullptr);
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, true, abs_timeout);
}

int pthread_rwlock_timedrdlock_monotonic_np(pthread_rwlock_t* rwlock_interface,
                                            const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, false, abs_timeout);
}

int pthread_rwlock_clockrdlock(pthread_rwlock_t* rwlock_interface, clockid_t clock,
                               const struct timespec* abs_timeout) {
  switch (clock) {
    case CLOCK_MONOTONIC:
      return pthread_rwlock_timedrdlock_monotonic_np(rwlock_interface, abs_timeout);
    case CLOCK_REALTIME:
      return pthread_rwlock_timedrdlock(rwlock_interface, abs_timeout);
    default:
      return EINVAL;
  }
}

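// Usage example (illustrative, assuming a pthread_rwlock_t named `lock` that
// was initialized elsewhere): wait up to one second on the monotonic clock for
// a read lock.
//
//   timespec ts;
//   clock_gettime(CLOCK_MONOTONIC, &ts);
//   ts.tv_sec += 1;
//   int rc = pthread_rwlock_clockrdlock(&lock, CLOCK_MONOTONIC, &ts);
//   // rc == 0 on success, ETIMEDOUT if the deadline passed, EINVAL for a
//   // clock other than CLOCK_MONOTONIC/CLOCK_REALTIME.
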
int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock_interface) {
  return __pthread_rwlock_tryrdlock(__get_internal_rwlock(rwlock_interface));
}

int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
  // Avoid slowing down the fast path of wrlock.
  if (__predict_true(__pthread_rwlock_trywrlock(rwlock) == 0)) {
    return 0;
  }
  return __pthread_rwlock_timedwrlock(rwlock, false, nullptr);
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, true, abs_timeout);
}

int pthread_rwlock_timedwrlock_monotonic_np(pthread_rwlock_t* rwlock_interface,
                                            const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, false, abs_timeout);
}

int pthread_rwlock_clockwrlock(pthread_rwlock_t* rwlock_interface, clockid_t clock,
                               const struct timespec* abs_timeout) {
  switch (clock) {
    case CLOCK_MONOTONIC:
      return pthread_rwlock_timedwrlock_monotonic_np(rwlock_interface, abs_timeout);
    case CLOCK_REALTIME:
      return pthread_rwlock_timedwrlock(rwlock_interface, abs_timeout);
    default:
      return EINVAL;
  }
}

int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock_interface) {
  return __pthread_rwlock_trywrlock(__get_internal_rwlock(rwlock_interface));
}

int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
  if (__state_owned_by_writer(old_state)) {
    if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) != __get_thread()->tid) {
      return EPERM;
    }
    atomic_store_explicit(&rwlock->writer_tid, 0, memory_order_relaxed);
    old_state = atomic_fetch_and_explicit(&rwlock->state, ~STATE_OWNED_BY_WRITER_FLAG,
                                          memory_order_release);
    if (!__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }

  } else if (__state_owned_by_readers(old_state)) {
    old_state = atomic_fetch_sub_explicit(&rwlock->state, STATE_READER_COUNT_CHANGE_STEP,
                                          memory_order_release);
    if (!__state_is_last_reader(old_state) || !__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }

  } else {
    return EPERM;
  }

  // Wake up pending readers or writers.
  rwlock->pending_lock.lock();
  if (rwlock->pending_writer_count != 0) {
    rwlock->pending_writer_wakeup_serial++;
    rwlock->pending_lock.unlock();

    __futex_wake_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared, 1);

  } else if (rwlock->pending_reader_count != 0) {
    rwlock->pending_reader_wakeup_serial++;
    rwlock->pending_lock.unlock();

    __futex_wake_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared, INT_MAX);

  } else {
    // This happens when the waiters have already been woken up by timeout.
    rwlock->pending_lock.unlock();
  }
  return 0;
}
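
// End-to-end usage example (illustrative, not part of the original source):
// a writer publishing a value that many readers consume concurrently.
//
//   static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;
//   static int g_value;
//
//   void publish(int v) {
//     pthread_rwlock_wrlock(&g_lock);  // exclusive: waits for all readers
//     g_value = v;
//     pthread_rwlock_unlock(&g_lock);  // wakes one pending writer, else all readers
//   }
//
//   int consume() {
//     pthread_rwlock_rdlock(&g_lock);  // shared: concurrent readers allowed
//     int v = g_value;
//     pthread_rwlock_unlock(&g_lock);
//     return v;
//   }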