/* NOTE(review): removed code-browser navigation chrome ("Home / Line# / Scopes /
 * Navigate / Raw / Download") that was captured by the export and is not part of
 * the source file. */
1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <errno.h>
30 
31 #include "pthread_internal.h"
32 #include "private/bionic_futex.h"
33 
34 /* Technical note:
35  *
36  * Possible states of a read/write lock:
37  *
38  *  - no readers and no writer (unlocked)
39  *  - one or more readers sharing the lock at the same time (read-locked)
40  *  - one writer holding the lock (write-lock)
41  *
42  * Additionally:
43  *  - trying to get the write-lock while there are any readers blocks
44  *  - trying to get the read-lock while there is a writer blocks
45  *  - a single thread can acquire the lock multiple times in read mode
46  *
47  *  - Posix states that behavior is undefined (may deadlock) if a thread tries
48  *    to acquire the lock
49  *      - in write mode while already holding the lock (whether in read or write mode)
50  *      - in read mode while already holding the lock in write mode.
51  *  - This implementation will return EDEADLK in "write after write" and "read after
52  *    write" cases and will deadlock in write after read case.
53  *
54  * TODO: VERY CAREFULLY convert this to use C++11 atomics when possible. All volatile
55  * members of pthread_rwlock_t should be converted to atomics<> and __sync_bool_compare_and_swap
56  * should be changed to compare_exchange_strong accompanied by the proper ordering
57  * constraints (comments have been added with the intending ordering across the code).
58  *
59  * TODO: As it stands now, pending_readers and pending_writers could be merged into a
60  * a single waiters variable.  Keeping them separate adds a bit of clarity and keeps
61  * the door open for a writer-biased implementation.
62  *
63  */
64 
65 #define RWLOCKATTR_DEFAULT     0
66 #define RWLOCKATTR_SHARED_MASK 0x0010
67 
rwlock_is_shared(const pthread_rwlock_t * rwlock)68 static inline bool rwlock_is_shared(const pthread_rwlock_t* rwlock) {
69   return rwlock->attr == PTHREAD_PROCESS_SHARED;
70 }
71 
timespec_from_absolute(timespec * rel_timeout,const timespec * abs_timeout)72 static bool timespec_from_absolute(timespec* rel_timeout, const timespec* abs_timeout) {
73   if (abs_timeout != NULL) {
74     if (__timespec_from_absolute(rel_timeout, abs_timeout, CLOCK_REALTIME) < 0) {
75       return false;
76     }
77   }
78   return true;
79 }
80 
pthread_rwlockattr_init(pthread_rwlockattr_t * attr)81 int pthread_rwlockattr_init(pthread_rwlockattr_t* attr) {
82   *attr = PTHREAD_PROCESS_PRIVATE;
83   return 0;
84 }
85 
pthread_rwlockattr_destroy(pthread_rwlockattr_t * attr)86 int pthread_rwlockattr_destroy(pthread_rwlockattr_t* attr) {
87   *attr = -1;
88   return 0;
89 }
90 
pthread_rwlockattr_setpshared(pthread_rwlockattr_t * attr,int pshared)91 int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* attr, int pshared) {
92   switch (pshared) {
93     case PTHREAD_PROCESS_PRIVATE:
94     case PTHREAD_PROCESS_SHARED:
95       *attr = pshared;
96       return 0;
97     default:
98       return EINVAL;
99   }
100 }
101 
// Report the process-shared disposition previously stored in the attribute.
int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
  *pshared = static_cast<int>(*attr);
  return 0;
}
106 
pthread_rwlock_init(pthread_rwlock_t * rwlock,const pthread_rwlockattr_t * attr)107 int pthread_rwlock_init(pthread_rwlock_t* rwlock, const pthread_rwlockattr_t* attr) {
108   if (attr != NULL) {
109     switch (*attr) {
110       case PTHREAD_PROCESS_SHARED:
111       case PTHREAD_PROCESS_PRIVATE:
112         rwlock->attr= *attr;
113         break;
114       default:
115         return EINVAL;
116     }
117   }
118 
119   rwlock->state = 0;
120   rwlock->pending_readers = 0;
121   rwlock->pending_writers = 0;
122   rwlock->writer_thread_id = 0;
123 
124   return 0;
125 }
126 
pthread_rwlock_destroy(pthread_rwlock_t * rwlock)127 int pthread_rwlock_destroy(pthread_rwlock_t* rwlock) {
128   if (rwlock->state != 0) {
129     return EBUSY;
130   }
131   return 0;
132 }
133 
// Acquire <rwlock> in read mode, blocking until available or until the
// absolute CLOCK_REALTIME deadline <abs_timeout> expires (NULL blocks
// forever). The state field encodes 0 = unlocked, >0 = reader count,
// -1 = write-locked. Returns 0, EDEADLK (this thread already holds the
// write lock), or ETIMEDOUT.
static int __pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
  // "Read after write" by the same thread would never succeed; fail fast.
  if (__predict_false(__get_thread()->tid == rwlock->writer_thread_id)) {
    return EDEADLK;
  }

  timespec ts;
  timespec* rel_timeout = (abs_timeout == NULL) ? NULL : &ts;
  bool done = false;
  do {
    // This is actually a race read as there's nothing that guarantees the atomicity of integer
    // reads / writes. However, in practice this "never" happens so until we switch to C++11 this
    // should work fine. The same applies in the other places this idiom is used.
    int32_t cur_state = rwlock->state;  // C++11 relaxed atomic read
    if (__predict_true(cur_state >= 0)) {
      // Add as an extra reader.
      done = __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state + 1);  // C++11 memory_order_aquire
    } else {
      // The relative timeout is recomputed each iteration so the futex wait
      // below always reflects the time remaining until the absolute deadline.
      if (!timespec_from_absolute(rel_timeout, abs_timeout)) {
        return ETIMEDOUT;
      }
      // Owner holds it in write mode, hang up.
      // To avoid losing wake ups the pending_readers update and the state read should be
      // sequentially consistent. (currently enforced by __sync_fetch_and_add which creates a full barrier)
      __sync_fetch_and_add(&rwlock->pending_readers, 1);  // C++11 memory_order_relaxed (if the futex_wait ensures the ordering)
      int ret = __futex_wait_ex(&rwlock->state, rwlock_is_shared(rwlock), cur_state, rel_timeout);
      __sync_fetch_and_sub(&rwlock->pending_readers, 1);  // C++11 memory_order_relaxed
      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
      // Any other wake-up (including spurious) just retries the CAS loop.
    }
  } while (!done);

  return 0;
}
168 
// Acquire <rwlock> in write mode, blocking until available or until the
// absolute CLOCK_REALTIME deadline <abs_timeout> expires (NULL blocks
// forever). Succeeds only by moving state from 0 (unlocked) to -1
// (write-locked). Returns 0, EDEADLK (this thread already holds the write
// lock), or ETIMEDOUT.
static int __pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
  int tid = __get_thread()->tid;
  // "Write after write" by the same thread would never succeed; fail fast.
  if (__predict_false(tid == rwlock->writer_thread_id)) {
    return EDEADLK;
  }

  timespec ts;
  timespec* rel_timeout = (abs_timeout == NULL) ? NULL : &ts;
  bool done = false;
  do {
    int32_t cur_state = rwlock->state;  // racy read, see __pthread_rwlock_timedrdlock
    if (__predict_true(cur_state == 0)) {
      // Change state from 0 to -1.
      done =  __sync_bool_compare_and_swap(&rwlock->state, 0 /* cur state */, -1 /* new state */);  // C++11 memory_order_aquire
    } else {
      // Recompute the remaining time before each wait; fail if the deadline
      // has already passed.
      if (!timespec_from_absolute(rel_timeout, abs_timeout)) {
        return ETIMEDOUT;
      }
      // Failed to acquire, hang up.
      // To avoid losing wake ups the pending_writers update and the state read should be
      // sequentially consistent. (currently enforced by __sync_fetch_and_add which creates a full barrier)
      __sync_fetch_and_add(&rwlock->pending_writers, 1);  // C++11 memory_order_relaxed (if the futex_wait ensures the ordering)
      int ret = __futex_wait_ex(&rwlock->state, rwlock_is_shared(rwlock), cur_state, rel_timeout);
      __sync_fetch_and_sub(&rwlock->pending_writers, 1);  // C++11 memory_order_relaxed
      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  } while (!done);

  // Record ownership for the EDEADLK and unlock EPERM checks.
  rwlock->writer_thread_id = tid;
  return 0;
}
202 
pthread_rwlock_rdlock(pthread_rwlock_t * rwlock)203 int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock) {
204   return __pthread_rwlock_timedrdlock(rwlock, NULL);
205 }
206 
pthread_rwlock_timedrdlock(pthread_rwlock_t * rwlock,const timespec * abs_timeout)207 int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
208   return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
209 }
210 
pthread_rwlock_tryrdlock(pthread_rwlock_t * rwlock)211 int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock) {
212   int32_t cur_state = rwlock->state;
213   if ((cur_state >= 0) &&
214       __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state + 1)) {  // C++11 memory_order_acquire
215     return 0;
216   }
217   return EBUSY;
218 }
219 
pthread_rwlock_wrlock(pthread_rwlock_t * rwlock)220 int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock) {
221   return __pthread_rwlock_timedwrlock(rwlock, NULL);
222 }
223 
pthread_rwlock_timedwrlock(pthread_rwlock_t * rwlock,const timespec * abs_timeout)224 int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
225   return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
226 }
227 
pthread_rwlock_trywrlock(pthread_rwlock_t * rwlock)228 int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock) {
229   int tid = __get_thread()->tid;
230   int32_t cur_state = rwlock->state;
231   if ((cur_state == 0) &&
232       __sync_bool_compare_and_swap(&rwlock->state, 0 /* cur state */, -1 /* new state */)) {  // C++11 memory_order_acquire
233     rwlock->writer_thread_id = tid;
234     return 0;
235   }
236   return EBUSY;
237 }
238 
239 
// Release <rwlock>, whether held in read or write mode. Returns EPERM when
// the lock is not held at all, or is write-locked by a different thread.
// Whoever releases the last hold (the writer, or the final reader) wakes
// every futex waiter so pending readers and writers can re-contend.
int pthread_rwlock_unlock(pthread_rwlock_t* rwlock) {
  int tid = __get_thread()->tid;
  bool done = false;
  do {
    int32_t cur_state = rwlock->state;  // racy read, see __pthread_rwlock_timedrdlock
    if (cur_state == 0) {
      // Not locked: the caller cannot legitimately be releasing it.
      return EPERM;
    }
    if (cur_state == -1) {
      // Write-locked: only the owning thread may release.
      if (rwlock->writer_thread_id != tid) {
        return EPERM;
      }
      // We're no longer the owner.
      rwlock->writer_thread_id = 0;
      // Change state from -1 to 0.
      // We use __sync_bool_compare_and_swap to achieve sequential consistency of the state store and
      // the following pendingX loads. A simple store with memory_order_release semantics
      // is not enough to guarantee that the pendingX loads are not reordered before the
      // store (which may lead to a lost wakeup).
      __sync_bool_compare_and_swap( &rwlock->state, -1 /* cur state*/, 0 /* new state */);  // C++11 maybe memory_order_seq_cst?

      // Wake any waiters.
      if (__predict_false(rwlock->pending_readers > 0 || rwlock->pending_writers > 0)) {
        __futex_wake_ex(&rwlock->state, rwlock_is_shared(rwlock), INT_MAX);
      }
      done = true;
    } else { // cur_state > 0
      // Reduce state by 1.
      // See the comment above on why we need __sync_bool_compare_and_swap.
      done = __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state - 1);  // C++11 maybe memory_order_seq_cst?
      if (done && (cur_state - 1) == 0) {
        // There are no more readers, wake any waiters.
        if (__predict_false(rwlock->pending_readers > 0 || rwlock->pending_writers > 0)) {
          __futex_wake_ex(&rwlock->state, rwlock_is_shared(rwlock), INT_MAX);
        }
      }
    }
  } while (!done);

  return 0;
}
281