1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <pthread.h>
30 
31 #include <errno.h>
32 #include <limits.h>
33 #include <sys/atomics.h>
34 #include <unistd.h>
35 
36 #include "bionic_atomic_inline.h"
37 #include "bionic_futex.h"
38 #include "bionic_pthread.h"
39 #include "bionic_tls.h"
40 #include "pthread_internal.h"
41 #include "thread_private.h"
42 
43 extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex);
44 extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex);
45 
46 extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
47 extern void _exit_thread(int  retCode);
48 
49 int  __futex_wake_ex(volatile void *ftx, int pshared, int val)
50 {
51     return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
52 }
53 
54 int  __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout)
55 {
56     return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
57 }
58 
59 #define  __likely(cond)    __builtin_expect(!!(cond), 1)
60 #define  __unlikely(cond)  __builtin_expect(!!(cond), 0)
61 
62 void*
63 __get_stack_base(int  *p_stack_size)
64 {
65     pthread_internal_t*  thread = __get_thread();
66 
67     *p_stack_size = thread->attr.stack_size;
68     return thread->attr.stack_base;
69 }
70 
71 
72 /* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
73  *         and thread cancelation
74  */
75 
76 void __pthread_cleanup_push( __pthread_cleanup_t*      c,
77                              __pthread_cleanup_func_t  routine,
78                              void*                     arg )
79 {
80     pthread_internal_t*  thread = __get_thread();
81 
82     c->__cleanup_routine  = routine;
83     c->__cleanup_arg      = arg;
84     c->__cleanup_prev     = thread->cleanup_stack;
85     thread->cleanup_stack = c;
86 }
87 
88 void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
89 {
90     pthread_internal_t*  thread = __get_thread();
91 
92     thread->cleanup_stack = c->__cleanup_prev;
93     if (execute)
94         c->__cleanup_routine(c->__cleanup_arg);
95 }
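
/* Illustrative sketch (hypothetical example for exposition; the helper names
 * are made up): the two functions above back the standard
 * pthread_cleanup_push()/pthread_cleanup_pop() macros from <pthread.h>, which
 * must be paired inside one lexical scope.
 */
#if 0
static void unlock_on_exit(void* arg)          /* hypothetical cleanup routine */
{
    pthread_mutex_unlock((pthread_mutex_t*)arg);
}

static void worker(pthread_mutex_t* lock)      /* hypothetical caller */
{
    pthread_mutex_lock(lock);
    pthread_cleanup_push(unlock_on_exit, lock);
    /* ... work that may call pthread_exit(); the handler then runs ... */
    pthread_cleanup_pop(1);                    /* 1 => run unlock_on_exit(lock) now */
}
#endif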
96 
97 void pthread_exit(void * retval)
98 {
99     pthread_internal_t*  thread     = __get_thread();
100     void*                stack_base = thread->attr.stack_base;
101     int                  stack_size = thread->attr.stack_size;
102     int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;
103     sigset_t mask;
104 
105     // call the cleanup handlers first
106     while (thread->cleanup_stack) {
107         __pthread_cleanup_t*  c = thread->cleanup_stack;
108         thread->cleanup_stack   = c->__cleanup_prev;
109         c->__cleanup_routine(c->__cleanup_arg);
110     }
111 
112     // Call the TLS destructors. It is important to do that before removing this
113     // thread from the global list: this ensures that if someone else deletes
114     // a TLS key, the corresponding value will be set to NULL in this thread's TLS
115     // space (see pthread_key_delete).
116     pthread_key_clean_all();
117 
118     // if the thread is detached, destroy the pthread_internal_t
119     // otherwise, keep it in memory and signal any joiners.
120     pthread_mutex_lock(&gThreadListLock);
121     if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
122         _pthread_internal_remove_locked(thread);
123     } else {
124        /* make sure that the thread struct doesn't have stale pointers to a stack that
125         * will be unmapped after the exit call below.
126         */
127         if (!user_stack) {
128             thread->attr.stack_base = NULL;
129             thread->attr.stack_size = 0;
130             thread->tls = NULL;
131         }
132 
133        /* The join_count field stores the number of threads waiting with
134         * pthread_join() for the termination of this thread.
135         *
136         * If it is positive, we need to signal the waiters, and we do not touch
137         * the count (it will be decremented by the waiters; the last one will
138         * also remove/free the thread structure).
139         *
140         * If it is zero, we set the count value to -1 to indicate that the
141         * thread is in 'zombie' state: it has stopped executing, and its stack
142         * is gone (as well as its TLS area). When another thread calls pthread_join()
143         * on it, it will immediately free the thread and return.
144         */
145         thread->return_value = retval;
146         if (thread->join_count > 0) {
147             pthread_cond_broadcast(&thread->join_cond);
148         } else {
149             thread->join_count = -1;  /* zombie thread */
150         }
151     }
152     pthread_mutex_unlock(&gThreadListLock);
153 
154     sigfillset(&mask);
155     sigdelset(&mask, SIGSEGV);
156     (void)sigprocmask(SIG_SETMASK, &mask, (sigset_t *)NULL);
157 
158     // destroy the thread stack
159     if (user_stack)
160         _exit_thread((int)retval);
161     else
162         _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
163 }
164 
165 /* a mutex is implemented as a 32-bit integer holding the following fields
166  *
167  * bits:     name     description
168  * 31-16     tid      owner thread's tid (recursive and errorcheck only)
169  * 15-14     type     mutex type
170  * 13        shared   process-shared flag
171  * 12-2      counter  counter of recursive mutexes
172  * 1-0       state    lock state (0, 1 or 2)
173  */
174 
175 /* Convenience macro, creates a mask of 'bits' bits that starts from
176  * the 'shift'-th least significant bit in a 32-bit word.
177  *
178  * Examples: FIELD_MASK(0,4)  -> 0xf
179  *           FIELD_MASK(16,9) -> 0x1ff0000
180  */
181 #define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))
182 
183 /* This one is used to create a bit pattern from a given field value */
184 #define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))
185 
186 /* And this one does the opposite, i.e. extract a field's value from a bit pattern */
187 #define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))
188 
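/* Worked examples for the two macros above (illustrative, in the spirit of
 * the FIELD_MASK() examples):
 *
 *   FIELD_TO_BITS(5, 2, 11)              -> (5 & 0x7ff) << 2            == 0x14
 *   FIELD_FROM_BITS(0x12344005, 16, 16)  -> (0x12344005 >> 16) & 0xffff == 0x1234
 */
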
189 /* Mutex state:
190  *
191  * 0 for unlocked
192  * 1 for locked, no waiters
193  * 2 for locked, maybe waiters
194  */
195 #define  MUTEX_STATE_SHIFT      0
196 #define  MUTEX_STATE_LEN        2
197 
198 #define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
199 #define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
200 #define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
201 
202 #define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
203 #define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
204 #define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */
205 
206 #define  MUTEX_STATE_FROM_BITS(v)    FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
207 #define  MUTEX_STATE_TO_BITS(v)      FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
208 
209 #define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
210 #define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
211 #define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)
212 
213 /* return true iff the mutex is locked with no waiters */
214 #define  MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)
215 
216 /* return true iff the mutex is locked with maybe waiters */
217 #define  MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)   (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)
218 
219 /* used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
220 #define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)      ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))
221 
222 /* Mutex counter:
223  *
224  * We need to check for overflow before incrementing, and we also need to
225  * detect when the counter is 0
226  */
227 #define  MUTEX_COUNTER_SHIFT         2
228 #define  MUTEX_COUNTER_LEN           11
229 #define  MUTEX_COUNTER_MASK          FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)
230 
231 #define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)    (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
232 #define  MUTEX_COUNTER_BITS_IS_ZERO(v)          (((v) & MUTEX_COUNTER_MASK) == 0)
233 
234 /* Used to increment the counter directly after overflow has been checked */
235 #define  MUTEX_COUNTER_BITS_ONE      FIELD_TO_BITS(1,MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)
236 
237 /* Returns true iff the counter is 0 */
238 #define  MUTEX_COUNTER_BITS_ARE_ZERO(v)  (((v) & MUTEX_COUNTER_MASK) == 0)
239 
240 /* Mutex shared bit flag
241  *
242  * This flag is set to indicate that the mutex is shared among processes.
243  * This changes the futex opcode we use for futex wait/wake operations
244  * (non-shared operations are much faster).
245  */
246 #define  MUTEX_SHARED_SHIFT    13
247 #define  MUTEX_SHARED_MASK     FIELD_MASK(MUTEX_SHARED_SHIFT,1)
248 
249 /* Mutex type:
250  *
251  * We support normal, recursive and errorcheck mutexes.
252  *
253  * The constants defined here *cannot* be changed because they must match
254  * the C library ABI which defines the following initialization values in
255  * <pthread.h>:
256  *
257  *   __PTHREAD_MUTEX_INIT_VALUE
258  *   __PTHREAD_RECURSIVE_MUTEX_VALUE
259  *   __PTHREAD_ERRORCHECK_MUTEX_INIT_VALUE
260  */
261 #define  MUTEX_TYPE_SHIFT      14
262 #define  MUTEX_TYPE_LEN        2
263 #define  MUTEX_TYPE_MASK       FIELD_MASK(MUTEX_TYPE_SHIFT,MUTEX_TYPE_LEN)
264 
265 #define  MUTEX_TYPE_NORMAL          0  /* Must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
266 #define  MUTEX_TYPE_RECURSIVE       1
267 #define  MUTEX_TYPE_ERRORCHECK      2
268 
269 #define  MUTEX_TYPE_TO_BITS(t)       FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)
270 
271 #define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(MUTEX_TYPE_NORMAL)
272 #define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(MUTEX_TYPE_RECURSIVE)
273 #define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(MUTEX_TYPE_ERRORCHECK)
274 
275 /* Mutex owner field:
276  *
277  * This is only used for recursive and errorcheck mutexes. It holds the
278  * tid of the owning thread. Note that this works because the Linux
279  * kernel _only_ uses 16-bit values for tids.
280  *
281  * More specifically, it will wrap to 10000 when it reaches over 32768 for
282  * application processes. You can check this by running the following inside
283  * an adb shell session:
284  *
285     OLDPID=$$;
286     while true; do
287     NEWPID=$(sh -c 'echo $$')
288     if [ "$NEWPID" -gt 32768 ]; then
289         echo "AARGH: new PID $NEWPID is too high!"
290         exit 1
291     fi
292     if [ "$NEWPID" -lt "$OLDPID" ]; then
293         echo "****** Wrapping from PID $OLDPID to $NEWPID. *******"
294     else
295         echo -n "$NEWPID!"
296     fi
297     OLDPID=$NEWPID
298     done
299 
300  * Note that you can run the same example on a desktop Linux system; the
301  * wrapping will also happen at 32768, but it will go back to 300 instead.
302  */
303 #define  MUTEX_OWNER_SHIFT     16
304 #define  MUTEX_OWNER_LEN       16
305 
306 #define  MUTEX_OWNER_FROM_BITS(v)    FIELD_FROM_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
307 #define  MUTEX_OWNER_TO_BITS(v)      FIELD_TO_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
308 
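/* Worked example combining the fields above (illustrative): a process-private
 * recursive mutex owned by tid 0x1234, locked once more recursively
 * (counter == 1) and uncontended, is encoded as
 *
 *     MUTEX_OWNER_TO_BITS(0x1234)                               -> 0x12340000
 *   | MUTEX_TYPE_BITS_RECURSIVE                                 -> 0x00004000
 *   | FIELD_TO_BITS(1, MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)  -> 0x00000004
 *   | MUTEX_STATE_BITS_LOCKED_UNCONTENDED                       -> 0x00000001
 *
 * giving a mutex value of 0x12344005.
 */
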
309 /* Convenience macros.
310  *
311  * These are used to form or modify the bit pattern of a given mutex value
312  */
313 
314 
315 
316 /* a mutex attribute holds the following fields
317  *
318  * bits:     name       description
319  * 0-3       type       type of mutex
320  * 4         shared     process-shared flag
321  */
322 #define  MUTEXATTR_TYPE_MASK   0x000f
323 #define  MUTEXATTR_SHARED_MASK 0x0010
324 
325 
326 int pthread_mutexattr_init(pthread_mutexattr_t *attr)
327 {
328     if (attr) {
329         *attr = PTHREAD_MUTEX_DEFAULT;
330         return 0;
331     } else {
332         return EINVAL;
333     }
334 }
335 
336 int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
337 {
338     if (attr) {
339         *attr = -1;
340         return 0;
341     } else {
342         return EINVAL;
343     }
344 }
345 
346 int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
347 {
348     if (attr) {
349         int  atype = (*attr & MUTEXATTR_TYPE_MASK);
350 
351          if (atype >= PTHREAD_MUTEX_NORMAL &&
352              atype <= PTHREAD_MUTEX_ERRORCHECK) {
353             *type = atype;
354             return 0;
355         }
356     }
357     return EINVAL;
358 }
359 
360 int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
361 {
362     if (attr && type >= PTHREAD_MUTEX_NORMAL &&
363                 type <= PTHREAD_MUTEX_ERRORCHECK ) {
364         *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
365         return 0;
366     }
367     return EINVAL;
368 }
369 
370 /* process-shared mutexes are only partially supported at the moment (see below) */
371 
372 int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int  pshared)
373 {
374     if (!attr)
375         return EINVAL;
376 
377     switch (pshared) {
378     case PTHREAD_PROCESS_PRIVATE:
379         *attr &= ~MUTEXATTR_SHARED_MASK;
380         return 0;
381 
382     case PTHREAD_PROCESS_SHARED:
383         /* our current implementation of pthread actually supports shared
384          * mutexes but won't clean up if a process dies with the mutex held.
385          * Nevertheless, it's better than nothing. Shared mutexes are used
386          * by surfaceflinger and audioflinger.
387          */
388         *attr |= MUTEXATTR_SHARED_MASK;
389         return 0;
390     }
391     return EINVAL;
392 }
393 
394 int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
395 {
396     if (!attr || !pshared)
397         return EINVAL;
398 
399     *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED
400                                                : PTHREAD_PROCESS_PRIVATE;
401     return 0;
402 }
403 
404 int pthread_mutex_init(pthread_mutex_t *mutex,
405                        const pthread_mutexattr_t *attr)
406 {
407     int value = 0;
408 
409     if (mutex == NULL)
410         return EINVAL;
411 
412     if (__likely(attr == NULL)) {
413         mutex->value = MUTEX_TYPE_BITS_NORMAL;
414         return 0;
415     }
416 
417     if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
418         value |= MUTEX_SHARED_MASK;
419 
420     switch (*attr & MUTEXATTR_TYPE_MASK) {
421     case PTHREAD_MUTEX_NORMAL:
422         value |= MUTEX_TYPE_BITS_NORMAL;
423         break;
424     case PTHREAD_MUTEX_RECURSIVE:
425         value |= MUTEX_TYPE_BITS_RECURSIVE;
426         break;
427     case PTHREAD_MUTEX_ERRORCHECK:
428         value |= MUTEX_TYPE_BITS_ERRORCHECK;
429         break;
430     default:
431         return EINVAL;
432     }
433 
434     mutex->value = value;
435     return 0;
436 }
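
/* Illustrative sketch (hypothetical example for exposition; the helper name is
 * made up): building a process-private recursive mutex with the attribute and
 * init functions above.
 */
#if 0
static int make_recursive_mutex(pthread_mutex_t* m)
{
    pthread_mutexattr_t attr;
    int ret;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
    ret = pthread_mutex_init(m, &attr);   /* value becomes MUTEX_TYPE_BITS_RECURSIVE */
    pthread_mutexattr_destroy(&attr);
    return ret;
}
#endif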
437 
438 
439 /*
440  * Lock a non-recursive mutex.
441  *
442  * As noted above, there are three states:
443  *   0 (unlocked, no contention)
444  *   1 (locked, no contention)
445  *   2 (locked, contention)
446  *
447  * Non-recursive mutexes don't use the thread-id or counter fields, and the
448  * "type" value is zero, so the only bits that will be set are the ones in
449  * the lock state field.
450  */
451 static __inline__ void
452 _normal_lock(pthread_mutex_t*  mutex, int shared)
453 {
454     /* convenience shortcuts */
455     const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
456     const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
457     /*
458      * The common case is an unlocked mutex, so we begin by trying to
459      * change the lock's state from 0 (UNLOCKED) to 1 (LOCKED).
460      * __bionic_cmpxchg() returns 0 if it made the swap successfully.
461      * If the result is nonzero, this lock is already held by another thread.
462      */
463     if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) != 0) {
464         const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
465         /*
466          * We want to go to sleep until the mutex is available, which
467          * requires promoting it to state 2 (CONTENDED). We need to
468          * swap in the new state value and then wait until somebody wakes us up.
469          *
470          * __bionic_swap() returns the previous value.  We swap 2 in and
471          * see if we got zero back; if so, we have acquired the lock.  If
472          * not, another thread still holds the lock and we wait again.
473          *
474          * The second argument to the __futex_wait() call is compared
475          * against the current value.  If it doesn't match, __futex_wait()
476          * returns immediately (otherwise, it sleeps for a time specified
477          * by the third argument; 0 means sleep forever).  This ensures
478          * that the mutex is in state 2 when we go to sleep on it, which
479          * guarantees a wake-up call.
480          */
481         while (__bionic_swap(locked_contended, &mutex->value) != unlocked)
482             __futex_wait_ex(&mutex->value, shared, locked_contended, 0);
483     }
484     ANDROID_MEMBAR_FULL();
485 }
486 
487 /*
488  * Release a non-recursive mutex.  The caller is responsible for determining
489  * that we are in fact the owner of this lock.
490  */
491 static __inline__ void
492 _normal_unlock(pthread_mutex_t*  mutex, int shared)
493 {
494     ANDROID_MEMBAR_FULL();
495 
496     /*
497      * The mutex state will be 1 or (rarely) 2.  We use an atomic decrement
498      * to release the lock.  __bionic_atomic_dec() returns the previous value;
499      * if it wasn't 1 we have to do some additional work.
500      */
501     if (__bionic_atomic_dec(&mutex->value) != (shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED)) {
502         /*
503          * Start by releasing the lock.  The decrement changed it from
504          * "contended lock" to "uncontended lock", which means we still
505          * hold it, and anybody who tries to sneak in will push it back
506          * to state 2.
507          *
508          * Once we set it to zero the lock is up for grabs.  We follow
509          * this with a __futex_wake() to ensure that one of the waiting
510          * threads has a chance to grab it.
511          *
512          * This doesn't cause a race with the swap/wait pair in
513          * _normal_lock(), because the __futex_wait() call there will
514          * return immediately if the mutex value isn't 2.
515          */
516         mutex->value = shared;
517 
518         /*
519          * Wake up one waiting thread.  We don't know which thread will be
520          * woken or when it'll start executing -- futexes make no guarantees
521          * here.  There may not even be a thread waiting.
522          *
523          * The newly-woken thread will replace the 0 we just set above
524          * with 2, which means that when it eventually releases the mutex
525          * it will also call FUTEX_WAKE.  This results in one extra wake
526          * call whenever a lock is contended, but lets us avoid forgetting
527          * anyone without requiring us to track the number of sleepers.
528          *
529          * It's possible for another thread to sneak in and grab the lock
530          * between the zero assignment above and the wake call below.  If
531          * the new thread is "slow" and holds the lock for a while, we'll
532          * wake up a sleeper, which will swap in a 2 and then go back to
533          * sleep since the lock is still held.  If the new thread is "fast",
534          * running to completion before we call wake, the thread we
535          * eventually wake will find an unlocked mutex and will execute.
536          * Either way we have correct behavior and nobody is orphaned on
537          * the wait queue.
538          */
539         __futex_wake_ex(&mutex->value, shared, 1);
540     }
541 }
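
/* Summary of the state transitions implemented by the two functions above
 * (illustrative; every value is OR'ed with the shared bit):
 *
 *   _normal_lock:    0 -> 1         cmpxchg fast path, no futex call
 *                    1|2 -> 2       swap in 'contended', then futex_wait
 *                                   until a swap returns 'unlocked'
 *   _normal_unlock:  1 -> 0         atomic decrement, nobody to wake
 *                    2 -> 1 -> 0    then futex_wake exactly one waiter
 */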
542 
543 /* This common inlined function is used to increment the counter of an
544  * errorcheck or recursive mutex.
545  *
546  * For errorcheck mutexes, it will return EDEADLK
547  * If the counter overflows, it will return EAGAIN
548  * Otherwise, it atomically increments the counter and returns 0
549  * after providing an acquire barrier.
550  *
551  * mtype is the current mutex type
552  * mvalue is the current mutex value (already loaded)
553  * mutex points to the mutex.
554  */
555 static __inline__ __attribute__((always_inline)) int
556 _recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype)
557 {
558     if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
559         /* trying to re-lock a mutex we already acquired */
560         return EDEADLK;
561     }
562 
563     /* Detect recursive lock overflow and return EAGAIN.
564      * This is safe because only the owner thread can modify the
565      * counter bits in the mutex value.
566      */
567     if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
568         return EAGAIN;
569     }
570 
571     /* We own the mutex, but other threads are able to change
572      * the lower bits (e.g. promoting it to "contended"), so we
573      * need to use an atomic cmpxchg loop to update the counter.
574      */
575     for (;;) {
576         /* increment counter, overflow was already checked */
577         int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
578         if (__likely(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
579             /* mutex is still locked, no need for a memory barrier */
580             return 0;
581         }
582         /* the value was changed, this happens when another thread changes
583          * the lower state bits from 1 to 2 to indicate contention. This
584          * cannot change the counter, so simply reload and try again.
585          */
586         mvalue = mutex->value;
587     }
588 }
589 
590 __LIBC_HIDDEN__
591 int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
592 {
593     int mvalue, mtype, tid, shared;
594 
595     if (__unlikely(mutex == NULL))
596         return EINVAL;
597 
598     mvalue = mutex->value;
599     mtype = (mvalue & MUTEX_TYPE_MASK);
600     shared = (mvalue & MUTEX_SHARED_MASK);
601 
602     /* Handle normal case first */
603     if ( __likely(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
604         _normal_lock(mutex, shared);
605         return 0;
606     }
607 
608     /* Do we already own this recursive or error-check mutex ? */
609     tid = __get_thread()->tid;
610     if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
611         return _recursive_increment(mutex, mvalue, mtype);
612 
613     /* Add in shared state to avoid extra 'or' operations below */
614     mtype |= shared;
615 
616     /* First, if the mutex is unlocked, try to quickly acquire it.
617      * In the optimistic case where this works, set the state to 1 to
618      * indicate locked with no contention */
619     if (mvalue == mtype) {
620         int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
621         if (__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0) {
622             ANDROID_MEMBAR_FULL();
623             return 0;
624         }
625         /* argh, the value changed, reload before entering the loop */
626         mvalue = mutex->value;
627     }
628 
629     for (;;) {
630         int newval;
631 
632         /* if the mutex is unlocked, its value should be 'mtype' and
633          * we try to acquire it by setting its owner and state atomically.
634          * NOTE: We put the state to 2 since we _know_ there is contention
635          * when we are in this loop. This ensures all waiters will be
636          * unlocked.
637          */
638         if (mvalue == mtype) {
639             newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
640             /* TODO: Change this to __bionic_cmpxchg_acquire when we
641              *        implement it to get rid of the explicit memory
642              *        barrier below.
643              */
644             if (__unlikely(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
645                 mvalue = mutex->value;
646                 continue;
647             }
648             ANDROID_MEMBAR_FULL();
649             return 0;
650         }
651 
652         /* the mutex is already locked by another thread, if its state is 1
653          * we will change it to 2 to indicate contention. */
654         if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
655             newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
656             if (__unlikely(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
657                 mvalue = mutex->value;
658                 continue;
659             }
660             mvalue = newval;
661         }
662 
663         /* wait until the mutex is unlocked */
664         __futex_wait_ex(&mutex->value, shared, mvalue, NULL);
665 
666         mvalue = mutex->value;
667     }
668     /* NOTREACHED */
669 }
670 
671 int pthread_mutex_lock(pthread_mutex_t *mutex)
672 {
673     int err = pthread_mutex_lock_impl(mutex);
674 #ifdef PTHREAD_DEBUG
675     if (PTHREAD_DEBUG_ENABLED) {
676         if (!err) {
677             pthread_debug_mutex_lock_check(mutex);
678         }
679     }
680 #endif
681     return err;
682 }
683 
684 __LIBC_HIDDEN__
685 int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
686 {
687     int mvalue, mtype, tid, shared;
688 
689     if (__unlikely(mutex == NULL))
690         return EINVAL;
691 
692     mvalue = mutex->value;
693     mtype  = (mvalue & MUTEX_TYPE_MASK);
694     shared = (mvalue & MUTEX_SHARED_MASK);
695 
696     /* Handle common case first */
697     if (__likely(mtype == MUTEX_TYPE_BITS_NORMAL)) {
698         _normal_unlock(mutex, shared);
699         return 0;
700     }
701 
702     /* Do we already own this recursive or error-check mutex ? */
703     tid = __get_thread()->tid;
704     if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
705         return EPERM;
706 
707     /* If the counter is > 0, we can simply decrement it atomically.
708      * Since other threads can mutate the lower state bits (and only the
709      * lower state bits), use a cmpxchg to do it.
710      */
711     if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
712         for (;;) {
713             int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
714             if (__likely(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
715                 /* success: we still own the mutex, so no memory barrier */
716                 return 0;
717             }
718             /* the value changed, so reload and loop */
719             mvalue = mutex->value;
720         }
721     }
722 
723     /* the counter is 0, so we're going to unlock the mutex by resetting
724      * its value to 'unlocked'. We need to perform a swap in order
725      * to read the current state, which will be 2 if there are waiters
726      * to awake.
727      *
728      * TODO: Change this to __bionic_swap_release when we implement it
729      *        to get rid of the explicit memory barrier below.
730      */
731     ANDROID_MEMBAR_FULL();  /* RELEASE BARRIER */
732     mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);
733 
734     /* Wake one waiting thread, if any */
735     if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
736         __futex_wake_ex(&mutex->value, shared, 1);
737     }
738     return 0;
739 }
740 
741 int pthread_mutex_unlock(pthread_mutex_t *mutex)
742 {
743 #ifdef PTHREAD_DEBUG
744     if (PTHREAD_DEBUG_ENABLED) {
745         pthread_debug_mutex_unlock_check(mutex);
746     }
747 #endif
748     return pthread_mutex_unlock_impl(mutex);
749 }
750 
751 __LIBC_HIDDEN__
752 int pthread_mutex_trylock_impl(pthread_mutex_t *mutex)
753 {
754     int mvalue, mtype, tid, shared;
755 
756     if (__unlikely(mutex == NULL))
757         return EINVAL;
758 
759     mvalue = mutex->value;
760     mtype  = (mvalue & MUTEX_TYPE_MASK);
761     shared = (mvalue & MUTEX_SHARED_MASK);
762 
763     /* Handle common case first */
764     if ( __likely(mtype == MUTEX_TYPE_BITS_NORMAL) )
765     {
766         if (__bionic_cmpxchg(shared|MUTEX_STATE_BITS_UNLOCKED,
767                              shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
768                              &mutex->value) == 0) {
769             ANDROID_MEMBAR_FULL();
770             return 0;
771         }
772 
773         return EBUSY;
774     }
775 
776     /* Do we already own this recursive or error-check mutex ? */
777     tid = __get_thread()->tid;
778     if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
779         return _recursive_increment(mutex, mvalue, mtype);
780 
781     /* Same as pthread_mutex_lock, except that we don't want to wait, and
782      * the only operation that can succeed is a single cmpxchg to acquire the
783      * lock if it is released / not owned by anyone. No need for a complex loop.
784      */
785     mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
786     mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
787 
788     if (__likely(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
789         ANDROID_MEMBAR_FULL();
790         return 0;
791     }
792 
793     return EBUSY;
794 }
795 
796 int pthread_mutex_trylock(pthread_mutex_t *mutex)
797 {
798     int err = pthread_mutex_trylock_impl(mutex);
799 #ifdef PTHREAD_DEBUG
800     if (PTHREAD_DEBUG_ENABLED) {
801         if (!err) {
802             pthread_debug_mutex_lock_check(mutex);
803         }
804     }
805 #endif
806     return err;
807 }
808 
809 /* initialize 'ts' with the difference between 'abstime' and the current time
810  * according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise.
811  */
812 static int
813 __timespec_to_absolute(struct timespec*  ts, const struct timespec*  abstime, clockid_t  clock)
814 {
815     clock_gettime(clock, ts);
816     ts->tv_sec  = abstime->tv_sec - ts->tv_sec;
817     ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
818     if (ts->tv_nsec < 0) {
819         ts->tv_sec--;
820         ts->tv_nsec += 1000000000;
821     }
822     if ((ts->tv_nsec < 0) || (ts->tv_sec < 0))
823         return -1;
824 
825     return 0;
826 }
827 
828 /* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
829  * milliseconds.
830  */
831 static void
832 __timespec_to_relative_msec(struct timespec*  abstime, unsigned  msecs, clockid_t  clock)
833 {
834     clock_gettime(clock, abstime);
835     abstime->tv_sec  += msecs/1000;
836     abstime->tv_nsec += (msecs%1000)*1000000;
837     if (abstime->tv_nsec >= 1000000000) {
838         abstime->tv_sec++;
839         abstime->tv_nsec -= 1000000000;
840     }
841 }
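
/* Worked example for the two helpers above (illustrative): with msecs == 1500
 * and a current time of {tv_sec=100, tv_nsec=900000000},
 * __timespec_to_relative_msec() produces abstime == {102, 400000000}
 * (100.9s + 1.5s). A later __timespec_to_absolute() call with that abstime
 * stores the time still remaining in 'ts', or returns -1 once the deadline
 * has passed.
 */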
842 
843 __LIBC_HIDDEN__
844 int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
845 {
846     clockid_t        clock = CLOCK_MONOTONIC;
847     struct timespec  abstime;
848     struct timespec  ts;
849     int               mvalue, mtype, tid, shared;
850 
851     /* compute absolute expiration time */
852     __timespec_to_relative_msec(&abstime, msecs, clock);
853 
854     if (__unlikely(mutex == NULL))
855         return EINVAL;
856 
857     mvalue = mutex->value;
858     mtype  = (mvalue & MUTEX_TYPE_MASK);
859     shared = (mvalue & MUTEX_SHARED_MASK);
860 
861     /* Handle common case first */
862     if ( __likely(mtype == MUTEX_TYPE_BITS_NORMAL) )
863     {
864         const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
865         const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
866         const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
867 
868         /* fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0 */
869         if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
870             ANDROID_MEMBAR_FULL();
871             return 0;
872         }
873 
874         /* loop while needed */
875         while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
876             if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
877                 return EBUSY;
878 
879             __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
880         }
881         ANDROID_MEMBAR_FULL();
882         return 0;
883     }
884 
885     /* Do we already own this recursive or error-check mutex ? */
886     tid = __get_thread()->tid;
887     if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
888         return _recursive_increment(mutex, mvalue, mtype);
889 
890     /* the following implements the same loop as pthread_mutex_lock_impl
891      * but adds checks to ensure that the operation never exceeds the
892      * absolute expiration time.
893      */
894     mtype |= shared;
895 
896     /* first try a quick lock */
897     if (mvalue == mtype) {
898         mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
899         if (__likely(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
900             ANDROID_MEMBAR_FULL();
901             return 0;
902         }
903         mvalue = mutex->value;
904     }
905 
906     for (;;) {
907         struct timespec ts;
908 
909         /* if the value is 'unlocked', try to acquire it directly */
910         /* NOTE: put state to 2 since we know there is contention */
911         if (mvalue == mtype) /* unlocked */ {
912             mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
913             if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
914                 ANDROID_MEMBAR_FULL();
915                 return 0;
916             }
917             /* the value changed before we could lock it. We need to check
918              * the time to avoid livelocks, reload the value, then loop again. */
919             if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
920                 return EBUSY;
921 
922             mvalue = mutex->value;
923             continue;
924         }
925 
926         /* The value is locked. If 'uncontended', try to switch its state
927          * to 'contended' to ensure we get woken up later. */
928         if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
929             int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
930             if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
931                 /* this failed because the value changed, reload it */
932                 mvalue = mutex->value;
933             } else {
934                 /* this succeeded, update mvalue */
935                 mvalue = newval;
936             }
937         }
938 
939         /* check time and update 'ts' */
940         if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
941             return EBUSY;
942 
943         /* Only wait to be woken up if the state is '2', otherwise we'll
944          * simply loop right now. This can happen when the second cmpxchg
945          * in our loop failed because the mutex was unlocked by another
946          * thread.
947          */
948         if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
949         if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == (-ETIMEDOUT)) {
950                 return EBUSY;
951             }
952             mvalue = mutex->value;
953         }
954     }
955     /* NOTREACHED */
956 }
957 
958 int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
959 {
960     int err = pthread_mutex_lock_timeout_np_impl(mutex, msecs);
961 #ifdef PTHREAD_DEBUG
962     if (PTHREAD_DEBUG_ENABLED) {
963         if (!err) {
964             pthread_debug_mutex_lock_check(mutex);
965         }
966     }
967 #endif
968     return err;
969 }
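
/* Illustrative sketch (hypothetical example for exposition; the helper name is
 * made up): as implemented above, pthread_mutex_lock_timeout_np() returns 0
 * once the lock is acquired and EBUSY if 'msecs' milliseconds elapse first.
 */
#if 0
static int lock_with_500ms_timeout(pthread_mutex_t* m)
{
    int err = pthread_mutex_lock_timeout_np(m, 500);
    if (err != 0) {
        return err;        /* EBUSY on timeout, or another error code */
    }
    /* ... critical section ... */
    pthread_mutex_unlock(m);
    return 0;
}
#endif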
970 
971 int pthread_mutex_destroy(pthread_mutex_t *mutex)
972 {
973     int ret;
974 
975     /* use trylock to ensure that the mutex value is
976      * valid and is not already locked. */
977     ret = pthread_mutex_trylock_impl(mutex);
978     if (ret != 0)
979         return ret;
980 
981     mutex->value = 0xdead10cc;
982     return 0;
983 }
984 
985 
986 
987 int pthread_condattr_init(pthread_condattr_t *attr)
988 {
989     if (attr == NULL)
990         return EINVAL;
991 
992     *attr = PTHREAD_PROCESS_PRIVATE;
993     return 0;
994 }
995 
996 int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
997 {
998     if (attr == NULL || pshared == NULL)
999         return EINVAL;
1000 
1001     *pshared = *attr;
1002     return 0;
1003 }
1004 
1005 int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
1006 {
1007     if (attr == NULL)
1008         return EINVAL;
1009 
1010     if (pshared != PTHREAD_PROCESS_SHARED &&
1011         pshared != PTHREAD_PROCESS_PRIVATE)
1012         return EINVAL;
1013 
1014     *attr = pshared;
1015     return 0;
1016 }
1017 
1018 int pthread_condattr_destroy(pthread_condattr_t *attr)
1019 {
1020     if (attr == NULL)
1021         return EINVAL;
1022 
1023     *attr = 0xdeada11d;
1024     return 0;
1025 }
1026 
1027 /* We use one bit in condition variable values as the 'shared' flag
1028  * The rest is a counter.
1029  */
1030 #define COND_SHARED_MASK        0x0001
1031 #define COND_COUNTER_INCREMENT  0x0002
1032 #define COND_COUNTER_MASK       (~COND_SHARED_MASK)
1033 
1034 #define COND_IS_SHARED(c)  (((c)->value & COND_SHARED_MASK) != 0)
1035 
1036 /* XXX *technically* there is a race condition that could allow
1037  * XXX a signal to be missed.  If thread A is preempted in _wait()
1038  * XXX after unlocking the mutex and before waiting, and if other
1039  * XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
1040  * XXX before thread A is scheduled again and calls futex_wait(),
1041  * XXX then the signal will be lost.
1042  */
1043 
1044 int pthread_cond_init(pthread_cond_t *cond,
1045                       const pthread_condattr_t *attr)
1046 {
1047     if (cond == NULL)
1048         return EINVAL;
1049 
1050     cond->value = 0;
1051 
1052     if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
1053         cond->value |= COND_SHARED_MASK;
1054 
1055     return 0;
1056 }
1057 
1058 int pthread_cond_destroy(pthread_cond_t *cond)
1059 {
1060     if (cond == NULL)
1061         return EINVAL;
1062 
1063     cond->value = 0xdeadc04d;
1064     return 0;
1065 }
1066 
1067 /* This function is used by pthread_cond_broadcast and
1068  * pthread_cond_signal to atomically decrement the counter
1069  * then wake up 'counter' threads.
1070  */
1071 static int
1072 __pthread_cond_pulse(pthread_cond_t *cond, int  counter)
1073 {
1074     long flags;
1075 
1076     if (__unlikely(cond == NULL))
1077         return EINVAL;
1078 
1079     flags = (cond->value & ~COND_COUNTER_MASK);
1080     for (;;) {
1081         long oldval = cond->value;
1082         long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
1083                       | flags;
1084         if (__bionic_cmpxchg(oldval, newval, &cond->value) == 0)
1085             break;
1086     }
1087 
1088     /*
1089      * Ensure that all memory accesses previously made by this thread are
1090      * visible to the woken thread(s).  On the other side, the "wait"
1091      * code will issue any necessary barriers when locking the mutex.
1092      *
1093      * This may not strictly be necessary -- if the caller follows
1094      * recommended practice and holds the mutex before signaling the cond
1095      * var, the mutex ops will provide correct semantics.  If they don't
1096      * hold the mutex, they're subject to race conditions anyway.
1097      */
1098     ANDROID_MEMBAR_FULL();
1099 
1100     __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
1101     return 0;
1102 }
1103 
1104 int pthread_cond_broadcast(pthread_cond_t *cond)
1105 {
1106     return __pthread_cond_pulse(cond, INT_MAX);
1107 }
1108 
1109 int pthread_cond_signal(pthread_cond_t *cond)
1110 {
1111     return __pthread_cond_pulse(cond, 1);
1112 }
1113 
1114 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
1115 {
1116     return pthread_cond_timedwait(cond, mutex, NULL);
1117 }
1118 
1119 int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
1120                                       pthread_mutex_t * mutex,
1121                                       const struct timespec *reltime)
1122 {
1123     int  status;
1124     int  oldvalue = cond->value;
1125 
1126     pthread_mutex_unlock(mutex);
1127     status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), oldvalue, reltime);
1128     pthread_mutex_lock(mutex);
1129 
1130     if (status == (-ETIMEDOUT)) return ETIMEDOUT;
1131     return 0;
1132 }
1133 
1134 int __pthread_cond_timedwait(pthread_cond_t *cond,
1135                              pthread_mutex_t * mutex,
1136                              const struct timespec *abstime,
1137                              clockid_t clock)
1138 {
1139     struct timespec ts;
1140     struct timespec * tsp;
1141 
1142     if (abstime != NULL) {
1143         if (__timespec_to_absolute(&ts, abstime, clock) < 0)
1144             return ETIMEDOUT;
1145         tsp = &ts;
1146     } else {
1147         tsp = NULL;
1148     }
1149 
1150     return __pthread_cond_timedwait_relative(cond, mutex, tsp);
1151 }
1152 
1153 int pthread_cond_timedwait(pthread_cond_t *cond,
1154                            pthread_mutex_t * mutex,
1155                            const struct timespec *abstime)
1156 {
1157     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
1158 }
1159 
1160 
1161 /* this one exists only for backward binary compatibility */
1162 int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
1163                                      pthread_mutex_t * mutex,
1164                                      const struct timespec *abstime)
1165 {
1166     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
1167 }
1168 
1169 int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
1170                                      pthread_mutex_t * mutex,
1171                                      const struct timespec *abstime)
1172 {
1173     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
1174 }
1175 
1176 int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
1177                                       pthread_mutex_t * mutex,
1178                                       const struct timespec *reltime)
1179 {
1180     return __pthread_cond_timedwait_relative(cond, mutex, reltime);
1181 }
1182 
1183 int pthread_cond_timeout_np(pthread_cond_t *cond,
1184                             pthread_mutex_t * mutex,
1185                             unsigned msecs)
1186 {
1187     struct timespec ts;
1188 
1189     ts.tv_sec = msecs / 1000;
1190     ts.tv_nsec = (msecs % 1000) * 1000000;
1191 
1192     return __pthread_cond_timedwait_relative(cond, mutex, &ts);
1193 }
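
/* Illustrative sketch (hypothetical example for exposition; all names are made
 * up): the standard predicate loop that the comments above assume, where the
 * mutex is held both while waiting and while signaling.
 */
#if 0
static pthread_mutex_t g_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_cond  = PTHREAD_COND_INITIALIZER;
static int             g_ready = 0;

static void wait_until_ready(void)
{
    pthread_mutex_lock(&g_lock);
    while (!g_ready)                      /* re-check: wake-ups may be spurious */
        pthread_cond_wait(&g_cond, &g_lock);
    pthread_mutex_unlock(&g_lock);
}

static void set_ready(void)
{
    pthread_mutex_lock(&g_lock);
    g_ready = 1;
    pthread_cond_signal(&g_cond);         /* wakes one waiter via __pthread_cond_pulse */
    pthread_mutex_unlock(&g_lock);
}
#endif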
1194 
1195 
1196 /* NOTE: this implementation doesn't support an init function that throws a C++ exception
1197  *       or calls fork()
1198  */
1199 int pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
1200 {
1201     volatile pthread_once_t* ocptr = once_control;
1202 
1203     /* PTHREAD_ONCE_INIT is 0, we use the following bit flags
1204      *
1205      *   bit 0 set  -> initialization is under way
1206      *   bit 1 set  -> initialization is complete
1207      */
1208 #define ONCE_INITIALIZING           (1 << 0)
1209 #define ONCE_COMPLETED              (1 << 1)
1210 
1211     /* First check if the once is already initialized. This will be the common
1212     * case and we want to make this as fast as possible. Note that this still
1213     * requires a load_acquire operation here to ensure that all the
1214     * stores performed by the initialization function are observable on
1215     * this CPU after we exit.
1216     */
1217     if (__likely((*ocptr & ONCE_COMPLETED) != 0)) {
1218         ANDROID_MEMBAR_FULL();
1219         return 0;
1220     }
1221 
1222     for (;;) {
1223         /* Try to atomically set the INITIALIZING flag.
1224          * This requires a cmpxchg loop, and we may need
1225          * to exit prematurely if we detect that
1226          * COMPLETED is now set.
1227          */
1228         int32_t  oldval, newval;
1229 
1230         do {
1231             oldval = *ocptr;
1232             if ((oldval & ONCE_COMPLETED) != 0)
1233                 break;
1234 
1235             newval = oldval | ONCE_INITIALIZING;
1236         } while (__bionic_cmpxchg(oldval, newval, ocptr) != 0);
1237 
1238         if ((oldval & ONCE_COMPLETED) != 0) {
1239             /* We detected that COMPLETED was set while in our loop */
1240             ANDROID_MEMBAR_FULL();
1241             return 0;
1242         }
1243 
1244         if ((oldval & ONCE_INITIALIZING) == 0) {
1245             /* We got there first, we can jump out of the loop to
1246              * handle the initialization */
1247             break;
1248         }
1249 
1250         /* Another thread is running the initialization and hasn't completed
1251          * yet, so wait for it, then try again. */
1252         __futex_wait_ex(ocptr, 0, oldval, NULL);
1253     }
1254 
1255     /* call the initialization function. */
1256     (*init_routine)();
1257 
1258     /* Do a store_release indicating that initialization is complete */
1259     ANDROID_MEMBAR_FULL();
1260     *ocptr = ONCE_COMPLETED;
1261 
1262     /* Wake up waiters, if any */
1263     __futex_wake_ex(ocptr, 0, INT_MAX);
1264 
1265     return 0;
1266 }
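
/* Illustrative sketch (hypothetical example for exposition; all names are made
 * up): the usual once-only initialization pattern served by the
 * implementation above.
 */
#if 0
static pthread_once_t g_once = PTHREAD_ONCE_INIT;
static int            g_config;

static void init_config(void)
{
    g_config = 42;      /* runs exactly once, even with concurrent callers */
}

static int get_config(void)
{
    pthread_once(&g_once, init_config);
    return g_config;
}
#endif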
1267 
1268 pid_t __pthread_gettid(pthread_t thid) {
1269   pthread_internal_t* thread = (pthread_internal_t*) thid;
1270   return thread->tid;
1271 }
1272 
1273 int __pthread_settid(pthread_t thid, pid_t tid) {
1274   if (thid == 0) {
1275       return EINVAL;
1276   }
1277 
1278   pthread_internal_t* thread = (pthread_internal_t*) thid;
1279   thread->tid = tid;
1280 
1281   return 0;
1282 }
1283