1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <pthread.h>
30 
31 #include <errno.h>
32 #include <limits.h>
33 #include <sys/atomics.h>
34 #include <sys/mman.h>
35 #include <unistd.h>
36 
37 #include "bionic_atomic_inline.h"
38 #include "bionic_futex.h"
39 #include "bionic_pthread.h"
40 #include "bionic_tls.h"
41 #include "pthread_internal.h"
42 #include "thread_private.h"
43 
44 extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex);
45 extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex);
46 
47 extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
48 extern void _exit_thread(int  retCode);
49 
50 int  __futex_wake_ex(volatile void *ftx, int pshared, int val)
51 {
52     return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
53 }
54 
55 int  __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout)
56 {
57     return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
58 }
59 
60 /* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
61  *         or thread cancellation.
62  */
63 
64 void __pthread_cleanup_push( __pthread_cleanup_t*      c,
65                              __pthread_cleanup_func_t  routine,
66                              void*                     arg )
67 {
68     pthread_internal_t*  thread = __get_thread();
69 
70     c->__cleanup_routine  = routine;
71     c->__cleanup_arg      = arg;
72     c->__cleanup_prev     = thread->cleanup_stack;
73     thread->cleanup_stack = c;
74 }
75 
76 void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
77 {
78     pthread_internal_t*  thread = __get_thread();
79 
80     thread->cleanup_stack = c->__cleanup_prev;
81     if (execute)
82         c->__cleanup_routine(c->__cleanup_arg);
83 }
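/* Editorial sketch (illustration only, not part of the original file): typical
 * use of the public pthread_cleanup_push()/pthread_cleanup_pop() macros from
 * <pthread.h>, which wrap the two helpers above with a stack-allocated
 * __pthread_cleanup_t. The names g_lock, cleanup_unlock and worker are
 * hypothetical.
 */
#if 0
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

static void cleanup_unlock(void *arg)
{
    pthread_mutex_unlock((pthread_mutex_t *)arg);
}

static void *worker(void *unused)
{
    pthread_mutex_lock(&g_lock);
    pthread_cleanup_push(cleanup_unlock, &g_lock);
    /* ... work that may call pthread_exit() early ... */
    pthread_cleanup_pop(1);  /* pop the handler and run it: unlocks g_lock */
    return NULL;
}
#endif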
84 
85 void pthread_exit(void * retval)
86 {
87     pthread_internal_t*  thread     = __get_thread();
88     void*                stack_base = thread->attr.stack_base;
89     int                  stack_size = thread->attr.stack_size;
90     int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;
91     sigset_t mask;
92 
93     // call the cleanup handlers first
94     while (thread->cleanup_stack) {
95         __pthread_cleanup_t*  c = thread->cleanup_stack;
96         thread->cleanup_stack   = c->__cleanup_prev;
97         c->__cleanup_routine(c->__cleanup_arg);
98     }
99 
100     // Call the TLS destructors. It is important to do this before removing the
101     // thread from the global list: it ensures that if someone else deletes
102     // a TLS key, the corresponding value will be set to NULL in this thread's TLS
103     // space (see pthread_key_delete).
104     pthread_key_clean_all();
105 
106     if (thread->alternate_signal_stack != NULL) {
107       // Tell the kernel to stop using the alternate signal stack.
108       stack_t ss;
109       ss.ss_sp = NULL;
110       ss.ss_flags = SS_DISABLE;
111       sigaltstack(&ss, NULL);
112 
113       // Free it.
114       munmap(thread->alternate_signal_stack, SIGSTKSZ);
115       thread->alternate_signal_stack = NULL;
116     }
117 
118     // if the thread is detached, destroy the pthread_internal_t
119     // otherwise, keep it in memory and signal any joiners.
120     pthread_mutex_lock(&gThreadListLock);
121     if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
122         _pthread_internal_remove_locked(thread);
123     } else {
124        /* make sure that the thread struct doesn't have stale pointers to a stack that
125         * will be unmapped after the exit call below.
126         */
127         if (!user_stack) {
128             thread->attr.stack_base = NULL;
129             thread->attr.stack_size = 0;
130             thread->tls = NULL;
131         }
132 
133        /* Indicate that the thread has exited for joining threads. */
134         thread->attr.flags |= PTHREAD_ATTR_FLAG_ZOMBIE;
135         thread->return_value = retval;
136 
137        /* Signal the joining thread if present. */
138         if (thread->attr.flags & PTHREAD_ATTR_FLAG_JOINED) {
139             pthread_cond_signal(&thread->join_cond);
140         }
141     }
142     pthread_mutex_unlock(&gThreadListLock);
143 
144     sigfillset(&mask);
145     sigdelset(&mask, SIGSEGV);
146     (void)sigprocmask(SIG_SETMASK, &mask, (sigset_t *)NULL);
147 
148     // destroy the thread stack
149     if (user_stack)
150         _exit_thread((int)retval);
151     else
152         _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
153 }
154 
155 /* a mutex is implemented as a 32-bit integer holding the following fields
156  *
157  * bits:     name     description
158  * 31-16     tid      owner thread's tid (recursive and errorcheck only)
159  * 15-14     type     mutex type
160  * 13        shared   process-shared flag
161  * 12-2      counter  counter of recursive mutexes
162  * 1-0       state    lock state (0, 1 or 2)
163  */
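/* Editorial worked example (not part of the original file): with the layout
 * above, a process-private recursive mutex owned by tid 0x1234, holding one
 * recursive reference in the counter and in the "locked, no waiters" state,
 * encodes as:
 *
 *     (0x1234 << 16)   owner tid           0x12340000
 *   | (1      << 14)   type = recursive    0x00004000
 *   | (0      << 13)   not process-shared  0x00000000
 *   | (1      <<  2)   counter             0x00000004
 *   |  1               state = locked      0x00000001
 *                                        = 0x12344005
 */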
164 
165 /* Convenience macro, creates a mask of 'bits' bits that starts from
166  * the 'shift'-th least significant bit in a 32-bit word.
167  *
168  * Examples: FIELD_MASK(0,4)  -> 0xf
169  *           FIELD_MASK(16,9) -> 0x1ff0000
170  */
171 #define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))
172 
173 /* This one is used to create a bit pattern from a given field value */
174 #define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))
175 
176 /* And this one does the opposite, i.e. extract a field's value from a bit pattern */
177 #define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))
178 
179 /* Mutex state:
180  *
181  * 0 for unlocked
182  * 1 for locked, no waiters
183  * 2 for locked, maybe waiters
184  */
185 #define  MUTEX_STATE_SHIFT      0
186 #define  MUTEX_STATE_LEN        2
187 
188 #define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
189 #define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
190 #define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
191 
192 #define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
193 #define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
194 #define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */
195 
196 #define  MUTEX_STATE_FROM_BITS(v)    FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
197 #define  MUTEX_STATE_TO_BITS(v)      FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
198 
199 #define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
200 #define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
201 #define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)
202 
203 /* return true iff the mutex is locked with no waiters */
204 #define  MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)
205 
206 /* return true iff the mutex if locked with maybe waiters */
207 #define  MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)   (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)
208 
209 /* used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
210 #define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)      ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))
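/* Editorial note (not part of the original file): since the contended and
 * uncontended state values are 2 and 1, the XOR constant above is 3, so
 * applying MUTEX_STATE_BITS_FLIP_CONTENTION to a value whose state bits are 1
 * yields state 2 (and vice versa) without disturbing the other fields. */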
211 
212 /* Mutex counter:
213  *
214  * We need to check for overflow before incrementing, and we also need to
215  * detect when the counter is 0
216  */
217 #define  MUTEX_COUNTER_SHIFT         2
218 #define  MUTEX_COUNTER_LEN           11
219 #define  MUTEX_COUNTER_MASK          FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)
220 
221 #define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)    (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
222 #define  MUTEX_COUNTER_BITS_IS_ZERO(v)          (((v) & MUTEX_COUNTER_MASK) == 0)
223 
224 /* Used to increment the counter directly after overflow has been checked */
225 #define  MUTEX_COUNTER_BITS_ONE      FIELD_TO_BITS(1,MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)
226 
227 /* Returns true iff the counter is 0 */
228 #define  MUTEX_COUNTER_BITS_ARE_ZERO(v)  (((v) & MUTEX_COUNTER_MASK) == 0)
229 
230 /* Mutex shared bit flag
231  *
232  * This flag is set to indicate that the mutex is shared among processes.
233  * This changes the futex opcode we use for futex wait/wake operations
234  * (non-shared operations are much faster).
235  */
236 #define  MUTEX_SHARED_SHIFT    13
237 #define  MUTEX_SHARED_MASK     FIELD_MASK(MUTEX_SHARED_SHIFT,1)
238 
239 /* Mutex type:
240  *
241  * We support normal, recursive and errorcheck mutexes.
242  *
243  * The constants defined here *cannot* be changed because they must match
244  * the C library ABI which defines the following initialization values in
245  * <pthread.h>:
246  *
247  *   __PTHREAD_MUTEX_INIT_VALUE
248  *   __PTHREAD_RECURSIVE_MUTEX_VALUE
249  *   __PTHREAD_ERRORCHECK_MUTEX_INIT_VALUE
250  */
251 #define  MUTEX_TYPE_SHIFT      14
252 #define  MUTEX_TYPE_LEN        2
253 #define  MUTEX_TYPE_MASK       FIELD_MASK(MUTEX_TYPE_SHIFT,MUTEX_TYPE_LEN)
254 
255 #define  MUTEX_TYPE_NORMAL          0  /* Must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
256 #define  MUTEX_TYPE_RECURSIVE       1
257 #define  MUTEX_TYPE_ERRORCHECK      2
258 
259 #define  MUTEX_TYPE_TO_BITS(t)       FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)
260 
261 #define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(MUTEX_TYPE_NORMAL)
262 #define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(MUTEX_TYPE_RECURSIVE)
263 #define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(MUTEX_TYPE_ERRORCHECK)
264 
265 /* Mutex owner field:
266  *
267  * This is only used for recursive and errorcheck mutexes. It holds the
268  * tid of the owning thread. Note that this works because the Linux
269  * kernel _only_ uses 16-bit values for tids.
270  *
271  * More specifically, it will wrap to 10000 when it reaches over 32768 for
272  * application processes. You can check this by running the following inside
273  * an adb shell session:
274  *
275     OLDPID=$$;
276     while true; do
277     NEWPID=$(sh -c 'echo $$')
278     if [ "$NEWPID" -gt 32768 ]; then
279         echo "AARGH: new PID $NEWPID is too high!"
280         exit 1
281     fi
282     if [ "$NEWPID" -lt "$OLDPID" ]; then
283         echo "****** Wrapping from PID $OLDPID to $NEWPID. *******"
284     else
285         echo -n "$NEWPID!"
286     fi
287     OLDPID=$NEWPID
288     done
289 
290  * Note that you can run the same example on a desktop Linux system,
291  * where the wrapping will also happen at 32768, but will wrap back to 300 instead.
292  */
293 #define  MUTEX_OWNER_SHIFT     16
294 #define  MUTEX_OWNER_LEN       16
295 
296 #define  MUTEX_OWNER_FROM_BITS(v)    FIELD_FROM_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
297 #define  MUTEX_OWNER_TO_BITS(v)      FIELD_TO_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
298 
299 /* Convenience macros.
300  *
301  * These are used to form or modify the bit pattern of a given mutex value
302  */
303 
304 
305 
306 /* a mutex attribute holds the following fields
307  *
308  * bits:     name       description
309  * 0-3       type       type of mutex
310  * 4         shared     process-shared flag
311  */
312 #define  MUTEXATTR_TYPE_MASK   0x000f
313 #define  MUTEXATTR_SHARED_MASK 0x0010
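/* Editorial example (not part of the original file): a recursive,
 * process-shared attribute therefore holds PTHREAD_MUTEX_RECURSIVE (1) in
 * bits 0-3 and the shared flag in bit 4, i.e. the value 0x0011. */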
314 
315 
316 int pthread_mutexattr_init(pthread_mutexattr_t *attr)
317 {
318     if (attr) {
319         *attr = PTHREAD_MUTEX_DEFAULT;
320         return 0;
321     } else {
322         return EINVAL;
323     }
324 }
325 
326 int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
327 {
328     if (attr) {
329         *attr = -1;
330         return 0;
331     } else {
332         return EINVAL;
333     }
334 }
335 
336 int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
337 {
338     if (attr) {
339         int  atype = (*attr & MUTEXATTR_TYPE_MASK);
340 
341          if (atype >= PTHREAD_MUTEX_NORMAL &&
342              atype <= PTHREAD_MUTEX_ERRORCHECK) {
343             *type = atype;
344             return 0;
345         }
346     }
347     return EINVAL;
348 }
349 
350 int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
351 {
352     if (attr && type >= PTHREAD_MUTEX_NORMAL &&
353                 type <= PTHREAD_MUTEX_ERRORCHECK ) {
354         *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
355         return 0;
356     }
357     return EINVAL;
358 }
359 
360 /* process-shared mutexes are not supported at the moment */
361 
362 int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int  pshared)
363 {
364     if (!attr)
365         return EINVAL;
366 
367     switch (pshared) {
368     case PTHREAD_PROCESS_PRIVATE:
369         *attr &= ~MUTEXATTR_SHARED_MASK;
370         return 0;
371 
372     case PTHREAD_PROCESS_SHARED:
373         /* our current implementation of pthread actually supports shared
374          * mutexes but won't clean up if a process dies with the mutex held.
375          * Nevertheless, it's better than nothing. Shared mutexes are used
376          * by surfaceflinger and audioflinger.
377          */
378         *attr |= MUTEXATTR_SHARED_MASK;
379         return 0;
380     }
381     return EINVAL;
382 }
383 
384 int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
385 {
386     if (!attr || !pshared)
387         return EINVAL;
388 
389     *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED
390                                                : PTHREAD_PROCESS_PRIVATE;
391     return 0;
392 }
393 
394 int pthread_mutex_init(pthread_mutex_t *mutex,
395                        const pthread_mutexattr_t *attr)
396 {
397     int value = 0;
398 
399     if (mutex == NULL)
400         return EINVAL;
401 
402     if (__predict_true(attr == NULL)) {
403         mutex->value = MUTEX_TYPE_BITS_NORMAL;
404         return 0;
405     }
406 
407     if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
408         value |= MUTEX_SHARED_MASK;
409 
410     switch (*attr & MUTEXATTR_TYPE_MASK) {
411     case PTHREAD_MUTEX_NORMAL:
412         value |= MUTEX_TYPE_BITS_NORMAL;
413         break;
414     case PTHREAD_MUTEX_RECURSIVE:
415         value |= MUTEX_TYPE_BITS_RECURSIVE;
416         break;
417     case PTHREAD_MUTEX_ERRORCHECK:
418         value |= MUTEX_TYPE_BITS_ERRORCHECK;
419         break;
420     default:
421         return EINVAL;
422     }
423 
424     mutex->value = value;
425     return 0;
426 }
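/* Editorial sketch (illustration only, not part of the original file): the
 * usual caller-side path through the attribute functions above to obtain a
 * recursive mutex. The names g_recursive_lock and init_recursive_lock are
 * hypothetical.
 */
#if 0
static pthread_mutex_t g_recursive_lock;

static int init_recursive_lock(void)
{
    pthread_mutexattr_t attr;
    int ret = pthread_mutexattr_init(&attr);
    if (ret != 0)
        return ret;
    ret = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    if (ret == 0)
        ret = pthread_mutex_init(&g_recursive_lock, &attr);
    pthread_mutexattr_destroy(&attr);
    return ret;
}
#endif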
427 
428 
429 /*
430  * Lock a non-recursive mutex.
431  *
432  * As noted above, there are three states:
433  *   0 (unlocked, no contention)
434  *   1 (locked, no contention)
435  *   2 (locked, contention)
436  *
437  * Non-recursive mutexes don't use the thread-id or counter fields, and the
438  * "type" value is zero, so the only bits that will be set are the ones in
439  * the lock state field.
440  */
441 static __inline__ void
442 _normal_lock(pthread_mutex_t*  mutex, int shared)
443 {
444     /* convenience shortcuts */
445     const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
446     const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
447     /*
448      * The common case is an unlocked mutex, so we begin by trying to
449      * change the lock's state from 0 (UNLOCKED) to 1 (LOCKED).
450      * __bionic_cmpxchg() returns 0 if it made the swap successfully.
451      * If the result is nonzero, this lock is already held by another thread.
452      */
453     if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) != 0) {
454         const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
455         /*
456          * We want to go to sleep until the mutex is available, which
457          * requires promoting it to state 2 (CONTENDED). We need to
458          * swap in the new state value and then wait until somebody wakes us up.
459          *
460          * __bionic_swap() returns the previous value.  We swap 2 in and
461          * see if we got zero back; if so, we have acquired the lock.  If
462          * not, another thread still holds the lock and we wait again.
463          *
464          * The second argument to the __futex_wait() call is compared
465          * against the current value.  If it doesn't match, __futex_wait()
466          * returns immediately (otherwise, it sleeps for a time specified
467          * by the third argument; 0 means sleep forever).  This ensures
468          * that the mutex is in state 2 when we go to sleep on it, which
469          * guarantees a wake-up call.
470          */
471         while (__bionic_swap(locked_contended, &mutex->value) != unlocked)
472             __futex_wait_ex(&mutex->value, shared, locked_contended, 0);
473     }
474     ANDROID_MEMBAR_FULL();
475 }
476 
477 /*
478  * Release a non-recursive mutex.  The caller is responsible for determining
479  * that we are in fact the owner of this lock.
480  */
481 static __inline__ void
482 _normal_unlock(pthread_mutex_t*  mutex, int shared)
483 {
484     ANDROID_MEMBAR_FULL();
485 
486     /*
487      * The mutex state will be 1 or (rarely) 2.  We use an atomic decrement
488      * to release the lock.  __bionic_atomic_dec() returns the previous value;
489      * if it wasn't 1 we have to do some additional work.
490      */
491     if (__bionic_atomic_dec(&mutex->value) != (shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED)) {
492         /*
493          * Start by releasing the lock.  The decrement changed it from
494          * "contended lock" to "uncontended lock", which means we still
495          * hold it, and anybody who tries to sneak in will push it back
496          * to state 2.
497          *
498          * Once we set it to zero the lock is up for grabs.  We follow
499          * this with a __futex_wake() to ensure that one of the waiting
500          * threads has a chance to grab it.
501          *
502          * This doesn't cause a race with the swap/wait pair in
503          * _normal_lock(), because the __futex_wait() call there will
504          * return immediately if the mutex value isn't 2.
505          */
506         mutex->value = shared;
507 
508         /*
509          * Wake up one waiting thread.  We don't know which thread will be
510          * woken or when it'll start executing -- futexes make no guarantees
511          * here.  There may not even be a thread waiting.
512          *
513          * The newly-woken thread will replace the 0 we just set above
514          * with 2, which means that when it eventually releases the mutex
515          * it will also call FUTEX_WAKE.  This results in one extra wake
516          * call whenever a lock is contended, but lets us avoid forgetting
517          * anyone without requiring us to track the number of sleepers.
518          *
519          * It's possible for another thread to sneak in and grab the lock
520          * between the zero assignment above and the wake call below.  If
521          * the new thread is "slow" and holds the lock for a while, we'll
522          * wake up a sleeper, which will swap in a 2 and then go back to
523          * sleep since the lock is still held.  If the new thread is "fast",
524          * running to completion before we call wake, the thread we
525          * eventually wake will find an unlocked mutex and will execute.
526          * Either way we have correct behavior and nobody is orphaned on
527          * the wait queue.
528          */
529         __futex_wake_ex(&mutex->value, shared, 1);
530     }
531 }
532 
533 /* This common inlined function is used to increment the counter of an
534  * errorcheck or recursive mutex.
535  *
536  * For errorcheck mutexes, it will return EDEADLK.
537  * If the counter overflows, it will return EAGAIN.
538  * Otherwise, it atomically increments the counter and returns 0.
539  * No barrier is needed because the calling thread already owns the mutex.
540  *
541  * mtype  is the current mutex type
542  * mvalue is the current mutex value (already loaded)
543  * mutex  points to the mutex.
544  */
545 static __inline__ __attribute__((always_inline)) int
546 _recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype)
547 {
548     if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
549         /* trying to re-lock a mutex we already acquired */
550         return EDEADLK;
551     }
552 
553     /* Detect recursive lock overflow and return EAGAIN.
554      * This is safe because only the owner thread can modify the
555      * counter bits in the mutex value.
556      */
557     if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
558         return EAGAIN;
559     }
560 
561     /* We own the mutex, but other threads are able to change
562      * the lower bits (e.g. promoting it to "contended"), so we
563      * need to use an atomic cmpxchg loop to update the counter.
564      */
565     for (;;) {
566         /* increment counter, overflow was already checked */
567         int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
568         if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
569             /* mutex is still locked by us, no need for a memory barrier */
570             return 0;
571         }
572         /* The value changed; this happens when another thread changes
573          * the lower state bits from 1 to 2 to indicate contention. This
574          * cannot change the counter, so simply reload and try again.
575          */
576         mvalue = mutex->value;
577     }
578 }
579 
580 __LIBC_HIDDEN__
581 int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
582 {
583     int mvalue, mtype, tid, shared;
584 
585     if (__predict_false(mutex == NULL))
586         return EINVAL;
587 
588     mvalue = mutex->value;
589     mtype = (mvalue & MUTEX_TYPE_MASK);
590     shared = (mvalue & MUTEX_SHARED_MASK);
591 
592     /* Handle normal case first */
593     if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
594         _normal_lock(mutex, shared);
595         return 0;
596     }
597 
598     /* Do we already own this recursive or error-check mutex ? */
599     tid = __get_thread()->tid;
600     if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
601         return _recursive_increment(mutex, mvalue, mtype);
602 
603     /* Add in shared state to avoid extra 'or' operations below */
604     mtype |= shared;
605 
606     /* First, if the mutex is unlocked, try to quickly acquire it.
607      * In the optimistic case where this works, set the state to 1 to
608      * indicate locked with no contention */
609     if (mvalue == mtype) {
610         int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
611         if (__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0) {
612             ANDROID_MEMBAR_FULL();
613             return 0;
614         }
615         /* argh, the value changed, reload before entering the loop */
616         mvalue = mutex->value;
617     }
618 
619     for (;;) {
620         int newval;
621 
622         /* if the mutex is unlocked, its value should be 'mtype' and
623          * we try to acquire it by setting its owner and state atomically.
624          * NOTE: We set the state to 2 since we _know_ there is contention
625          * when we are in this loop. This ensures that all waiters will be
626          * woken up when the mutex is eventually unlocked.
627          */
628         if (mvalue == mtype) {
629             newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
630             /* TODO: Change this to __bionic_cmpxchg_acquire when we
631              *        implement it to get rid of the explicit memory
632              *        barrier below.
633              */
634             if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
635                 mvalue = mutex->value;
636                 continue;
637             }
638             ANDROID_MEMBAR_FULL();
639             return 0;
640         }
641 
642         /* the mutex is already locked by another thread, if its state is 1
643          * we will change it to 2 to indicate contention. */
644         if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
645             newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
646             if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
647                 mvalue = mutex->value;
648                 continue;
649             }
650             mvalue = newval;
651         }
652 
653         /* wait until the mutex is unlocked */
654         __futex_wait_ex(&mutex->value, shared, mvalue, NULL);
655 
656         mvalue = mutex->value;
657     }
658     /* NOTREACHED */
659 }
660 
661 int pthread_mutex_lock(pthread_mutex_t *mutex)
662 {
663     int err = pthread_mutex_lock_impl(mutex);
664 #ifdef PTHREAD_DEBUG
665     if (PTHREAD_DEBUG_ENABLED) {
666         if (!err) {
667             pthread_debug_mutex_lock_check(mutex);
668         }
669     }
670 #endif
671     return err;
672 }
673 
674 __LIBC_HIDDEN__
675 int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
676 {
677     int mvalue, mtype, tid, shared;
678 
679     if (__predict_false(mutex == NULL))
680         return EINVAL;
681 
682     mvalue = mutex->value;
683     mtype  = (mvalue & MUTEX_TYPE_MASK);
684     shared = (mvalue & MUTEX_SHARED_MASK);
685 
686     /* Handle common case first */
687     if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
688         _normal_unlock(mutex, shared);
689         return 0;
690     }
691 
692     /* Do we already own this recursive or error-check mutex ? */
693     tid = __get_thread()->tid;
694     if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
695         return EPERM;
696 
697     /* If the counter is > 0, we can simply decrement it atomically.
698      * Since other threads can mutate the lower state bits (and only the
699      * lower state bits), use a cmpxchg to do it.
700      */
701     if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
702         for (;;) {
703             int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
704             if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
705                 /* success: we still own the mutex, so no memory barrier */
706                 return 0;
707             }
708             /* the value changed, so reload and loop */
709             mvalue = mutex->value;
710         }
711     }
712 
713     /* the counter is 0, so we're going to unlock the mutex by resetting
714      * its value to 'unlocked'. We need to perform a swap in order
715      * to read the current state, which will be 2 if there are waiters
716      * to awake.
717      *
718      * TODO: Change this to __bionic_swap_release when we implement it
719      *        to get rid of the explicit memory barrier below.
720      */
721     ANDROID_MEMBAR_FULL();  /* RELEASE BARRIER */
722     mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);
723 
724     /* Wake one waiting thread, if any */
725     if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
726         __futex_wake_ex(&mutex->value, shared, 1);
727     }
728     return 0;
729 }
730 
731 int pthread_mutex_unlock(pthread_mutex_t *mutex)
732 {
733 #ifdef PTHREAD_DEBUG
734     if (PTHREAD_DEBUG_ENABLED) {
735         pthread_debug_mutex_unlock_check(mutex);
736     }
737 #endif
738     return pthread_mutex_unlock_impl(mutex);
739 }
740 
741 __LIBC_HIDDEN__
742 int pthread_mutex_trylock_impl(pthread_mutex_t *mutex)
743 {
744     int mvalue, mtype, tid, shared;
745 
746     if (__predict_false(mutex == NULL))
747         return EINVAL;
748 
749     mvalue = mutex->value;
750     mtype  = (mvalue & MUTEX_TYPE_MASK);
751     shared = (mvalue & MUTEX_SHARED_MASK);
752 
753     /* Handle common case first */
754     if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) )
755     {
756         if (__bionic_cmpxchg(shared|MUTEX_STATE_BITS_UNLOCKED,
757                              shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
758                              &mutex->value) == 0) {
759             ANDROID_MEMBAR_FULL();
760             return 0;
761         }
762 
763         return EBUSY;
764     }
765 
766     /* Do we already own this recursive or error-check mutex ? */
767     tid = __get_thread()->tid;
768     if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
769         return _recursive_increment(mutex, mvalue, mtype);
770 
771     /* Same as pthread_mutex_lock, except that we don't want to wait, and
772      * the only operation that can succeed is a single cmpxchg to acquire the
773      * lock if it is released / not owned by anyone. No need for a complex loop.
774      */
775     mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
776     mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
777 
778     if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
779         ANDROID_MEMBAR_FULL();
780         return 0;
781     }
782 
783     return EBUSY;
784 }
785 
786 int pthread_mutex_trylock(pthread_mutex_t *mutex)
787 {
788     int err = pthread_mutex_trylock_impl(mutex);
789 #ifdef PTHREAD_DEBUG
790     if (PTHREAD_DEBUG_ENABLED) {
791         if (!err) {
792             pthread_debug_mutex_lock_check(mutex);
793         }
794     }
795 #endif
796     return err;
797 }
798 
799 /* initialize 'ts' with the difference between 'abstime' and the current time
800  * according to 'clock'. Returns -1 if abstime has already expired, or 0 otherwise.
801  */
802 static int
803 __timespec_to_absolute(struct timespec*  ts, const struct timespec*  abstime, clockid_t  clock)
804 {
805     clock_gettime(clock, ts);
806     ts->tv_sec  = abstime->tv_sec - ts->tv_sec;
807     ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
808     if (ts->tv_nsec < 0) {
809         ts->tv_sec--;
810         ts->tv_nsec += 1000000000;
811     }
812     if ((ts->tv_nsec < 0) || (ts->tv_sec < 0))
813         return -1;
814 
815     return 0;
816 }
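/* Editorial worked example (not part of the original file): if 'clock'
 * currently reads {100, 800000000} and 'abstime' is {102, 300000000}, the
 * raw subtraction gives {2, -500000000}, which the nanosecond borrow above
 * normalizes to {1, 500000000}. An 'abstime' earlier than the current time
 * leaves a negative field and the function returns -1. */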
817 
818 /* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
819  * milliseconds.
820  */
821 static void
822 __timespec_to_relative_msec(struct timespec*  abstime, unsigned  msecs, clockid_t  clock)
823 {
824     clock_gettime(clock, abstime);
825     abstime->tv_sec  += msecs/1000;
826     abstime->tv_nsec += (msecs%1000)*1000000;
827     if (abstime->tv_nsec >= 1000000000) {
828         abstime->tv_sec++;
829         abstime->tv_nsec -= 1000000000;
830     }
831 }
832 
833 __LIBC_HIDDEN__
834 int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
835 {
836     clockid_t        clock = CLOCK_MONOTONIC;
837     struct timespec  abstime;
838     struct timespec  ts;
839     int               mvalue, mtype, tid, shared;
840 
841     /* compute absolute expiration time */
842     __timespec_to_relative_msec(&abstime, msecs, clock);
843 
844     if (__predict_false(mutex == NULL))
845         return EINVAL;
846 
847     mvalue = mutex->value;
848     mtype  = (mvalue & MUTEX_TYPE_MASK);
849     shared = (mvalue & MUTEX_SHARED_MASK);
850 
851     /* Handle common case first */
852     if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) )
853     {
854         const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
855         const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
856         const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
857 
858         /* fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0 */
859         if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
860             ANDROID_MEMBAR_FULL();
861             return 0;
862         }
863 
864         /* loop while needed */
865         while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
866             if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
867                 return EBUSY;
868 
869             __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
870         }
871         ANDROID_MEMBAR_FULL();
872         return 0;
873     }
874 
875     /* Do we already own this recursive or error-check mutex ? */
876     tid = __get_thread()->tid;
877     if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
878         return _recursive_increment(mutex, mvalue, mtype);
879 
880     /* the following implements the same loop as pthread_mutex_lock_impl
881      * but adds checks to ensure that the operation never exceeds the
882      * absolute expiration time.
883      */
884     mtype |= shared;
885 
886     /* first try a quick lock */
887     if (mvalue == mtype) {
888         mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
889         if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
890             ANDROID_MEMBAR_FULL();
891             return 0;
892         }
893         mvalue = mutex->value;
894     }
895 
896     for (;;) {
897         struct timespec ts;
898 
899         /* if the value is 'unlocked', try to acquire it directly */
900         /* NOTE: put state to 2 since we know there is contention */
901         if (mvalue == mtype) /* unlocked */ {
902             mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
903             if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
904                 ANDROID_MEMBAR_FULL();
905                 return 0;
906             }
907             /* the value changed before we could lock it. We need to check
908              * the time to avoid livelocks, reload the value, then loop again. */
909             if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
910                 return EBUSY;
911 
912             mvalue = mutex->value;
913             continue;
914         }
915 
916         /* The value is locked. If 'uncontended', try to switch its state
917          * to 'contended' to ensure we get woken up later. */
918         if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
919             int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
920             if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
921                 /* this failed because the value changed, reload it */
922                 mvalue = mutex->value;
923             } else {
924                 /* this succeeded, update mvalue */
925                 mvalue = newval;
926             }
927         }
928 
929         /* check time and update 'ts' */
930         if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
931             return EBUSY;
932 
933         /* Only wait to be woken up if the state is '2', otherwise we'll
934          * simply loop right now. This can happen when the second cmpxchg
935          * in our loop failed because the mutex was unlocked by another
936          * thread.
937          */
938         if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
939             if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == (-ETIMEDOUT)) {
940                 return EBUSY;
941             }
942             mvalue = mutex->value;
943         }
944     }
945     /* NOTREACHED */
946 }
947 
948 int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
949 {
950     int err = pthread_mutex_lock_timeout_np_impl(mutex, msecs);
951 #ifdef PTHREAD_DEBUG
952     if (PTHREAD_DEBUG_ENABLED) {
953         if (!err) {
954             pthread_debug_mutex_lock_check(mutex);
955         }
956     }
957 #endif
958     return err;
959 }
960 
961 int pthread_mutex_destroy(pthread_mutex_t *mutex)
962 {
963     int ret;
964 
965     /* use trylock to ensure that the mutex value is
966      * valid and is not already locked. */
967     ret = pthread_mutex_trylock_impl(mutex);
968     if (ret != 0)
969         return ret;
970 
971     mutex->value = 0xdead10cc;
972     return 0;
973 }
974 
975 
976 
977 int pthread_condattr_init(pthread_condattr_t *attr)
978 {
979     if (attr == NULL)
980         return EINVAL;
981 
982     *attr = PTHREAD_PROCESS_PRIVATE;
983     return 0;
984 }
985 
986 int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
987 {
988     if (attr == NULL || pshared == NULL)
989         return EINVAL;
990 
991     *pshared = *attr;
992     return 0;
993 }
994 
995 int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
996 {
997     if (attr == NULL)
998         return EINVAL;
999 
1000     if (pshared != PTHREAD_PROCESS_SHARED &&
1001         pshared != PTHREAD_PROCESS_PRIVATE)
1002         return EINVAL;
1003 
1004     *attr = pshared;
1005     return 0;
1006 }
1007 
1008 int pthread_condattr_destroy(pthread_condattr_t *attr)
1009 {
1010     if (attr == NULL)
1011         return EINVAL;
1012 
1013     *attr = 0xdeada11d;
1014     return 0;
1015 }
1016 
1017 /* We use one bit in condition variable values as the 'shared' flag;
1018  * the rest is a counter.
1019  */
1020 #define COND_SHARED_MASK        0x0001
1021 #define COND_COUNTER_INCREMENT  0x0002
1022 #define COND_COUNTER_MASK       (~COND_SHARED_MASK)
1023 
1024 #define COND_IS_SHARED(c)  (((c)->value & COND_SHARED_MASK) != 0)
1025 
1026 /* XXX *technically* there is a race condition that could allow
1027  * XXX a signal to be missed.  If thread A is preempted in _wait()
1028  * XXX after unlocking the mutex and before waiting, and if other
1029  * XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
1030  * XXX before thread A is scheduled again and calls futex_wait(),
1031  * XXX then the signal will be lost.
1032  */
1033 
1034 int pthread_cond_init(pthread_cond_t *cond,
1035                       const pthread_condattr_t *attr)
1036 {
1037     if (cond == NULL)
1038         return EINVAL;
1039 
1040     cond->value = 0;
1041 
1042     if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
1043         cond->value |= COND_SHARED_MASK;
1044 
1045     return 0;
1046 }
1047 
1048 int pthread_cond_destroy(pthread_cond_t *cond)
1049 {
1050     if (cond == NULL)
1051         return EINVAL;
1052 
1053     cond->value = 0xdeadc04d;
1054     return 0;
1055 }
1056 
1057 /* This function is used by pthread_cond_broadcast and
1058  * pthread_cond_signal to atomically decrement the counter
1059  * then wake up 'counter' threads.
1060  */
1061 static int
1062 __pthread_cond_pulse(pthread_cond_t *cond, int  counter)
1063 {
1064     long flags;
1065 
1066     if (__predict_false(cond == NULL))
1067         return EINVAL;
1068 
1069     flags = (cond->value & ~COND_COUNTER_MASK);
1070     for (;;) {
1071         long oldval = cond->value;
1072         long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
1073                       | flags;
1074         if (__bionic_cmpxchg(oldval, newval, &cond->value) == 0)
1075             break;
1076     }
1077 
1078     /*
1079      * Ensure that all memory accesses previously made by this thread are
1080      * visible to the woken thread(s).  On the other side, the "wait"
1081      * code will issue any necessary barriers when locking the mutex.
1082      *
1083      * This may not strictly be necessary -- if the caller follows
1084      * recommended practice and holds the mutex before signaling the cond
1085      * var, the mutex ops will provide correct semantics.  If they don't
1086      * hold the mutex, they're subject to race conditions anyway.
1087      */
1088     ANDROID_MEMBAR_FULL();
1089 
1090     __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
1091     return 0;
1092 }
1093 
1094 int pthread_cond_broadcast(pthread_cond_t *cond)
1095 {
1096     return __pthread_cond_pulse(cond, INT_MAX);
1097 }
1098 
1099 int pthread_cond_signal(pthread_cond_t *cond)
1100 {
1101     return __pthread_cond_pulse(cond, 1);
1102 }
1103 
1104 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
1105 {
1106     return pthread_cond_timedwait(cond, mutex, NULL);
1107 }
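/* Editorial sketch (illustration only, not part of the original file): the
 * standard predicate-loop usage of pthread_cond_wait(). The condition is
 * re-checked under the mutex, so spurious or early wake-ups are harmless.
 * The names g_lock, g_cond and g_ready are hypothetical.
 */
#if 0
static pthread_mutex_t g_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_cond  = PTHREAD_COND_INITIALIZER;
static int             g_ready = 0;

static void wait_until_ready(void)
{
    pthread_mutex_lock(&g_lock);
    while (!g_ready)                        /* re-check: wake-ups may be spurious */
        pthread_cond_wait(&g_cond, &g_lock);
    pthread_mutex_unlock(&g_lock);
}

static void set_ready(void)
{
    pthread_mutex_lock(&g_lock);
    g_ready = 1;
    pthread_cond_signal(&g_cond);           /* signal while holding the mutex */
    pthread_mutex_unlock(&g_lock);
}
#endif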
1108 
1109 int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
1110                                       pthread_mutex_t * mutex,
1111                                       const struct timespec *reltime)
1112 {
1113     int  status;
1114     int  oldvalue = cond->value;
1115 
1116     pthread_mutex_unlock(mutex);
1117     status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), oldvalue, reltime);
1118     pthread_mutex_lock(mutex);
1119 
1120     if (status == (-ETIMEDOUT)) return ETIMEDOUT;
1121     return 0;
1122 }
1123 
1124 int __pthread_cond_timedwait(pthread_cond_t *cond,
1125                              pthread_mutex_t * mutex,
1126                              const struct timespec *abstime,
1127                              clockid_t clock)
1128 {
1129     struct timespec ts;
1130     struct timespec * tsp;
1131 
1132     if (abstime != NULL) {
1133         if (__timespec_to_absolute(&ts, abstime, clock) < 0)
1134             return ETIMEDOUT;
1135         tsp = &ts;
1136     } else {
1137         tsp = NULL;
1138     }
1139 
1140     return __pthread_cond_timedwait_relative(cond, mutex, tsp);
1141 }
1142 
1143 int pthread_cond_timedwait(pthread_cond_t *cond,
1144                            pthread_mutex_t * mutex,
1145                            const struct timespec *abstime)
1146 {
1147     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
1148 }
1149 
1150 
1151 /* this one exists only for backward binary compatibility */
1152 int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
1153                                      pthread_mutex_t * mutex,
1154                                      const struct timespec *abstime)
1155 {
1156     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
1157 }
1158 
1159 int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
1160                                      pthread_mutex_t * mutex,
1161                                      const struct timespec *abstime)
1162 {
1163     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
1164 }
1165 
1166 int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
1167                                       pthread_mutex_t * mutex,
1168                                       const struct timespec *reltime)
1169 {
1170     return __pthread_cond_timedwait_relative(cond, mutex, reltime);
1171 }
1172 
1173 int pthread_cond_timeout_np(pthread_cond_t *cond,
1174                             pthread_mutex_t * mutex,
1175                             unsigned msecs)
1176 {
1177     struct timespec ts;
1178 
1179     ts.tv_sec = msecs / 1000;
1180     ts.tv_nsec = (msecs % 1000) * 1000000;
1181 
1182     return __pthread_cond_timedwait_relative(cond, mutex, &ts);
1183 }
1184 
1185 
1186 /* NOTE: this implementation doesn't support an init function that throws a C++ exception
1187  *       or calls fork().
1188  */
1189 int pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
1190 {
1191     volatile pthread_once_t* ocptr = once_control;
1192 
1193     /* PTHREAD_ONCE_INIT is 0, we use the following bit flags
1194      *
1195      *   bit 0 set  -> initialization is under way
1196      *   bit 1 set  -> initialization is complete
1197      */
1198 #define ONCE_INITIALIZING           (1 << 0)
1199 #define ONCE_COMPLETED              (1 << 1)
1200 
1201     /* First check if the once is already initialized. This will be the common
1202     * case and we want to make this as fast as possible. Note that this still
1203     * requires a load_acquire operation here to ensure that all the
1204     * stores performed by the initialization function are observable on
1205     * this CPU after we exit.
1206     */
1207     if (__predict_true((*ocptr & ONCE_COMPLETED) != 0)) {
1208         ANDROID_MEMBAR_FULL();
1209         return 0;
1210     }
1211 
1212     for (;;) {
1213         /* Try to atomically set the INITIALIZING flag.
1214          * This requires a cmpxchg loop, and we may need
1215          * to exit prematurely if we detect that
1216          * COMPLETED is now set.
1217          */
1218         int32_t  oldval, newval;
1219 
1220         do {
1221             oldval = *ocptr;
1222             if ((oldval & ONCE_COMPLETED) != 0)
1223                 break;
1224 
1225             newval = oldval | ONCE_INITIALIZING;
1226         } while (__bionic_cmpxchg(oldval, newval, ocptr) != 0);
1227 
1228         if ((oldval & ONCE_COMPLETED) != 0) {
1229             /* We detected that COMPLETED was set while in our loop */
1230             ANDROID_MEMBAR_FULL();
1231             return 0;
1232         }
1233 
1234         if ((oldval & ONCE_INITIALIZING) == 0) {
1235             /* We got there first, we can jump out of the loop to
1236              * handle the initialization */
1237             break;
1238         }
1239 
1240         /* Another thread is running the initialization and hasn't completed
1241          * yet, so wait for it, then try again. */
1242         __futex_wait_ex(ocptr, 0, oldval, NULL);
1243     }
1244 
1245     /* call the initialization function. */
1246     (*init_routine)();
1247 
1248     /* Do a store_release indicating that initialization is complete */
1249     ANDROID_MEMBAR_FULL();
1250     *ocptr = ONCE_COMPLETED;
1251 
1252     /* Wake up waiters, if any */
1253     __futex_wake_ex(ocptr, 0, INT_MAX);
1254 
1255     return 0;
1256 }
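/* Editorial sketch (illustration only, not part of the original file): typical
 * one-time initialization through pthread_once(). Per the note above, the init
 * routine must not throw a C++ exception or call fork(). The names g_once,
 * init_table and use_table are hypothetical.
 */
#if 0
static pthread_once_t g_once = PTHREAD_ONCE_INIT;
static int g_table_ready;

static void init_table(void)
{
    /* runs exactly once, no matter how many threads race to get here */
    g_table_ready = 1;
}

static void use_table(void)
{
    pthread_once(&g_once, init_table);
    /* g_table_ready is guaranteed to be 1 and visible here */
}
#endif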
1257 
1258 pid_t __pthread_gettid(pthread_t thid) {
1259   pthread_internal_t* thread = (pthread_internal_t*) thid;
1260   return thread->tid;
1261 }
1262 
1263 int __pthread_settid(pthread_t thid, pid_t tid) {
1264   if (thid == 0) {
1265       return EINVAL;
1266   }
1267 
1268   pthread_internal_t* thread = (pthread_internal_t*) thid;
1269   thread->tid = tid;
1270 
1271   return 0;
1272 }
1273