1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <sys/types.h>
29 #include <unistd.h>
30 #include <signal.h>
31 #include <stdint.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <errno.h>
35 #include <sys/atomics.h>
36 #include <bionic_tls.h>
37 #include <sys/mman.h>
38 #include <pthread.h>
39 #include <time.h>
40 #include "pthread_internal.h"
41 #include "thread_private.h"
42 #include <limits.h>
43 #include <memory.h>
44 #include <assert.h>
45 #include <malloc.h>
46 #include <bionic_futex.h>
47 #include <bionic_atomic_inline.h>
48 #include <sys/prctl.h>
49 #include <sys/stat.h>
50 #include <fcntl.h>
51 #include <stdio.h>
52 #include <bionic_pthread.h>
53 
54 extern int  __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
55 extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
56 extern void _exit_thread(int  retCode);
57 extern int  __set_errno(int);
58 
59 int  __futex_wake_ex(volatile void *ftx, int pshared, int val)
60 {
61     return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
62 }
63 
64 int  __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout)
65 {
66     return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
67 }
68 
69 #define  __likely(cond)    __builtin_expect(!!(cond), 1)
70 #define  __unlikely(cond)  __builtin_expect(!!(cond), 0)
71 
72 #ifdef __i386__
73 #define ATTRIBUTES __attribute__((noinline)) __attribute__((fastcall))
74 #else
75 #define ATTRIBUTES __attribute__((noinline))
76 #endif
77 
78 void ATTRIBUTES _thread_created_hook(pid_t thread_id);
79 
80 #define PTHREAD_ATTR_FLAG_DETACHED      0x00000001
81 #define PTHREAD_ATTR_FLAG_USER_STACK    0x00000002
82 
83 #define DEFAULT_STACKSIZE (1024 * 1024)
84 #define STACKBASE 0x10000000
85 
86 static uint8_t * gStackBase = (uint8_t *)STACKBASE;
87 
88 static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;
89 
90 
91 static const pthread_attr_t gDefaultPthreadAttr = {
92     .flags = 0,
93     .stack_base = NULL,
94     .stack_size = DEFAULT_STACKSIZE,
95     .guard_size = PAGE_SIZE,
96     .sched_policy = SCHED_NORMAL,
97     .sched_priority = 0
98 };
99 
100 #define  INIT_THREADS  1
101 
102 static pthread_internal_t*  gThreadList = NULL;
103 static pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
104 static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
105 
106 
107 /* we simply malloc/free the internal pthread_internal_t structures. we may
108  * want to use a different allocation scheme in the future, but this one
109  * should be more than sufficient for now.
110  */
111 static pthread_internal_t*
112 _pthread_internal_alloc(void)
113 {
114     pthread_internal_t*   thread;
115 
116     thread = calloc( sizeof(*thread), 1 );
117     if (thread)
118         thread->intern = 1;
119 
120     return thread;
121 }
122 
123 static void
124 _pthread_internal_free( pthread_internal_t*  thread )
125 {
126     if (thread && thread->intern) {
127         thread->intern = 0;  /* just in case */
128         free (thread);
129     }
130 }
131 
132 
133 static void
134 _pthread_internal_remove_locked( pthread_internal_t*  thread )
135 {
136     thread->next->pref = thread->pref;
137     thread->pref[0]    = thread->next;
138 }
139 
140 static void
141 _pthread_internal_remove( pthread_internal_t*  thread )
142 {
143     pthread_mutex_lock(&gThreadListLock);
144     _pthread_internal_remove_locked(thread);
145     pthread_mutex_unlock(&gThreadListLock);
146 }
147 
148 static void
149 _pthread_internal_add( pthread_internal_t*  thread )
150 {
151     pthread_mutex_lock(&gThreadListLock);
152     thread->pref = &gThreadList;
153     thread->next = thread->pref[0];
154     if (thread->next)
155         thread->next->pref = &thread->next;
156     thread->pref[0] = thread;
157     pthread_mutex_unlock(&gThreadListLock);
158 }
159 
160 pthread_internal_t*
161 __get_thread(void)
162 {
163     void**  tls = (void**)__get_tls();
164 
165     return  (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
166 }
167 
168 
169 void*
170 __get_stack_base(int  *p_stack_size)
171 {
172     pthread_internal_t*  thread = __get_thread();
173 
174     *p_stack_size = thread->attr.stack_size;
175     return thread->attr.stack_base;
176 }
177 
178 
179 void  __init_tls(void**  tls, void*  thread)
180 {
181     int  nn;
182 
183     ((pthread_internal_t*)thread)->tls = tls;
184 
185     // slot 0 must point to the TLS area itself; this is required by the
186     // x86 Linux kernel's thread-local-storage implementation
187     tls[TLS_SLOT_SELF]      = (void*)tls;
188     tls[TLS_SLOT_THREAD_ID] = thread;
189     for (nn = TLS_SLOT_ERRNO; nn < BIONIC_TLS_SLOTS; nn++)
190        tls[nn] = 0;
191 
192     __set_tls( (void*)tls );
193 }
194 
195 
196 /*
197  * This trampoline is called from the assembly clone() function
198  */
199 void __thread_entry(int (*func)(void*), void *arg, void **tls)
200 {
201     int retValue;
202     pthread_internal_t * thrInfo;
203 
204     // Wait for our creating thread to release us. This lets it have time to
205     // notify gdb about this thread before it starts doing anything.
206     //
207     // This also provides the memory barrier needed to ensure that all memory
208     // accesses previously made by the creating thread are visible to us.
209     pthread_mutex_t * start_mutex = (pthread_mutex_t *)&tls[TLS_SLOT_SELF];
210     pthread_mutex_lock(start_mutex);
211     pthread_mutex_destroy(start_mutex);
212 
213     thrInfo = (pthread_internal_t *) tls[TLS_SLOT_THREAD_ID];
214 
215     __init_tls( tls, thrInfo );
216 
217     pthread_exit( (void*)func(arg) );
218 }
219 
220 void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base)
221 {
222     if (attr == NULL) {
223         thread->attr = gDefaultPthreadAttr;
224     } else {
225         thread->attr = *attr;
226     }
227     thread->attr.stack_base = stack_base;
228     thread->kernel_id       = kernel_id;
229 
230     // set the scheduling policy/priority of the thread
231     if (thread->attr.sched_policy != SCHED_NORMAL) {
232         struct sched_param param;
233         param.sched_priority = thread->attr.sched_priority;
234         sched_setscheduler(kernel_id, thread->attr.sched_policy, &param);
235     }
236 
237     pthread_cond_init(&thread->join_cond, NULL);
238     thread->join_count = 0;
239 
240     thread->cleanup_stack = NULL;
241 
242     _pthread_internal_add(thread);
243 }
244 
245 
246 /* XXX stacks not reclaimed if thread spawn fails */
247 /* XXX stack address spaces should be reused when they become available again */
248 
249 static void *mkstack(size_t size, size_t guard_size)
250 {
251     void * stack;
252 
253     pthread_mutex_lock(&mmap_lock);
254 
255     stack = mmap((void *)gStackBase, size,
256                  PROT_READ | PROT_WRITE,
257                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
258                  -1, 0);
259 
260     if(stack == MAP_FAILED) {
261         stack = NULL;
262         goto done;
263     }
264 
265     if(mprotect(stack, guard_size, PROT_NONE)){
266         munmap(stack, size);
267         stack = NULL;
268         goto done;
269     }
270 
271 done:
272     pthread_mutex_unlock(&mmap_lock);
273     return stack;
274 }
275 
276 /*
277  * Create a new thread. The thread's stack is laid out like so:
278  *
279  * +---------------------------+
280  * |     pthread_internal_t    |
281  * +---------------------------+
282  * |                           |
283  * |          TLS area         |
284  * |                           |
285  * +---------------------------+
286  * |                           |
287  * .                           .
288  * .         stack area        .
289  * .                           .
290  * |                           |
291  * +---------------------------+
292  * |         guard page        |
293  * +---------------------------+
294  *
295  *  note that TLS[0] must be a pointer to itself; this is required
296  *  by the thread-local-storage implementation of the x86 Linux
297  *  kernel, where the TLS pointer is read from fs:[0]
298  */
299 int pthread_create(pthread_t *thread_out, pthread_attr_t const * attr,
300                    void *(*start_routine)(void *), void * arg)
301 {
302     char*   stack;
303     void**  tls;
304     int tid;
305     pthread_mutex_t * start_mutex;
306     pthread_internal_t * thread;
307     int                  madestack = 0;
308     int     old_errno = errno;
309 
310     /* this will inform the rest of the C library that at least one thread
311      * was created. this will force certain functions to acquire/release
312      * locks (e.g. atexit()) to protect shared global structures.
313      *
314      * this works because pthread_create() is not called by the C library
315      * initialization routine that sets up the main thread's data structures.
316      */
317     __isthreaded = 1;
318 
319     thread = _pthread_internal_alloc();
320     if (thread == NULL)
321         return ENOMEM;
322 
323     if (attr == NULL) {
324         attr = &gDefaultPthreadAttr;
325     }
326 
327     // make sure the stack is PAGE_SIZE aligned
328     size_t stackSize = (attr->stack_size +
329                         (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);
330 
331     if (!attr->stack_base) {
332         stack = mkstack(stackSize, attr->guard_size);
333         if(stack == NULL) {
334             _pthread_internal_free(thread);
335             return ENOMEM;
336         }
337         madestack = 1;
338     } else {
339         stack = attr->stack_base;
340     }
341 
342     // Make room for TLS
343     tls = (void**)(stack + stackSize - BIONIC_TLS_SLOTS*sizeof(void*));
344 
345     // Create a mutex for the thread in TLS_SLOT_SELF to wait on once it starts so we can keep
346     // it from doing anything until after we notify the debugger about it
347     //
348     // This also provides the memory barrier we need to ensure that all
349     // memory accesses previously performed by this thread are visible to
350     // the new thread.
351     start_mutex = (pthread_mutex_t *) &tls[TLS_SLOT_SELF];
352     pthread_mutex_init(start_mutex, NULL);
353     pthread_mutex_lock(start_mutex);
354 
355     tls[TLS_SLOT_THREAD_ID] = thread;
356 
357     tid = __pthread_clone((int(*)(void*))start_routine, tls,
358                 CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND
359                 | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED,
360                 arg);
361 
362     if(tid < 0) {
363         int  result;
364         if (madestack)
365             munmap(stack, stackSize);
366         _pthread_internal_free(thread);
367         result = errno;
368         errno = old_errno;
369         return result;
370     }
371 
372     _init_thread(thread, tid, (pthread_attr_t*)attr, stack);
373 
374     if (!madestack)
375         thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_STACK;
376 
377     // Notify any debuggers about the new thread
378     pthread_mutex_lock(&gDebuggerNotificationLock);
379     _thread_created_hook(tid);
380     pthread_mutex_unlock(&gDebuggerNotificationLock);
381 
382     // Let the thread do its thing
383     pthread_mutex_unlock(start_mutex);
384 
385     *thread_out = (pthread_t)thread;
386     return 0;
387 }
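/* Example: a minimal caller-side sketch of the create/join path implemented
 * above. 'worker' and 't' are illustrative placeholder names.
 *
 *     static void* worker(void* arg)
 *     {
 *         return arg;                          // value returned to the joiner
 *     }
 *
 *     pthread_t  t;
 *     void*      result;
 *     int err = pthread_create(&t, NULL, worker, (void*)42);  // NULL attr => gDefaultPthreadAttr
 *     if (err == 0) {
 *         pthread_join(t, &result);            // result == (void*)42
 *     }
 */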
388 
389 
390 int pthread_attr_init(pthread_attr_t * attr)
391 {
392     *attr = gDefaultPthreadAttr;
393     return 0;
394 }
395 
396 int pthread_attr_destroy(pthread_attr_t * attr)
397 {
398     memset(attr, 0x42, sizeof(pthread_attr_t));
399     return 0;
400 }
401 
402 int pthread_attr_setdetachstate(pthread_attr_t * attr, int state)
403 {
404     if (state == PTHREAD_CREATE_DETACHED) {
405         attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
406     } else if (state == PTHREAD_CREATE_JOINABLE) {
407         attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
408     } else {
409         return EINVAL;
410     }
411     return 0;
412 }
413 
414 int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state)
415 {
416     *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED)
417            ? PTHREAD_CREATE_DETACHED
418            : PTHREAD_CREATE_JOINABLE;
419     return 0;
420 }
421 
422 int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy)
423 {
424     attr->sched_policy = policy;
425     return 0;
426 }
427 
428 int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy)
429 {
430     *policy = attr->sched_policy;
431     return 0;
432 }
433 
434 int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param)
435 {
436     attr->sched_priority = param->sched_priority;
437     return 0;
438 }
439 
440 int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param)
441 {
442     param->sched_priority = attr->sched_priority;
443     return 0;
444 }
445 
446 int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size)
447 {
448     if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
449         return EINVAL;
450     }
451     attr->stack_size = stack_size;
452     return 0;
453 }
454 
455 int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size)
456 {
457     *stack_size = attr->stack_size;
458     return 0;
459 }
460 
461 int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stack_addr)
462 {
463 #if 1
464     // It's not clear if this is setting the top or bottom of the stack, so don't handle it for now.
465     return ENOSYS;
466 #else
467     if ((uint32_t)stack_addr & (PAGE_SIZE - 1)) {
468         return EINVAL;
469     }
470     attr->stack_base = stack_addr;
471     return 0;
472 #endif
473 }
474 
475 int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stack_addr)
476 {
477     *stack_addr = (char*)attr->stack_base + attr->stack_size;
478     return 0;
479 }
480 
481 int pthread_attr_setstack(pthread_attr_t * attr, void * stack_base, size_t stack_size)
482 {
483     if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
484         return EINVAL;
485     }
486     if ((uint32_t)stack_base & (PAGE_SIZE - 1)) {
487         return EINVAL;
488     }
489     attr->stack_base = stack_base;
490     attr->stack_size = stack_size;
491     return 0;
492 }
493 
494 int pthread_attr_getstack(pthread_attr_t const * attr, void ** stack_base, size_t * stack_size)
495 {
496     *stack_base = attr->stack_base;
497     *stack_size = attr->stack_size;
498     return 0;
499 }
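/* Example: a sketch of creating a thread on a caller-supplied stack using the
 * attribute calls above. The mmap'ed region, its size, and 'worker'/'t' are
 * illustrative placeholders.
 *
 *     size_t size = 256 * 1024;                          // multiple of PAGE_SIZE
 *     void*  base = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     pthread_attr_t attr;
 *     pthread_attr_init(&attr);
 *     pthread_attr_setstack(&attr, base, size);          // base must be page-aligned
 *     pthread_create(&t, &attr, worker, NULL);           // user stacks are NOT unmapped on exit
 */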
500 
501 int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size)
502 {
503     if (guard_size & (PAGE_SIZE - 1) || guard_size < PAGE_SIZE) {
504         return EINVAL;
505     }
506 
507     attr->guard_size = guard_size;
508     return 0;
509 }
510 
511 int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size)
512 {
513     *guard_size = attr->guard_size;
514     return 0;
515 }
516 
517 int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr)
518 {
519     pthread_internal_t * thread = (pthread_internal_t *)thid;
520     *attr = thread->attr;
521     return 0;
522 }
523 
524 int pthread_attr_setscope(pthread_attr_t *attr, int  scope)
525 {
526     if (scope == PTHREAD_SCOPE_SYSTEM)
527         return 0;
528     if (scope == PTHREAD_SCOPE_PROCESS)
529         return ENOTSUP;
530 
531     return EINVAL;
532 }
533 
534 int pthread_attr_getscope(pthread_attr_t const *attr)
535 {
536     return PTHREAD_SCOPE_SYSTEM;
537 }
538 
539 
540 /* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
541  *         and thread cancellation
542  */
543 
544 void __pthread_cleanup_push( __pthread_cleanup_t*      c,
545                              __pthread_cleanup_func_t  routine,
546                              void*                     arg )
547 {
548     pthread_internal_t*  thread = __get_thread();
549 
550     c->__cleanup_routine  = routine;
551     c->__cleanup_arg      = arg;
552     c->__cleanup_prev     = thread->cleanup_stack;
553     thread->cleanup_stack = c;
554 }
555 
556 void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
557 {
558     pthread_internal_t*  thread = __get_thread();
559 
560     thread->cleanup_stack = c->__cleanup_prev;
561     if (execute)
562         c->__cleanup_routine(c->__cleanup_arg);
563 }
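/* Example: typical use through the pthread_cleanup_push()/pop() macros, which
 * are built on top of the helpers above. 'unlock_it' and 'lock' are
 * illustrative placeholder names.
 *
 *     static void unlock_it(void* m)
 *     {
 *         pthread_mutex_unlock((pthread_mutex_t*)m);
 *     }
 *
 *     pthread_mutex_lock(&lock);
 *     pthread_cleanup_push(unlock_it, &lock);   // handler runs if pthread_exit() is called here
 *     ...                                       // work that may exit the thread
 *     pthread_cleanup_pop(1);                   // 1 => execute the handler now
 */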
564 
565 /* used by pthread_exit() to clean all TLS keys of the current thread */
566 static void pthread_key_clean_all(void);
567 
568 void pthread_exit(void * retval)
569 {
570     pthread_internal_t*  thread     = __get_thread();
571     void*                stack_base = thread->attr.stack_base;
572     int                  stack_size = thread->attr.stack_size;
573     int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;
574 
575     // call the cleanup handlers first
576     while (thread->cleanup_stack) {
577         __pthread_cleanup_t*  c = thread->cleanup_stack;
578         thread->cleanup_stack   = c->__cleanup_prev;
579         c->__cleanup_routine(c->__cleanup_arg);
580     }
581 
582     // call the TLS destructors, it is important to do that before removing this
583     // thread from the global list. this will ensure that if someone else deletes
584     // a TLS key, the corresponding value will be set to NULL in this thread's TLS
585     // space (see pthread_key_delete)
586     pthread_key_clean_all();
587 
588     // if the thread is detached, destroy the pthread_internal_t
589     // otherwise, keep it in memory and signal any joiners
590     if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
591         _pthread_internal_remove(thread);
592         _pthread_internal_free(thread);
593     } else {
594        /* the join_count field is used to store the number of threads waiting for
595     * the termination of this thread with pthread_join().
596     *
597     * if it is positive, we need to signal the waiters, and we do not touch
598     * the count (it will be decremented by the waiters; the last one will
599     * also remove/free the thread structure).
600         *
601         * if it is zero, we set the count value to -1 to indicate that the
602         * thread is in 'zombie' state: it has stopped executing, and its stack
603         * is gone (as well as its TLS area). when another thread calls pthread_join()
604         * on it, it will immediately free the thread and return.
605         */
606         pthread_mutex_lock(&gThreadListLock);
607         thread->return_value = retval;
608         if (thread->join_count > 0) {
609             pthread_cond_broadcast(&thread->join_cond);
610         } else {
611             thread->join_count = -1;  /* zombie thread */
612         }
613         pthread_mutex_unlock(&gThreadListLock);
614     }
615 
616     // destroy the thread stack
617     if (user_stack)
618         _exit_thread((int)retval);
619     else
620         _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
621 }
622 
623 int pthread_join(pthread_t thid, void ** ret_val)
624 {
625     pthread_internal_t*  thread = (pthread_internal_t*)thid;
626     int                  count;
627 
628     // check that the thread still exists and is not detached
629     pthread_mutex_lock(&gThreadListLock);
630 
631     for (thread = gThreadList; thread != NULL; thread = thread->next)
632         if (thread == (pthread_internal_t*)thid)
633             goto FoundIt;
634 
635     pthread_mutex_unlock(&gThreadListLock);
636     return ESRCH;
637 
638 FoundIt:
639     if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
640         pthread_mutex_unlock(&gThreadListLock);
641         return EINVAL;
642     }
643 
644    /* wait for thread death when needed
645     *
646     * if the 'join_count' is negative, this is a 'zombie' thread that
647     * is already dead and without stack/TLS
648     *
649     * otherwise, we need to increment 'join_count' and wait to be signaled
650     */
651    count = thread->join_count;
652     if (count >= 0) {
653         thread->join_count += 1;
654         pthread_cond_wait( &thread->join_cond, &gThreadListLock );
655         count = --thread->join_count;
656     }
657     if (ret_val)
658         *ret_val = thread->return_value;
659 
660     /* remove thread descriptor when we're the last joiner or when the
661      * thread was already a zombie.
662      */
663     if (count <= 0) {
664         _pthread_internal_remove_locked(thread);
665         _pthread_internal_free(thread);
666     }
667     pthread_mutex_unlock(&gThreadListLock);
668     return 0;
669 }
670 
671 int  pthread_detach( pthread_t  thid )
672 {
673     pthread_internal_t*  thread;
674     int                  result = 0;
675     int                  flags;
676 
677     pthread_mutex_lock(&gThreadListLock);
678     for (thread = gThreadList; thread != NULL; thread = thread->next)
679         if (thread == (pthread_internal_t*)thid)
680             goto FoundIt;
681 
682     result = ESRCH;
683     goto Exit;
684 
685 FoundIt:
686     do {
687         flags = thread->attr.flags;
688 
689         if ( flags & PTHREAD_ATTR_FLAG_DETACHED ) {
690             /* thread is not joinable ! */
691             result = EINVAL;
692             goto Exit;
693         }
694     }
695     while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
696                               (volatile int*)&thread->attr.flags ) != 0 );
697 Exit:
698     pthread_mutex_unlock(&gThreadListLock);
699     return result;
700 }
701 
702 pthread_t pthread_self(void)
703 {
704     return (pthread_t)__get_thread();
705 }
706 
707 int pthread_equal(pthread_t one, pthread_t two)
708 {
709     return (one == two ? 1 : 0);
710 }
711 
712 int pthread_getschedparam(pthread_t thid, int * policy,
713                           struct sched_param * param)
714 {
715     int  old_errno = errno;
716 
717     pthread_internal_t * thread = (pthread_internal_t *)thid;
718     int err = sched_getparam(thread->kernel_id, param);
719     if (!err) {
720         *policy = sched_getscheduler(thread->kernel_id);
721     } else {
722         err = errno;
723         errno = old_errno;
724     }
725     return err;
726 }
727 
728 int pthread_setschedparam(pthread_t thid, int policy,
729                           struct sched_param const * param)
730 {
731     pthread_internal_t * thread = (pthread_internal_t *)thid;
732     int                  old_errno = errno;
733     int                  ret;
734 
735     ret = sched_setscheduler(thread->kernel_id, policy, param);
736     if (ret < 0) {
737         ret = errno;
738         errno = old_errno;
739     }
740     return ret;
741 }
742 
743 
744 // mutex lock states
745 //
746 // 0: unlocked
747 // 1: locked, no waiters
748 // 2: locked, maybe waiters
749 
750 /* a mutex is implemented as a 32-bit integer holding the following fields
751  *
752  * bits:     name     description
753  * 31-16     tid      owner thread's kernel id (recursive and errorcheck only)
754  * 15-14     type     mutex type
755  * 13        shared   process-shared flag
756  * 12-2      counter  counter of recursive mutexes
757  * 1-0       state    lock state (0, 1 or 2)
758  */
759 
760 
761 #define  MUTEX_OWNER(m)  (((m)->value >> 16) & 0xffff)
762 #define  MUTEX_COUNTER(m) (((m)->value >> 2) & 0xfff)
763 
764 #define  MUTEX_TYPE_MASK       0xc000
765 #define  MUTEX_TYPE_NORMAL     0x0000
766 #define  MUTEX_TYPE_RECURSIVE  0x4000
767 #define  MUTEX_TYPE_ERRORCHECK 0x8000
768 
769 #define  MUTEX_COUNTER_SHIFT  2
770 #define  MUTEX_COUNTER_MASK   0x1ffc
771 #define  MUTEX_SHARED_MASK    0x2000
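/* Worked example of the encoding above: a process-private recursive mutex
 * owned by the thread with kernel id 0x1234, locked twice (so the recursion
 * counter is 1, since the first acquisition is not counted), with no waiters:
 *
 *     value = (0x1234 << 16)              // owner tid
 *           | MUTEX_TYPE_RECURSIVE        // 0x4000
 *           | (1 << MUTEX_COUNTER_SHIFT)  // recursion counter == 1
 *           | 1                           // lock state: locked, no waiters
 *           = 0x12344005
 */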
772 
773 /* a mutex attribute holds the following fields
774  *
775  * bits:     name       description
776  * 0-3       type       type of mutex
777  * 4         shared     process-shared flag
778  */
779 #define  MUTEXATTR_TYPE_MASK   0x000f
780 #define  MUTEXATTR_SHARED_MASK 0x0010
781 
782 
783 int pthread_mutexattr_init(pthread_mutexattr_t *attr)
784 {
785     if (attr) {
786         *attr = PTHREAD_MUTEX_DEFAULT;
787         return 0;
788     } else {
789         return EINVAL;
790     }
791 }
792 
793 int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
794 {
795     if (attr) {
796         *attr = -1;
797         return 0;
798     } else {
799         return EINVAL;
800     }
801 }
802 
803 int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
804 {
805     if (attr) {
806         int  atype = (*attr & MUTEXATTR_TYPE_MASK);
807 
808          if (atype >= PTHREAD_MUTEX_NORMAL &&
809              atype <= PTHREAD_MUTEX_ERRORCHECK) {
810             *type = atype;
811             return 0;
812         }
813     }
814     return EINVAL;
815 }
816 
817 int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
818 {
819     if (attr && type >= PTHREAD_MUTEX_NORMAL &&
820                 type <= PTHREAD_MUTEX_ERRORCHECK ) {
821         *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
822         return 0;
823     }
824     return EINVAL;
825 }
826 
827 /* process-shared mutexes are not supported at the moment */
828 
829 int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int  pshared)
830 {
831     if (!attr)
832         return EINVAL;
833 
834     switch (pshared) {
835     case PTHREAD_PROCESS_PRIVATE:
836         *attr &= ~MUTEXATTR_SHARED_MASK;
837         return 0;
838 
839     case PTHREAD_PROCESS_SHARED:
840         /* our current implementation of pthread actually supports shared
841          * mutexes but won't cleanup if a process dies with the mutex held.
842          * Nevertheless, it's better than nothing. Shared mutexes are used
843          * by surfaceflinger and audioflinger.
844          */
845         *attr |= MUTEXATTR_SHARED_MASK;
846         return 0;
847     }
848     return EINVAL;
849 }
850 
851 int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
852 {
853     if (!attr || !pshared)
854         return EINVAL;
855 
856     *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED
857                                                : PTHREAD_PROCESS_PRIVATE;
858     return 0;
859 }
860 
861 int pthread_mutex_init(pthread_mutex_t *mutex,
862                        const pthread_mutexattr_t *attr)
863 {
864     int value = 0;
865 
866     if (mutex == NULL)
867         return EINVAL;
868 
869     if (__likely(attr == NULL)) {
870         mutex->value = MUTEX_TYPE_NORMAL;
871         return 0;
872     }
873 
874     if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
875         value |= MUTEX_SHARED_MASK;
876 
877     switch (*attr & MUTEXATTR_TYPE_MASK) {
878     case PTHREAD_MUTEX_NORMAL:
879         value |= MUTEX_TYPE_NORMAL;
880         break;
881     case PTHREAD_MUTEX_RECURSIVE:
882         value |= MUTEX_TYPE_RECURSIVE;
883         break;
884     case PTHREAD_MUTEX_ERRORCHECK:
885         value |= MUTEX_TYPE_ERRORCHECK;
886         break;
887     default:
888         return EINVAL;
889     }
890 
891     mutex->value = value;
892     return 0;
893 }
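/* Example: initializing a recursive mutex through the attribute API above.
 * 'mattr' and 'm' are illustrative placeholder names.
 *
 *     pthread_mutexattr_t  mattr;
 *     pthread_mutex_t      m;
 *     pthread_mutexattr_init(&mattr);                              // PTHREAD_MUTEX_DEFAULT
 *     pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
 *     pthread_mutex_init(&m, &mattr);                              // m.value == MUTEX_TYPE_RECURSIVE
 *     pthread_mutexattr_destroy(&mattr);
 */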
894 
895 int pthread_mutex_destroy(pthread_mutex_t *mutex)
896 {
897     int ret;
898 
899     /* use trylock to ensure that the mutex value is
900      * valid and is not already locked. */
901     ret = pthread_mutex_trylock(mutex);
902     if (ret != 0)
903         return ret;
904 
905     mutex->value = 0xdead10cc;
906     return 0;
907 }
908 
909 
910 /*
911  * Lock a non-recursive mutex.
912  *
913  * As noted above, there are three states:
914  *   0 (unlocked, no contention)
915  *   1 (locked, no contention)
916  *   2 (locked, contention)
917  *
918  * Non-recursive mutexes don't use the thread-id or counter fields, and the
919  * "type" value is zero, so the only bits that will be set are the ones in
920  * the lock state field.
921  */
922 static __inline__ void
923 _normal_lock(pthread_mutex_t*  mutex)
924 {
925     /* We need to preserve the shared flag during operations */
926     int  shared = mutex->value & MUTEX_SHARED_MASK;
927     /*
928      * The common case is an unlocked mutex, so we begin by trying to
929      * change the lock's state from 0 to 1.  __atomic_cmpxchg() returns 0
930      * if it made the swap successfully.  If the result is nonzero, this
931      * lock is already held by another thread.
932      */
933     if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value ) != 0) {
934         /*
935          * We want to go to sleep until the mutex is available, which
936          * requires promoting it to state 2.  We need to swap in the new
937          * state value and then wait until somebody wakes us up.
938          *
939          * __atomic_swap() returns the previous value.  We swap 2 in and
940          * see if we got zero back; if so, we have acquired the lock.  If
941          * not, another thread still holds the lock and we wait again.
942          *
943          * The second argument to the __futex_wait() call is compared
944          * against the current value.  If it doesn't match, __futex_wait()
945          * returns immediately (otherwise, it sleeps for a time specified
946          * by the third argument; 0 means sleep forever).  This ensures
947          * that the mutex is in state 2 when we go to sleep on it, which
948          * guarantees a wake-up call.
949          */
950         while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
951             __futex_wait_ex(&mutex->value, shared, shared|2, 0);
952     }
953     ANDROID_MEMBAR_FULL();
954 }
955 
956 /*
957  * Release a non-recursive mutex.  The caller is responsible for determining
958  * that we are in fact the owner of this lock.
959  */
960 static __inline__ void
961 _normal_unlock(pthread_mutex_t*  mutex)
962 {
963     ANDROID_MEMBAR_FULL();
964 
965     /* We need to preserve the shared flag during operations */
966     int  shared = mutex->value & MUTEX_SHARED_MASK;
967 
968     /*
969      * The mutex state will be 1 or (rarely) 2.  We use an atomic decrement
970      * to release the lock.  __atomic_dec() returns the previous value;
971      * if it wasn't 1 we have to do some additional work.
972      */
973     if (__atomic_dec(&mutex->value) != (shared|1)) {
974         /*
975          * Start by releasing the lock.  The decrement changed it from
976          * "contended lock" to "uncontended lock", which means we still
977          * hold it, and anybody who tries to sneak in will push it back
978          * to state 2.
979          *
980          * Once we set it to zero the lock is up for grabs.  We follow
981          * this with a __futex_wake() to ensure that one of the waiting
982          * threads has a chance to grab it.
983          *
984          * This doesn't cause a race with the swap/wait pair in
985          * _normal_lock(), because the __futex_wait() call there will
986          * return immediately if the mutex value isn't 2.
987          */
988         mutex->value = shared;
989 
990         /*
991          * Wake up one waiting thread.  We don't know which thread will be
992          * woken or when it'll start executing -- futexes make no guarantees
993          * here.  There may not even be a thread waiting.
994          *
995          * The newly-woken thread will replace the 0 we just set above
996          * with 2, which means that when it eventually releases the mutex
997          * it will also call FUTEX_WAKE.  This results in one extra wake
998          * call whenever a lock is contended, but lets us avoid forgetting
999          * anyone without requiring us to track the number of sleepers.
1000          *
1001          * It's possible for another thread to sneak in and grab the lock
1002          * between the zero assignment above and the wake call below.  If
1003          * the new thread is "slow" and holds the lock for a while, we'll
1004          * wake up a sleeper, which will swap in a 2 and then go back to
1005          * sleep since the lock is still held.  If the new thread is "fast",
1006          * running to completion before we call wake, the thread we
1007          * eventually wake will find an unlocked mutex and will execute.
1008          * Either way we have correct behavior and nobody is orphaned on
1009          * the wait queue.
1010          */
1011         __futex_wake_ex(&mutex->value, shared, 1);
1012     }
1013 }
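/* Scenario illustrating the state machine used by _normal_lock/_normal_unlock
 * (shared flag omitted for clarity):
 *
 *   T1 locks:    cmpxchg 0 -> 1 succeeds, T1 owns the mutex            (value == 1)
 *   T2 locks:    cmpxchg fails; it swaps in 2, the old value was 1,
 *                so T2 futex-waits on value == 2                       (value == 2)
 *   T1 unlocks:  atomic_dec returns 2 (!= 1), so the value is reset
 *                to 0 and one waiter is woken                          (value == 0)
 *   T2 wakes:    swaps in 2, the old value was 0, so T2 owns the mutex (value == 2)
 *                and its eventual unlock will issue another wake.
 */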
1014 
1015 static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;
1016 
1017 static void
1018 _recursive_lock(void)
1019 {
1020     _normal_lock(&__recursive_lock);
1021 }
1022 
1023 static void
1024 _recursive_unlock(void)
1025 {
1026     _normal_unlock(&__recursive_lock );
1027 }
1028 
1029 int pthread_mutex_lock(pthread_mutex_t *mutex)
1030 {
1031     int mtype, tid, new_lock_type, shared;
1032 
1033     if (__unlikely(mutex == NULL))
1034         return EINVAL;
1035 
1036     mtype = (mutex->value & MUTEX_TYPE_MASK);
1037     shared = (mutex->value & MUTEX_SHARED_MASK);
1038 
1039     /* Handle normal case first */
1040     if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
1041         _normal_lock(mutex);
1042         return 0;
1043     }
1044 
1045     /* Do we already own this recursive or error-check mutex ? */
1046     tid = __get_thread()->kernel_id;
1047     if ( tid == MUTEX_OWNER(mutex) )
1048     {
1049         int  oldv, counter;
1050 
1051         if (mtype == MUTEX_TYPE_ERRORCHECK) {
1052             /* trying to re-lock a mutex we already acquired */
1053             return EDEADLK;
1054         }
1055         /*
1056          * We own the mutex, but other threads are able to change
1057          * the contents (e.g. promoting it to "contended"), so we
1058          * need to hold the global lock.
1059          */
1060         _recursive_lock();
1061         oldv         = mutex->value;
1062         counter      = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
1063         mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
1064         _recursive_unlock();
1065         return 0;
1066     }
1067 
1068     /* We don't own the mutex, so try to get it.
1069      *
1070      * First, we try to change its state from 0 to 1, if this
1071      * doesn't work, try to change it to state 2.
1072      */
1073     new_lock_type = 1;
1074 
1075     /* compute futex wait opcode and restore shared flag in mtype */
1076     mtype |= shared;
1077 
1078     for (;;) {
1079         int  oldv;
1080 
1081         _recursive_lock();
1082         oldv = mutex->value;
1083         if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
1084             mutex->value = ((tid << 16) | mtype | new_lock_type);
1085         } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
1086             oldv ^= 3;
1087             mutex->value = oldv;
1088         }
1089         _recursive_unlock();
1090 
1091         if (oldv == mtype)
1092             break;
1093 
1094         /*
1095          * The lock was held, possibly contended by others.  From
1096          * now on, if we manage to acquire the lock, we have to
1097          * assume that others are still contending for it so that
1098          * we'll wake them when we unlock it.
1099          */
1100         new_lock_type = 2;
1101 
1102         __futex_wait_ex(&mutex->value, shared, oldv, NULL);
1103     }
1104     return 0;
1105 }
1106 
1107 
1108 int pthread_mutex_unlock(pthread_mutex_t *mutex)
1109 {
1110     int mtype, tid, oldv, shared;
1111 
1112     if (__unlikely(mutex == NULL))
1113         return EINVAL;
1114 
1115     mtype  = (mutex->value & MUTEX_TYPE_MASK);
1116     shared = (mutex->value & MUTEX_SHARED_MASK);
1117 
1118     /* Handle common case first */
1119     if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
1120         _normal_unlock(mutex);
1121         return 0;
1122     }
1123 
1124     /* Do we already own this recursive or error-check mutex ? */
1125     tid = __get_thread()->kernel_id;
1126     if ( tid != MUTEX_OWNER(mutex) )
1127         return EPERM;
1128 
1129     /* We do, decrement counter or release the mutex if it is 0 */
1130     _recursive_lock();
1131     oldv = mutex->value;
1132     if (oldv & MUTEX_COUNTER_MASK) {
1133         mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
1134         oldv = 0;
1135     } else {
1136         mutex->value = shared | mtype;
1137     }
1138     _recursive_unlock();
1139 
1140     /* Wake one waiting thread, if any */
1141     if ((oldv & 3) == 2) {
1142         __futex_wake_ex(&mutex->value, shared, 1);
1143     }
1144     return 0;
1145 }
1146 
1147 
1148 int pthread_mutex_trylock(pthread_mutex_t *mutex)
1149 {
1150     int mtype, tid, oldv, shared;
1151 
1152     if (__unlikely(mutex == NULL))
1153         return EINVAL;
1154 
1155     mtype  = (mutex->value & MUTEX_TYPE_MASK);
1156     shared = (mutex->value & MUTEX_SHARED_MASK);
1157 
1158     /* Handle common case first */
1159     if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
1160     {
1161         if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
1162             ANDROID_MEMBAR_FULL();
1163             return 0;
1164         }
1165 
1166         return EBUSY;
1167     }
1168 
1169     /* Do we already own this recursive or error-check mutex ? */
1170     tid = __get_thread()->kernel_id;
1171     if ( tid == MUTEX_OWNER(mutex) )
1172     {
1173         int counter;
1174 
1175         if (mtype == MUTEX_TYPE_ERRORCHECK) {
1176             /* already locked by ourselves */
1177             return EDEADLK;
1178         }
1179 
1180         _recursive_lock();
1181         oldv = mutex->value;
1182         counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
1183         mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
1184         _recursive_unlock();
1185         return 0;
1186     }
1187 
1188     /* Restore sharing bit in mtype */
1189     mtype |= shared;
1190 
1191     /* Try to lock it, just once. */
1192     _recursive_lock();
1193     oldv = mutex->value;
1194     if (oldv == mtype)  /* uncontended released lock => state 1 */
1195         mutex->value = ((tid << 16) | mtype | 1);
1196     _recursive_unlock();
1197 
1198     if (oldv != mtype)
1199         return EBUSY;
1200 
1201     return 0;
1202 }
1203 
1204 
1205 /* initialize 'ts' with the difference between 'abstime' and the current time
1206  * according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise.
1207  */
1208 static int
1209 __timespec_to_absolute(struct timespec*  ts, const struct timespec*  abstime, clockid_t  clock)
1210 {
1211     clock_gettime(clock, ts);
1212     ts->tv_sec  = abstime->tv_sec - ts->tv_sec;
1213     ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
1214     if (ts->tv_nsec < 0) {
1215         ts->tv_sec--;
1216         ts->tv_nsec += 1000000000;
1217     }
1218     if ((ts->tv_nsec < 0) || (ts->tv_sec < 0))
1219         return -1;
1220 
1221     return 0;
1222 }
1223 
1224 /* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
1225  * milliseconds.
1226  */
1227 static void
1228 __timespec_to_relative_msec(struct timespec*  abstime, unsigned  msecs, clockid_t  clock)
1229 {
1230     clock_gettime(clock, abstime);
1231     abstime->tv_sec  += msecs/1000;
1232     abstime->tv_nsec += (msecs%1000)*1000000;
1233     if (abstime->tv_nsec >= 1000000000) {
1234         abstime->tv_sec++;
1235         abstime->tv_nsec -= 1000000000;
1236     }
1237 }
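/* Worked example: with msecs == 2500 and a current clock reading of
 * { tv_sec = 100, tv_nsec = 900000000 }, __timespec_to_relative_msec()
 * first computes { 102, 1400000000 }, then normalizes the nanoseconds
 * to yield the absolute expiration time { 103, 400000000 }.
 */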
1238 
1239 int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
1240 {
1241     clockid_t        clock = CLOCK_MONOTONIC;
1242     struct timespec  abstime;
1243     struct timespec  ts;
1244     int              mtype, tid, oldv, new_lock_type, shared;
1245 
1246     /* compute absolute expiration time */
1247     __timespec_to_relative_msec(&abstime, msecs, clock);
1248 
1249     if (__unlikely(mutex == NULL))
1250         return EINVAL;
1251 
1252     mtype  = (mutex->value & MUTEX_TYPE_MASK);
1253     shared = (mutex->value & MUTEX_SHARED_MASK);
1254 
1255     /* Handle common case first */
1256     if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
1257     {
1258         /* fast path for uncontended lock */
1259         if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
1260             ANDROID_MEMBAR_FULL();
1261             return 0;
1262         }
1263 
1264         /* loop while needed */
1265         while (__atomic_swap(shared|2, &mutex->value) != (shared|0)) {
1266             if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
1267                 return EBUSY;
1268 
1269             __futex_wait_ex(&mutex->value, shared, shared|2, &ts);
1270         }
1271         ANDROID_MEMBAR_FULL();
1272         return 0;
1273     }
1274 
1275     /* Do we already own this recursive or error-check mutex ? */
1276     tid = __get_thread()->kernel_id;
1277     if ( tid == MUTEX_OWNER(mutex) )
1278     {
1279         int  oldv, counter;
1280 
1281         if (mtype == MUTEX_TYPE_ERRORCHECK) {
1282             /* already locked by ourselves */
1283             return EDEADLK;
1284         }
1285 
1286         _recursive_lock();
1287         oldv = mutex->value;
1288         counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
1289         mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
1290         _recursive_unlock();
1291         return 0;
1292     }
1293 
1294     /* We don't own the mutex, so try to get it.
1295      *
1296      * First, we try to change its state from 0 to 1, if this
1297      * doesn't work, try to change it to state 2.
1298      */
1299     new_lock_type = 1;
1300 
1301     /* Compute wait op and restore sharing bit in mtype */
1302     mtype  |= shared;
1303 
1304     for (;;) {
1305         int  oldv;
1306         struct timespec  ts;
1307 
1308         _recursive_lock();
1309         oldv = mutex->value;
1310         if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
1311             mutex->value = ((tid << 16) | mtype | new_lock_type);
1312         } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
1313             oldv ^= 3;
1314             mutex->value = oldv;
1315         }
1316         _recursive_unlock();
1317 
1318         if (oldv == mtype)
1319             break;
1320 
1321         /*
1322          * The lock was held, possibly contended by others.  From
1323          * now on, if we manage to acquire the lock, we have to
1324          * assume that others are still contending for it so that
1325          * we'll wake them when we unlock it.
1326          */
1327         new_lock_type = 2;
1328 
1329         if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
1330             return EBUSY;
1331 
1332         __futex_wait_ex(&mutex->value, shared, oldv, &ts);
1333     }
1334     return 0;
1335 }
1336 
1337 int pthread_condattr_init(pthread_condattr_t *attr)
1338 {
1339     if (attr == NULL)
1340         return EINVAL;
1341 
1342     *attr = PTHREAD_PROCESS_PRIVATE;
1343     return 0;
1344 }
1345 
1346 int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
1347 {
1348     if (attr == NULL || pshared == NULL)
1349         return EINVAL;
1350 
1351     *pshared = *attr;
1352     return 0;
1353 }
1354 
1355 int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
1356 {
1357     if (attr == NULL)
1358         return EINVAL;
1359 
1360     if (pshared != PTHREAD_PROCESS_SHARED &&
1361         pshared != PTHREAD_PROCESS_PRIVATE)
1362         return EINVAL;
1363 
1364     *attr = pshared;
1365     return 0;
1366 }
1367 
1368 int pthread_condattr_destroy(pthread_condattr_t *attr)
1369 {
1370     if (attr == NULL)
1371         return EINVAL;
1372 
1373     *attr = 0xdeada11d;
1374     return 0;
1375 }
1376 
1377 /* We use one bit in condition variable values as the 'shared' flag
1378  * The rest is a counter.
1379  */
1380 #define COND_SHARED_MASK        0x0001
1381 #define COND_COUNTER_INCREMENT  0x0002
1382 #define COND_COUNTER_MASK       (~COND_SHARED_MASK)
1383 
1384 #define COND_IS_SHARED(c)  (((c)->value & COND_SHARED_MASK) != 0)
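/* Example of the encoding above: for a process-shared condition variable whose
 * counter currently reads 5, cond->value == (5 * COND_COUNTER_INCREMENT) |
 * COND_SHARED_MASK == 0x000b. __pthread_cond_pulse() below subtracts
 * COND_COUNTER_INCREMENT from the counter bits while preserving the shared
 * bit, then issues a futex wake; threads that race with the pulse see a
 * changed value and do not go to sleep.
 */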
1385 
1386 /* XXX *technically* there is a race condition that could allow
1387  * XXX a signal to be missed.  If thread A is preempted in _wait()
1388  * XXX after unlocking the mutex and before waiting, and if other
1389  * XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
1390  * XXX before thread A is scheduled again and calls futex_wait(),
1391  * XXX then the signal will be lost.
1392  */
1393 
1394 int pthread_cond_init(pthread_cond_t *cond,
1395                       const pthread_condattr_t *attr)
1396 {
1397     if (cond == NULL)
1398         return EINVAL;
1399 
1400     cond->value = 0;
1401 
1402     if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
1403         cond->value |= COND_SHARED_MASK;
1404 
1405     return 0;
1406 }
1407 
1408 int pthread_cond_destroy(pthread_cond_t *cond)
1409 {
1410     if (cond == NULL)
1411         return EINVAL;
1412 
1413     cond->value = 0xdeadc04d;
1414     return 0;
1415 }
1416 
1417 /* This function is used by pthread_cond_broadcast and
1418  * pthread_cond_signal to atomically decrement the counter
1419  * and then wake up 'counter' threads.
1420  */
1421 static int
1422 __pthread_cond_pulse(pthread_cond_t *cond, int  counter)
1423 {
1424     long flags;
1425 
1426     if (__unlikely(cond == NULL))
1427         return EINVAL;
1428 
1429     flags = (cond->value & ~COND_COUNTER_MASK);
1430     for (;;) {
1431         long oldval = cond->value;
1432         long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
1433                       | flags;
1434         if (__atomic_cmpxchg(oldval, newval, &cond->value) == 0)
1435             break;
1436     }
1437 
1438     /*
1439      * Ensure that all memory accesses previously made by this thread are
1440      * visible to the woken thread(s).  On the other side, the "wait"
1441      * code will issue any necessary barriers when locking the mutex.
1442      *
1443      * This may not strictly be necessary -- if the caller follows
1444      * recommended practice and holds the mutex before signaling the cond
1445      * var, the mutex ops will provide correct semantics.  If they don't
1446      * hold the mutex, they're subject to race conditions anyway.
1447      */
1448     ANDROID_MEMBAR_FULL();
1449 
1450     __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
1451     return 0;
1452 }
1453 
1454 int pthread_cond_broadcast(pthread_cond_t *cond)
1455 {
1456     return __pthread_cond_pulse(cond, INT_MAX);
1457 }
1458 
1459 int pthread_cond_signal(pthread_cond_t *cond)
1460 {
1461     return __pthread_cond_pulse(cond, 1);
1462 }
1463 
1464 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
1465 {
1466     return pthread_cond_timedwait(cond, mutex, NULL);
1467 }
1468 
1469 int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
1470                                       pthread_mutex_t * mutex,
1471                                       const struct timespec *reltime)
1472 {
1473     int  status;
1474     int  oldvalue = cond->value;
1475 
1476     pthread_mutex_unlock(mutex);
1477     status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), oldvalue, reltime);
1478     pthread_mutex_lock(mutex);
1479 
1480     if (status == (-ETIMEDOUT)) return ETIMEDOUT;
1481     return 0;
1482 }
1483 
1484 int __pthread_cond_timedwait(pthread_cond_t *cond,
1485                              pthread_mutex_t * mutex,
1486                              const struct timespec *abstime,
1487                              clockid_t clock)
1488 {
1489     struct timespec ts;
1490     struct timespec * tsp;
1491 
1492     if (abstime != NULL) {
1493         if (__timespec_to_absolute(&ts, abstime, clock) < 0)
1494             return ETIMEDOUT;
1495         tsp = &ts;
1496     } else {
1497         tsp = NULL;
1498     }
1499 
1500     return __pthread_cond_timedwait_relative(cond, mutex, tsp);
1501 }
1502 
1503 int pthread_cond_timedwait(pthread_cond_t *cond,
1504                            pthread_mutex_t * mutex,
1505                            const struct timespec *abstime)
1506 {
1507     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
1508 }
1509 
1510 
1511 /* this one exists only for backward binary compatibility */
1512 int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
1513                                      pthread_mutex_t * mutex,
1514                                      const struct timespec *abstime)
1515 {
1516     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
1517 }
1518 
1519 int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
1520                                      pthread_mutex_t * mutex,
1521                                      const struct timespec *abstime)
1522 {
1523     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
1524 }
1525 
1526 int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
1527                                       pthread_mutex_t * mutex,
1528                                       const struct timespec *reltime)
1529 {
1530     return __pthread_cond_timedwait_relative(cond, mutex, reltime);
1531 }
1532 
1533 int pthread_cond_timeout_np(pthread_cond_t *cond,
1534                             pthread_mutex_t * mutex,
1535                             unsigned msecs)
1536 {
1537     struct timespec ts;
1538 
1539     ts.tv_sec = msecs / 1000;
1540     ts.tv_nsec = (msecs % 1000) * 1000000;
1541 
1542     return __pthread_cond_timedwait_relative(cond, mutex, &ts);
1543 }
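/* Example: the canonical predicate loop expected by the wait functions above.
 * 'ready', 'lock' and 'cond' are illustrative caller-side variables.
 *
 *     pthread_mutex_lock(&lock);
 *     while (!ready)                         // re-check: wakeups may be spurious
 *         pthread_cond_wait(&cond, &lock);   // unlocks, waits, and relocks the mutex
 *     ...                                    // here 'ready' is true and 'lock' is held
 *     pthread_mutex_unlock(&lock);
 */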
1544 
1545 
1546 
1547 /* A technical note regarding our thread-local-storage (TLS) implementation:
1548  *
1549  * There can be up to TLSMAP_SIZE independent TLS keys in a given process,
1550  * though the first TLSMAP_START keys are reserved for Bionic to hold
1551  * special thread-specific variables like errno or a pointer to
1552  * the current thread's descriptor.
1553  *
1554  * while they are stored in the TLS area, these reserved entries cannot be
1555  * accessed through pthread_getspecific() / pthread_setspecific() or pthread_key_delete().
1556  *
1557  * also, some entries in the key table are pre-allocated (see tlsmap_lock)
1558  * to greatly simplify and speed up some OpenGL-related operations, though
1559  * the initial value will be NULL on all threads.
1560  *
1561  * you can use pthread_getspecific()/setspecific() on these, and in theory
1562  * you could also call pthread_key_delete() as well, though this would
1563  * probably break some apps.
1564  *
1565  * The 'tlsmap_t' type defined below implements a shared global map of
1566  * currently created/allocated TLS keys and the destructors associated
1567  * with them. You should use tlsmap_lock/unlock to access it to avoid
1568  * any race condition.
1569  *
1570  * the global TLS map simply contains a bitmap of allocated keys, and
1571  * an array of destructors.
1572  *
1573  * each thread has a TLS area that is a simple array of TLSMAP_SIZE void*
1574  * pointers. the TLS area of the main thread is stack-allocated in
1575  * __libc_init_common, while the TLS area of other threads is placed at
1576  * the top of their stack in pthread_create.
1577  *
1578  * when pthread_key_create() is called, it finds the first free key in the
1579  * bitmap, then set it to 1, saving the destructor altogether
1580  *
1581  * when pthread_key_delete() is called. it will erase the key's bitmap bit
1582  * and its destructor, and will also clear the key data in the TLS area of
1583  * all created threads. As mandated by Posix, it is the responsability of
1584  * the caller of pthread_key_delete() to properly reclaim the objects that
1585  * were pointed to by these data fields (either before or after the call).
1586  *
1587  */
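/* Illustrative sketch (not part of bionic): the usual TLS key lifecycle as
 * seen by an application. The key name, buffer type and size are hypothetical.
 */
#if 0
static pthread_key_t g_buf_key;

static void buf_destructor(void *ptr)
{
    free(ptr);  /* runs at thread exit for every thread with a non-NULL value */
}

static void setup(void)
{
    pthread_key_create(&g_buf_key, buf_destructor);
}

static char *get_thread_buffer(void)
{
    char *buf = pthread_getspecific(g_buf_key);
    if (buf == NULL) {
        buf = malloc(256);
        pthread_setspecific(g_buf_key, buf);   /* value is private to this thread */
    }
    return buf;
}
#endif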
1588 
1589 /* TLS Map implementation
1590  */
1591 
1592 #define TLSMAP_START      (TLS_SLOT_MAX_WELL_KNOWN+1)
1593 #define TLSMAP_SIZE       BIONIC_TLS_SLOTS
1594 #define TLSMAP_BITS       32
1595 #define TLSMAP_WORDS      ((TLSMAP_SIZE+TLSMAP_BITS-1)/TLSMAP_BITS)
1596 #define TLSMAP_WORD(m,k)  (m)->map[(k)/TLSMAP_BITS]
1597 #define TLSMAP_MASK(k)    (1U << ((k)&(TLSMAP_BITS-1)))
1598 
1599 /* this macro is used to quickly check that a key belongs to a reasonable range */
1600 #define TLSMAP_VALIDATE_KEY(key)  \
1601     ((key) >= TLSMAP_START && (key) < TLSMAP_SIZE)
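/* Worked example (added for clarity): with TLSMAP_BITS == 32, key 35 lives in
 * map word 35/32 == 1 and is tested/set with mask 1U << (35 & 31) == 1U << 3,
 * so TLSMAP_WORD(m,35) & TLSMAP_MASK(35) checks bit 3 of m->map[1].
 */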
1602 
1603 /* the type of tls key destructor functions */
1604 typedef void (*tls_dtor_t)(void*);
1605 
1606 typedef struct {
1607     int         init;                  /* see comment in tlsmap_lock() */
1608     uint32_t    map[TLSMAP_WORDS];     /* bitmap of allocated keys */
1609     tls_dtor_t  dtors[TLSMAP_SIZE];    /* key destructors */
1610 } tlsmap_t;
1611 
1612 static pthread_mutex_t  _tlsmap_lock = PTHREAD_MUTEX_INITIALIZER;
1613 static tlsmap_t         _tlsmap;
1614 
1615 /* lock the global TLS map lock and return a handle to it */
1616 static __inline__ tlsmap_t* tlsmap_lock(void)
1617 {
1618     tlsmap_t*   m = &_tlsmap;
1619 
1620     pthread_mutex_lock(&_tlsmap_lock);
1621     /* we need to initialize the first entry of the 'map' array
1622      * with the value TLS_DEFAULT_ALLOC_MAP. doing it statically
1623      * when declaring _tlsmap is a bit awkward and is going to
1624      * produce warnings, so do it the first time we use the map
1625      * instead
1626      */
1627     if (__unlikely(!m->init)) {
1628         TLSMAP_WORD(m,0) = TLS_DEFAULT_ALLOC_MAP;
1629         m->init          = 1;
1630     }
1631     return m;
1632 }
1633 
1634 /* unlock the global TLS map */
1635 static __inline__ void tlsmap_unlock(tlsmap_t*  m)
1636 {
1637     pthread_mutex_unlock(&_tlsmap_lock);
1638     (void)m;  /* a good compiler is a happy compiler */
1639 }
1640 
1641 /* test to see whether a key is allocated */
1642 static __inline__ int tlsmap_test(tlsmap_t*  m, int  key)
1643 {
1644     return (TLSMAP_WORD(m,key) & TLSMAP_MASK(key)) != 0;
1645 }
1646 
1647 /* set the destructor and bit flag on a newly allocated key */
1648 static __inline__ void tlsmap_set(tlsmap_t*  m, int  key, tls_dtor_t  dtor)
1649 {
1650     TLSMAP_WORD(m,key) |= TLSMAP_MASK(key);
1651     m->dtors[key]       = dtor;
1652 }
1653 
1654 /* clear the destructor and bit flag on an existing key */
1655 static __inline__ void  tlsmap_clear(tlsmap_t*  m, int  key)
1656 {
1657     TLSMAP_WORD(m,key) &= ~TLSMAP_MASK(key);
1658     m->dtors[key]       = NULL;
1659 }
1660 
1661 /* allocate a new TLS key, return -1 if no room left */
1662 static int tlsmap_alloc(tlsmap_t*  m, tls_dtor_t  dtor)
1663 {
1664     int  key;
1665 
1666     for ( key = TLSMAP_START; key < TLSMAP_SIZE; key++ ) {
1667         if ( !tlsmap_test(m, key) ) {
1668             tlsmap_set(m, key, dtor);
1669             return key;
1670         }
1671     }
1672     return -1;
1673 }
1674 
1675 
1676 int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *))
1677 {
1678     uint32_t   err = ENOMEM;
1679     tlsmap_t*  map = tlsmap_lock();
1680     int        k   = tlsmap_alloc(map, destructor_function);
1681 
1682     if (k >= 0) {
1683         *key = k;
1684         err  = 0;
1685     }
1686     tlsmap_unlock(map);
1687     return err;
1688 }
1689 
1690 
1691 /* This deletes a pthread_key_t. Note that the standard mandates that this does
1692  * not call the destructor of non-NULL key values. Instead, it is the
1693  * responsibility of the caller to properly dispose of the corresponding data
1694  * and resources, using any means it finds suitable.
1695  *
1696  * On the other hand, this function will clear the corresponding key data
1697  * values in all known threads. This prevents later (invalid) calls to
1698  * pthread_getspecific() from receiving invalid/stale values.
1699  */
1700 int pthread_key_delete(pthread_key_t key)
1701 {
1702     uint32_t             err;
1703     pthread_internal_t*  thr;
1704     tlsmap_t*            map;
1705 
1706     if (!TLSMAP_VALIDATE_KEY(key)) {
1707         return EINVAL;
1708     }
1709 
1710     map = tlsmap_lock();
1711 
1712     if (!tlsmap_test(map, key)) {
1713         err = EINVAL;
1714         goto err1;
1715     }
1716 
1717     /* clear value in all threads */
1718     pthread_mutex_lock(&gThreadListLock);
1719     for ( thr = gThreadList; thr != NULL; thr = thr->next ) {
1720         /* avoid zombie threads with a negative 'join_count'. these are really
1721          * already dead and don't have a TLS area anymore.
1722          *
1723          * similarly, it is possible to have thr->tls == NULL for threads that
1724          * were just recently created through pthread_create() but whose
1725          * startup trampoline (__thread_entry) hasn't been run yet by the
1726          * scheduler. so check for this too.
1727          */
1728         if (thr->join_count < 0 || !thr->tls)
1729             continue;
1730 
1731         thr->tls[key] = NULL;
1732     }
1733     tlsmap_clear(map, key);
1734 
1735     pthread_mutex_unlock(&gThreadListLock);
1736     err = 0;
1737 
1738 err1:
1739     tlsmap_unlock(map);
1740     return err;
1741 }
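/* Illustrative sketch (not part of bionic): since pthread_key_delete() never
 * runs destructors, a caller that owns heap data behind a key is expected to
 * reclaim it itself, e.g. for the current thread:
 */
#if 0
static void drop_key(pthread_key_t key)
{
    void *data = pthread_getspecific(key);
    free(data);                 /* reclaim our own value; other threads do the same */
    pthread_key_delete(key);    /* the key's slot is then cleared in every thread */
}
#endif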
1742 
1743 
1744 int pthread_setspecific(pthread_key_t key, const void *ptr)
1745 {
1746     int        err = EINVAL;
1747     tlsmap_t*  map;
1748 
1749     if (TLSMAP_VALIDATE_KEY(key)) {
1750         /* check that we're trying to set data for an allocated key */
1751         map = tlsmap_lock();
1752         if (tlsmap_test(map, key)) {
1753             ((uint32_t *)__get_tls())[key] = (uint32_t)ptr;
1754             err = 0;
1755         }
1756         tlsmap_unlock(map);
1757     }
1758     return err;
1759 }
1760 
1761 void * pthread_getspecific(pthread_key_t key)
1762 {
1763     if (!TLSMAP_VALIDATE_KEY(key)) {
1764         return NULL;
1765     }
1766 
1767     /* for performance reasons, we do not lock/unlock the global TLS map
1768      * to check that the key is properly allocated. if the key was not
1769      * allocated, the value read from the TLS should always be NULL
1770      * due to pthread_key_delete() clearing the values for all threads.
1771      */
1772     return (void *)(((unsigned *)__get_tls())[key]);
1773 }
1774 
1775 /* POSIX mandates that this be defined in <limits.h>, but we don't have
1776  * it just yet.
1777  */
1778 #ifndef PTHREAD_DESTRUCTOR_ITERATIONS
1779 #  define PTHREAD_DESTRUCTOR_ITERATIONS  4
1780 #endif
1781 
1782 /* this function is called from pthread_exit() to remove all TLS key data
1783  * from this thread's TLS area. this must call the destructor of all keys
1784  * that have a non-NULL data value (and a non-NULL destructor).
1785  *
1786  * because destructors can do funky things like deleting/creating other
1787  * keys, we need to implement this in a loop
1788  */
1789 static void pthread_key_clean_all(void)
1790 {
1791     tlsmap_t*    map;
1792     void**       tls = (void**)__get_tls();
1793     int          rounds = PTHREAD_DESTRUCTOR_ITERATIONS;
1794 
1795     map = tlsmap_lock();
1796 
1797     for (rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; rounds--)
1798     {
1799         int  kk, count = 0;
1800 
1801         for (kk = TLSMAP_START; kk < TLSMAP_SIZE; kk++) {
1802             if ( tlsmap_test(map, kk) )
1803             {
1804                 void*       data = tls[kk];
1805                 tls_dtor_t  dtor = map->dtors[kk];
1806 
1807                 if (data != NULL && dtor != NULL)
1808                 {
1809                    /* we need to clear the key data now; this will prevent the
1810                     * destructor (or a later one) from seeing the old value if
1811                     * it calls pthread_getspecific() for some odd reason
1812                     *
1813                     * we do not do this if 'dtor == NULL' just in case another
1814                     * destructor function might be responsible for manually
1815                     * releasing the corresponding data.
1816                     */
1817                     tls[kk] = NULL;
1818 
1819                    /* because the destructor is free to call pthread_key_create
1820                     * and/or pthread_key_delete, we need to temporarily unlock
1821                     * the TLS map
1822                     */
1823                     tlsmap_unlock(map);
1824                     (*dtor)(data);
1825                     map = tlsmap_lock();
1826 
1827                     count += 1;
1828                 }
1829             }
1830         }
1831 
1832         /* if we didn't call any destructor, there is no need to check the
1833          * TLS data again
1834          */
1835         if (count == 0)
1836             break;
1837     }
1838     tlsmap_unlock(map);
1839 }
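/* Illustrative sketch (not part of bionic): why several rounds can be needed.
 * If key A's destructor stores a fresh value under key B, the cleanup loop may
 * only pick that value up on a later round, which is why the loop repeats,
 * bounded by PTHREAD_DESTRUCTOR_ITERATIONS. The keys and values are hypothetical.
 */
#if 0
static pthread_key_t g_key_a, g_key_b;

static void destroy_b(void *p) { free(p); }

static void destroy_a(void *p)
{
    free(p);
    /* re-populating another key from inside a destructor forces another round */
    pthread_setspecific(g_key_b, malloc(16));
}
#endif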
1840 
1841 // man says this should be in <linux/unistd.h>, but it isn't
1842 extern int tkill(int tid, int sig);
1843 
1844 int pthread_kill(pthread_t tid, int sig)
1845 {
1846     int  ret;
1847     int  old_errno = errno;
1848     pthread_internal_t * thread = (pthread_internal_t *)tid;
1849 
1850     ret = tkill(thread->kernel_id, sig);
1851     if (ret < 0) {
1852         ret = errno;
1853         errno = old_errno;
1854     }
1855 
1856     return ret;
1857 }
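/* Illustrative sketch (not part of bionic): directing a signal at one thread.
 * The handler and thread handle are hypothetical; note that pthread_kill()
 * returns the error code directly rather than setting errno.
 */
#if 0
static void on_usr1(int sig) { (void)sig; /* async-signal-safe work only */ }

static int poke_thread(pthread_t worker)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = on_usr1;
    sigaction(SIGUSR1, &sa, NULL);

    return pthread_kill(worker, SIGUSR1);   /* 0 on success, e.g. ESRCH on error */
}
#endif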
1858 
1859 extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);
1860 
1861 int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
1862 {
1863     /* pthread_sigmask must return the error code, but the syscall
1864      * will set errno instead and return 0/-1
1865      */
1866     int ret, old_errno = errno;
1867 
1868     /* Use NSIG, which corresponds to the number of signals in
1869      * our 32-bit sigset_t implementation. As such, this function, or
1870      * anything that deals with sigset_t, cannot manage real-time signals
1871      * (signo >= 32). We might want to introduce sigset_rt_t as an
1872      * extension to do so in the future.
1873      */
1874     ret = __rt_sigprocmask(how, set, oset, NSIG / 8);
1875     if (ret < 0)
1876         ret = errno;
1877 
1878     errno = old_errno;
1879     return ret;
1880 }
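/* Illustrative sketch (not part of bionic): blocking SIGUSR1 in the calling
 * thread only. Newly created threads inherit the signal mask of their creator.
 */
#if 0
static int block_usr1_in_this_thread(void)
{
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    return pthread_sigmask(SIG_BLOCK, &set, NULL);   /* returns 0 or an errno value */
}
#endif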
1881 
1882 
1883 int pthread_getcpuclockid(pthread_t  tid, clockid_t  *clockid)
1884 {
1885     const int            CLOCK_IDTYPE_BITS = 3;
1886     pthread_internal_t*  thread = (pthread_internal_t*)tid;
1887 
1888     if (!thread)
1889         return ESRCH;
1890 
1891     *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
1892     return 0;
1893 }
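/* Illustrative sketch (not part of bionic): reading the CPU time consumed by a
 * given thread via the clockid encoded above. The thread handle is hypothetical.
 */
#if 0
static int thread_cpu_time(pthread_t worker, struct timespec *out)
{
    clockid_t cid;
    int err = pthread_getcpuclockid(worker, &cid);
    if (err != 0)
        return err;
    return clock_gettime(cid, out) ? errno : 0;
}
#endif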
1894 
1895 
1896 /* NOTE: this implementation doesn't support an init function that throws a C++ exception
1897  *       or calls fork()
1898  */
1899 int  pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
1900 {
1901     static pthread_mutex_t   once_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
1902     volatile pthread_once_t* ocptr = once_control;
1903 
1904     pthread_once_t tmp = *ocptr;
1905     ANDROID_MEMBAR_FULL();
1906     if (tmp == PTHREAD_ONCE_INIT) {
1907         pthread_mutex_lock( &once_lock );
1908         if (*ocptr == PTHREAD_ONCE_INIT) {
1909             (*init_routine)();
1910             ANDROID_MEMBAR_FULL();
1911             *ocptr = ~PTHREAD_ONCE_INIT;
1912         }
1913         pthread_mutex_unlock( &once_lock );
1914     }
1915     return 0;
1916 }
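/* Illustrative sketch (not part of bionic): typical one-time initialization.
 * The control variable must be statically initialized to PTHREAD_ONCE_INIT,
 * and the init routine must neither throw nor fork (see NOTE above). The
 * table and its initializer are hypothetical.
 */
#if 0
static pthread_once_t g_once = PTHREAD_ONCE_INIT;
static int g_table[256];

static void init_table(void) { /* fill g_table exactly once */ }

static void use_table(void)
{
    pthread_once(&g_once, init_table);   /* safe to call from many threads */
    /* ... read g_table ... */
}
#endif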
1917 
1918 /* This value is not exported by kernel headers, so hardcode it here */
1919 #define MAX_TASK_COMM_LEN	16
1920 #define TASK_COMM_FMT 		"/proc/self/task/%u/comm"
1921 
1922 int pthread_setname_np(pthread_t thid, const char *thname)
1923 {
1924     size_t thname_len;
1925     int saved_errno, ret;
1926 
1927     if (thid == 0 || thname == NULL)
1928         return EINVAL;
1929 
1930     thname_len = strlen(thname);
1931     if (thname_len >= MAX_TASK_COMM_LEN)
1932         return ERANGE;
1933 
1934     saved_errno = errno;
1935     if (thid == pthread_self())
1936     {
1937         ret = prctl(PR_SET_NAME, (unsigned long)thname, 0, 0, 0) ? errno : 0;
1938     }
1939     else
1940     {
1941         /* Have to change another thread's name */
1942         pthread_internal_t *thread = (pthread_internal_t *)thid;
1943         char comm_name[sizeof(TASK_COMM_FMT) + 8];
1944         ssize_t n;
1945         int fd;
1946 
1947         snprintf(comm_name, sizeof(comm_name), TASK_COMM_FMT, (unsigned int)thread->kernel_id);
1948         fd = open(comm_name, O_RDWR);
1949         if (fd == -1)
1950         {
1951             ret = errno;
1952             goto exit;
1953         }
1954         n = TEMP_FAILURE_RETRY(write(fd, thname, thname_len));
1955         close(fd);
1956 
1957         if (n < 0)
1958             ret = errno;
1959         else if ((size_t)n != thname_len)
1960             ret = EIO;
1961         else
1962             ret = 0;
1963     }
1964 exit:
1965     errno = saved_errno;
1966     return ret;
1967 }
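/* Illustrative sketch (not part of bionic): naming the calling thread. The
 * name must fit in MAX_TASK_COMM_LEN (16 bytes including the terminator),
 * otherwise ERANGE is returned. The name used here is hypothetical.
 */
#if 0
static void name_self(void)
{
    int err = pthread_setname_np(pthread_self(), "worker-pool");
    if (err != 0) {
        /* e.g. ERANGE for an over-long name; errno itself is left untouched */
    }
}
#endif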
1968 
1969 /* Return the kernel thread ID for a pthread.
1970  * This is only defined for implementations where pthread <-> kernel is 1:1, which this is.
1971  * Not the same as pthread_getthreadid_np, which is commonly defined to be opaque.
1972  * Internal, not an NDK API.
1973  */
1974 
1975 pid_t __pthread_gettid(pthread_t thid)
1976 {
1977     pthread_internal_t* thread = (pthread_internal_t*)thid;
1978     return thread->kernel_id;
1979 }
1980