/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/atomics.h>
#include <bionic_tls.h>
#include <sys/mman.h>
#include <pthread.h>
#include <time.h>
#include "pthread_internal.h"
#include "thread_private.h"
#include <limits.h>
#include <memory.h>
#include <assert.h>
#include <malloc.h>
#include <bionic_futex.h>
#include <bionic_atomic_inline.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <fcntl.h>

extern int  __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
extern void _exit_thread(int  retCode);
extern int  __set_errno(int);

int  __futex_wake_ex(volatile void *ftx, int pshared, int val)
{
    return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
}

int  __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout)
{
    return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
}

#define  __likely(cond)    __builtin_expect(!!(cond), 1)
#define  __unlikely(cond)  __builtin_expect(!!(cond), 0)

void _thread_created_hook(pid_t thread_id) __attribute__((noinline));

#define PTHREAD_ATTR_FLAG_DETACHED      0x00000001
#define PTHREAD_ATTR_FLAG_USER_STACK    0x00000002

#define DEFAULT_STACKSIZE (1024 * 1024)
#define STACKBASE 0x10000000

static uint8_t * gStackBase = (uint8_t *)STACKBASE;

static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;


static const pthread_attr_t gDefaultPthreadAttr = {
    .flags = 0,
    .stack_base = NULL,
    .stack_size = DEFAULT_STACKSIZE,
    .guard_size = PAGE_SIZE,
    .sched_policy = SCHED_NORMAL,
    .sched_priority = 0
};

#define  INIT_THREADS  1

static pthread_internal_t*  gThreadList = NULL;
static pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;

/* we simply malloc/free the internal pthread_internal_t structures. we may
 * want to use a different allocation scheme in the future, but this one
 * should be more than enough for now.
 */
static pthread_internal_t*
_pthread_internal_alloc(void)
{
    pthread_internal_t*   thread;

    thread = calloc( 1, sizeof(*thread) );
    if (thread)
        thread->intern = 1;

    return thread;
}

static void
_pthread_internal_free( pthread_internal_t*  thread )
{
    if (thread && thread->intern) {
        thread->intern = 0;  /* just in case */
        free(thread);
    }
}


static void
_pthread_internal_remove_locked( pthread_internal_t*  thread )
{
    if (thread->next != NULL)   /* guard against the tail of the list */
        thread->next->pref = thread->pref;
    thread->pref[0] = thread->next;
}

static void
_pthread_internal_remove( pthread_internal_t*  thread )
{
    pthread_mutex_lock(&gThreadListLock);
    _pthread_internal_remove_locked(thread);
    pthread_mutex_unlock(&gThreadListLock);
}

static void
_pthread_internal_add( pthread_internal_t*  thread )
{
    pthread_mutex_lock(&gThreadListLock);
    thread->pref = &gThreadList;
    thread->next = thread->pref[0];
    if (thread->next)
        thread->next->pref = &thread->next;
    thread->pref[0] = thread;
    pthread_mutex_unlock(&gThreadListLock);
}
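
/* The list above is a doubly-linked list that stores, in 'pref', a pointer
 * to the previous link ('&gThreadList' or '&prev->next') rather than a
 * pointer to the previous node. A minimal sketch of the invariant, for a
 * list gThreadList -> A -> B:
 *
 *     A->pref == &gThreadList    and    A->pref[0] == A
 *     B->pref == &A->next        and    B->pref[0] == B
 *
 * so removal is just two stores and needs no special case for the head of
 * the list (see _pthread_internal_remove_locked above).
 */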

pthread_internal_t*
__get_thread(void)
{
    void**  tls = (void**)__get_tls();

    return  (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
}


void*
__get_stack_base(int  *p_stack_size)
{
    pthread_internal_t*  thread = __get_thread();

    *p_stack_size = thread->attr.stack_size;
    return thread->attr.stack_base;
}


void  __init_tls(void**  tls, void*  thread)
{
    int  nn;

    ((pthread_internal_t*)thread)->tls = tls;

    // slot 0 must point to the TLS area itself; this is required by the
    // x86 Linux kernel's thread-local storage implementation
    tls[TLS_SLOT_SELF]      = (void*)tls;
    tls[TLS_SLOT_THREAD_ID] = thread;
    for (nn = TLS_SLOT_ERRNO; nn < BIONIC_TLS_SLOTS; nn++)
       tls[nn] = 0;

    __set_tls( (void*)tls );
}


/*
 * This trampoline is called from the assembly clone() function
 */
void __thread_entry(int (*func)(void*), void *arg, void **tls)
{
    pthread_internal_t * thrInfo;

    // Wait for our creating thread to release us. This lets it have time to
    // notify gdb about this thread before it starts doing anything.
    pthread_mutex_t * start_mutex = (pthread_mutex_t *)&tls[TLS_SLOT_SELF];
    pthread_mutex_lock(start_mutex);
    pthread_mutex_destroy(start_mutex);

    thrInfo = (pthread_internal_t *) tls[TLS_SLOT_THREAD_ID];

    __init_tls( tls, thrInfo );

    pthread_exit( (void*)func(arg) );
}

void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base)
{
    if (attr == NULL) {
        thread->attr = gDefaultPthreadAttr;
    } else {
        thread->attr = *attr;
    }
    thread->attr.stack_base = stack_base;
    thread->kernel_id       = kernel_id;

    // set the scheduling policy/priority of the thread
    if (thread->attr.sched_policy != SCHED_NORMAL) {
        struct sched_param param;
        param.sched_priority = thread->attr.sched_priority;
        sched_setscheduler(kernel_id, thread->attr.sched_policy, &param);
    }

    pthread_cond_init(&thread->join_cond, NULL);
    thread->join_count = 0;

    thread->cleanup_stack = NULL;

    _pthread_internal_add(thread);
}


/* XXX stacks not reclaimed if thread spawn fails */
/* XXX stack address spaces should be reused if available again */

static void *mkstack(size_t size, size_t guard_size)
{
    void * stack;

    pthread_mutex_lock(&mmap_lock);

    stack = mmap((void *)gStackBase, size,
                 PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                 -1, 0);

    if (stack == MAP_FAILED) {
        stack = NULL;
        goto done;
    }

    /* turn the bottom of the mapping into an inaccessible guard page */
    if (mprotect(stack, guard_size, PROT_NONE)) {
        munmap(stack, size);
        stack = NULL;
        goto done;
    }

done:
    pthread_mutex_unlock(&mmap_lock);
    return stack;
}

/*
 * Create a new thread. The thread's stack is laid out like so:
 *
 * +---------------------------+
 * |     pthread_internal_t    |
 * +---------------------------+
 * |                           |
 * |          TLS area         |
 * |                           |
 * +---------------------------+
 * |                           |
 * .                           .
 * .         stack area        .
 * .                           .
 * |                           |
 * +---------------------------+
 * |         guard page        |
 * +---------------------------+
 *
 *  note that TLS[0] must be a pointer to itself; this is required
 *  by the thread-local storage implementation of the x86 Linux
 *  kernel, where the TLS pointer is read by reading fs:[0]
 */
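/* As a concrete illustration of the layout arithmetic below (the numbers
 * are hypothetical, not taken from this source): with the default 1 MiB
 * stack (stackSize == 0x100000) mapped at 0x10000000, and 64 TLS slots of
 * 4 bytes each, the TLS area would start at
 *
 *     0x10000000 + 0x100000 - 64*4 == 0x100FFF00
 *
 * and the usable stack grows down from that address toward the guard page
 * at the bottom of the mapping.
 */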
int pthread_create(pthread_t *thread_out, pthread_attr_t const * attr,
                   void *(*start_routine)(void *), void * arg)
{
    char*   stack;
    void**  tls;
    int tid;
    pthread_mutex_t * start_mutex;
    pthread_internal_t * thread;
    int                  madestack = 0;
    int     old_errno = errno;

    /* this will inform the rest of the C library that at least one thread
     * was created. this forces certain functions (e.g. atexit()) to
     * acquire/release locks to protect shared global structures.
     *
     * this works because pthread_create() is not called by the C library
     * initialization routine that sets up the main thread's data structures.
     */
    __isthreaded = 1;

    thread = _pthread_internal_alloc();
    if (thread == NULL)
        return ENOMEM;

    if (attr == NULL) {
        attr = &gDefaultPthreadAttr;
    }

    // make sure the stack size is a multiple of PAGE_SIZE
    size_t stackSize = (attr->stack_size +
                        (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);

    if (!attr->stack_base) {
        stack = mkstack(stackSize, attr->guard_size);
        if (stack == NULL) {
            _pthread_internal_free(thread);
            return ENOMEM;
        }
        madestack = 1;
    } else {
        stack = attr->stack_base;
    }

    // Make room for TLS
    tls = (void**)(stack + stackSize - BIONIC_TLS_SLOTS*sizeof(void*));

    // Create a mutex in TLS_SLOT_SELF for the thread to wait on once it
    // starts, so we can keep it from doing anything until after we notify
    // the debugger about it
    start_mutex = (pthread_mutex_t *) &tls[TLS_SLOT_SELF];
    pthread_mutex_init(start_mutex, NULL);
    pthread_mutex_lock(start_mutex);

    tls[TLS_SLOT_THREAD_ID] = thread;

    tid = __pthread_clone((int(*)(void*))start_routine, tls,
                CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND
                | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED,
                arg);

    if (tid < 0) {
        int  result;
        if (madestack)
            munmap(stack, stackSize);
        _pthread_internal_free(thread);
        result = errno;
        errno = old_errno;
        return result;
    }

    _init_thread(thread, tid, (pthread_attr_t*)attr, stack);

    if (!madestack)
        thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_STACK;

    // Notify any debuggers about the new thread
    pthread_mutex_lock(&gDebuggerNotificationLock);
    _thread_created_hook(tid);
    pthread_mutex_unlock(&gDebuggerNotificationLock);

    // Let the thread do its thing
    pthread_mutex_unlock(start_mutex);

    *thread_out = (pthread_t)thread;
    return 0;
}
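
/* A minimal usage sketch of the API implemented above (illustrative only
 * and compiled out; the 'worker' and 'example' names and the argument
 * values are hypothetical):
 */
#if 0
static void* worker(void* arg)
{
    printf("hello from thread, arg=%p\n", arg);
    return (void*)42;            /* becomes the value seen by pthread_join */
}

static int example(void)
{
    pthread_t t;
    void*     result;
    int       err = pthread_create(&t, NULL, worker, (void*)1);
    if (err != 0)
        return err;              /* errors are returned, not set in errno */
    pthread_join(t, &result);    /* result == (void*)42 */
    return 0;
}
#endif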


int pthread_attr_init(pthread_attr_t * attr)
{
    *attr = gDefaultPthreadAttr;
    return 0;
}

int pthread_attr_destroy(pthread_attr_t * attr)
{
    memset(attr, 0x42, sizeof(pthread_attr_t));
    return 0;
}

int pthread_attr_setdetachstate(pthread_attr_t * attr, int state)
{
    if (state == PTHREAD_CREATE_DETACHED) {
        attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
    } else if (state == PTHREAD_CREATE_JOINABLE) {
        attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
    } else {
        return EINVAL;
    }
    return 0;
}

int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state)
{
    *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED)
           ? PTHREAD_CREATE_DETACHED
           : PTHREAD_CREATE_JOINABLE;
    return 0;
}

int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy)
{
    attr->sched_policy = policy;
    return 0;
}

int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy)
{
    *policy = attr->sched_policy;
    return 0;
}

int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param)
{
    attr->sched_priority = param->sched_priority;
    return 0;
}

int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param)
{
    param->sched_priority = attr->sched_priority;
    return 0;
}

int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1)) || stack_size < PTHREAD_STACK_MIN) {
        return EINVAL;
    }
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size)
{
    *stack_size = attr->stack_size;
    return 0;
}

int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stack_addr)
{
#if 1
    // It's not clear if this is setting the top or bottom of the stack, so don't handle it for now.
    return ENOSYS;
#else
    if ((uint32_t)stack_addr & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_addr;
    return 0;
#endif
}

int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stack_addr)
{
    *stack_addr = (char*)attr->stack_base + attr->stack_size;
    return 0;
}

int pthread_attr_setstack(pthread_attr_t * attr, void * stack_base, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1)) || stack_size < PTHREAD_STACK_MIN) {
        return EINVAL;
    }
    if ((uint32_t)stack_base & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_base;
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstack(pthread_attr_t const * attr, void ** stack_base, size_t * stack_size)
{
    *stack_base = attr->stack_base;
    *stack_size = attr->stack_size;
    return 0;
}

int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size)
{
    if ((guard_size & (PAGE_SIZE - 1)) || guard_size < PAGE_SIZE) {
        return EINVAL;
    }

    attr->guard_size = guard_size;
    return 0;
}

int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size)
{
    *guard_size = attr->guard_size;
    return 0;
}

int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    *attr = thread->attr;
    return 0;
}

int pthread_attr_setscope(pthread_attr_t *attr, int  scope)
{
    if (scope == PTHREAD_SCOPE_SYSTEM)
        return 0;
    if (scope == PTHREAD_SCOPE_PROCESS)
        return ENOTSUP;

    return EINVAL;
}

int pthread_attr_getscope(pthread_attr_t const *attr)
{
    return PTHREAD_SCOPE_SYSTEM;
}


/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support
 *         C++ exceptions and thread cancellation
 */

void __pthread_cleanup_push( __pthread_cleanup_t*      c,
                             __pthread_cleanup_func_t  routine,
                             void*                     arg )
{
    pthread_internal_t*  thread = __get_thread();

    c->__cleanup_routine  = routine;
    c->__cleanup_arg      = arg;
    c->__cleanup_prev     = thread->cleanup_stack;
    thread->cleanup_stack = c;
}

void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
{
    pthread_internal_t*  thread = __get_thread();

    thread->cleanup_stack = c->__cleanup_prev;
    if (execute)
        c->__cleanup_routine(c->__cleanup_arg);
}
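
/* These two functions back the pthread_cleanup_push()/pthread_cleanup_pop()
 * macros from <pthread.h>, which (sketching the usual expansion; see the
 * header for the exact form) allocate a __pthread_cleanup_t on the caller's
 * stack and bracket a scope:
 *
 *     pthread_cleanup_push(free, buffer);   // pushes {free, buffer}
 *     ... code that may call pthread_exit() ...
 *     pthread_cleanup_pop(1);               // pops and runs free(buffer)
 *
 * pthread_exit() below walks the resulting singly-linked cleanup_stack.
 */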

/* used by pthread_exit() to clean all TLS keys of the current thread */
static void pthread_key_clean_all(void);

void pthread_exit(void * retval)
{
    pthread_internal_t*  thread     = __get_thread();
    void*                stack_base = thread->attr.stack_base;
    int                  stack_size = thread->attr.stack_size;
    int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;

    // call the cleanup handlers first
    while (thread->cleanup_stack) {
        __pthread_cleanup_t*  c = thread->cleanup_stack;
        thread->cleanup_stack   = c->__cleanup_prev;
        c->__cleanup_routine(c->__cleanup_arg);
    }

    // call the TLS destructors. it is important to do that before removing this
    // thread from the global list: this ensures that if someone else deletes
    // a TLS key, the corresponding value will be set to NULL in this thread's TLS
    // space (see pthread_key_delete)
    pthread_key_clean_all();

    // if the thread is detached, destroy the pthread_internal_t;
    // otherwise, keep it in memory and signal any joiners
    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        _pthread_internal_remove(thread);
        _pthread_internal_free(thread);
    } else {
       /* the join_count field is used to store the number of threads waiting for
        * the termination of this thread with pthread_join().
        *
        * if it is positive we need to signal the waiters, and we do not touch
        * the count (it will be decremented by the waiters; the last one will
        * also remove/free the thread structure)
        *
        * if it is zero, we set the count value to -1 to indicate that the
        * thread is in 'zombie' state: it has stopped executing, and its stack
        * is gone (as well as its TLS area). when another thread calls pthread_join()
        * on it, it will immediately free the thread and return.
        */
        pthread_mutex_lock(&gThreadListLock);
        thread->return_value = retval;
        if (thread->join_count > 0) {
            pthread_cond_broadcast(&thread->join_cond);
        } else {
            thread->join_count = -1;  /* zombie thread */
        }
        pthread_mutex_unlock(&gThreadListLock);
    }

    // destroy the thread stack
    if (user_stack)
        _exit_thread((int)retval);
    else
        _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
}

int pthread_join(pthread_t thid, void ** ret_val)
{
    pthread_internal_t*  thread = (pthread_internal_t*)thid;
    int                  count;

    // check that the thread still exists and is not detached
    pthread_mutex_lock(&gThreadListLock);

    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            goto FoundIt;

    pthread_mutex_unlock(&gThreadListLock);
    return ESRCH;

FoundIt:
    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        pthread_mutex_unlock(&gThreadListLock);
        return EINVAL;
    }

   /* wait for thread death when needed
    *
    * if 'join_count' is negative, this is a 'zombie' thread that
    * is already dead and without stack/TLS
    *
    * otherwise, we need to increment 'join_count' and wait to be signaled
    */
    count = thread->join_count;
    if (count >= 0) {
        thread->join_count += 1;
        pthread_cond_wait( &thread->join_cond, &gThreadListLock );
        count = --thread->join_count;
    }
    if (ret_val)
        *ret_val = thread->return_value;

    /* remove the thread descriptor when we're the last joiner or when the
     * thread was already a zombie.
     */
    if (count <= 0) {
        _pthread_internal_remove_locked(thread);
        _pthread_internal_free(thread);
    }
    pthread_mutex_unlock(&gThreadListLock);
    return 0;
}

int  pthread_detach( pthread_t  thid )
{
    pthread_internal_t*  thread;
    int                  result = 0;
    int                  flags;

    pthread_mutex_lock(&gThreadListLock);
    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            goto FoundIt;

    result = ESRCH;
    goto Exit;

FoundIt:
    do {
        flags = thread->attr.flags;

        if ( flags & PTHREAD_ATTR_FLAG_DETACHED ) {
            /* thread is not joinable! */
            result = EINVAL;
            goto Exit;
        }
    }
    while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
                              (volatile int*)&thread->attr.flags ) != 0 );
Exit:
    pthread_mutex_unlock(&gThreadListLock);
    return result;
}

pthread_t pthread_self(void)
{
    return (pthread_t)__get_thread();
}

int pthread_equal(pthread_t one, pthread_t two)
{
    return (one == two ? 1 : 0);
}

int pthread_getschedparam(pthread_t thid, int * policy,
                          struct sched_param * param)
{
    int  old_errno = errno;

    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int err = sched_getparam(thread->kernel_id, param);
    if (!err) {
        *policy = sched_getscheduler(thread->kernel_id);
    } else {
        err = errno;
        errno = old_errno;
    }
    return err;
}

int pthread_setschedparam(pthread_t thid, int policy,
                          struct sched_param const * param)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int                  old_errno = errno;
    int                  ret;

    ret = sched_setscheduler(thread->kernel_id, policy, param);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }
    return ret;
}


// mutex lock states
//
// 0: unlocked
// 1: locked, no waiters
// 2: locked, maybe waiters

/* a mutex is implemented as a 32-bit integer holding the following fields
 *
 * bits:     name     description
 * 31-16     tid      owner thread's kernel id (recursive and errorcheck only)
 * 15-14     type     mutex type
 * 13        shared   process-shared flag
 * 12-2      counter  counter of recursive mutexes
 * 1-0       state    lock state (0, 1 or 2)
 */
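
/* For illustration, a worked decoding under the layout above (the numbers
 * are hypothetical): a recursive mutex owned by the thread with kernel id
 * 0x1234, locked twice with no waiters, holds
 *
 *     value = (0x1234 << 16)   tid
 *           |  0x4000          MUTEX_TYPE_RECURSIVE
 *           | (1 << 2)         counter == 1 (one re-lock)
 *           |  1               state == locked, no waiters
 *           = 0x12344005
 */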

#define  MUTEX_OWNER(m)   (((m)->value >> 16) & 0xffff)
#define  MUTEX_COUNTER(m) (((m)->value >> 2) & 0x7ff)  /* 11 bits; must not include the shared bit, see MUTEX_COUNTER_MASK */

#define  MUTEX_TYPE_MASK       0xc000
#define  MUTEX_TYPE_NORMAL     0x0000
#define  MUTEX_TYPE_RECURSIVE  0x4000
#define  MUTEX_TYPE_ERRORCHECK 0x8000

#define  MUTEX_COUNTER_SHIFT  2
#define  MUTEX_COUNTER_MASK   0x1ffc
#define  MUTEX_SHARED_MASK    0x2000

/* a mutex attribute holds the following fields
 *
 * bits:     name       description
 * 3-0       type       type of mutex
 * 4         shared     process-shared flag
 */
#define  MUTEXATTR_TYPE_MASK   0x000f
#define  MUTEXATTR_SHARED_MASK 0x0010


int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = PTHREAD_MUTEX_DEFAULT;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = -1;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    if (attr) {
        int  atype = (*attr & MUTEXATTR_TYPE_MASK);

        if (atype >= PTHREAD_MUTEX_NORMAL &&
            atype <= PTHREAD_MUTEX_ERRORCHECK) {
            *type = atype;
            return 0;
        }
    }
    return EINVAL;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (attr && type >= PTHREAD_MUTEX_NORMAL &&
                type <= PTHREAD_MUTEX_ERRORCHECK ) {
        *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
        return 0;
    }
    return EINVAL;
}

/* process-shared mutexes are only partially supported at the moment,
 * see the comment below */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int  pshared)
{
    if (!attr)
        return EINVAL;

    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* our current implementation of pthread actually supports shared
         * mutexes but won't cleanup if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
{
    if (!attr || !pshared)
        return EINVAL;

    *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED
                                               : PTHREAD_PROCESS_PRIVATE;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t *mutex,
                       const pthread_mutexattr_t *attr)
{
    int value = 0;

    if (mutex == NULL)
        return EINVAL;

    if (__likely(attr == NULL)) {
        mutex->value = MUTEX_TYPE_NORMAL;
        return 0;
    }

    if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
        value |= MUTEX_SHARED_MASK;

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
        value |= MUTEX_TYPE_NORMAL;
        break;
    case PTHREAD_MUTEX_RECURSIVE:
        value |= MUTEX_TYPE_RECURSIVE;
        break;
    case PTHREAD_MUTEX_ERRORCHECK:
        value |= MUTEX_TYPE_ERRORCHECK;
        break;
    default:
        return EINVAL;
    }

    mutex->value = value;
    return 0;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    int ret;

    /* use trylock to ensure that the mutex value is
     * valid and is not already locked. */
    ret = pthread_mutex_trylock(mutex);
    if (ret != 0)
        return ret;

    mutex->value = 0xdead10cc;
    return 0;
}


/*
 * Lock a non-recursive mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static __inline__ void
_normal_lock(pthread_mutex_t*  mutex)
{
    /* We need to preserve the shared flag during operations */
    int  shared = mutex->value & MUTEX_SHARED_MASK;
    /*
     * The common case is an unlocked mutex, so we begin by trying to
     * change the lock's state from 0 to 1.  __atomic_cmpxchg() returns 0
     * if it made the swap successfully.  If the result is nonzero, this
     * lock is already held by another thread.
     */
    if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value ) != 0) {
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2.  We need to swap in the new
         * state value and then wait until somebody wakes us up.
         *
         * __atomic_swap() returns the previous value.  We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock.  If
         * not, another thread still holds the lock and we wait again.
         *
         * The second argument to the __futex_wait() call is compared
         * against the current value.  If it doesn't match, __futex_wait()
         * returns immediately (otherwise, it sleeps for a time specified
         * by the third argument; 0 means sleep forever).  This ensures
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
            __futex_wait_ex(&mutex->value, shared, shared|2, 0);
    }
    ANDROID_MEMBAR_FULL();
}

/*
 * Release a non-recursive mutex.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static __inline__ void
_normal_unlock(pthread_mutex_t*  mutex)
{
    ANDROID_MEMBAR_FULL();

    /* We need to preserve the shared flag during operations */
    int  shared = mutex->value & MUTEX_SHARED_MASK;

    /*
     * The mutex state will be 1 or (rarely) 2.  We use an atomic decrement
     * to release the lock.  __atomic_dec() returns the previous value;
     * if it wasn't 1 we have to do some additional work.
     */
    if (__atomic_dec(&mutex->value) != (shared|1)) {
        /*
         * Start by releasing the lock.  The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
         * hold it, and anybody who tries to sneak in will push it back
         * to state 2.
         *
         * Once we set it to zero the lock is up for grabs.  We follow
         * this with a __futex_wake() to ensure that one of the waiting
         * threads has a chance to grab it.
         *
         * This doesn't cause a race with the swap/wait pair in
         * _normal_lock(), because the __futex_wait() call there will
         * return immediately if the mutex value isn't 2.
         */
        mutex->value = shared;

        /*
         * Wake up one waiting thread.  We don't know which thread will be
         * woken or when it'll start executing -- futexes make no guarantees
         * here.  There may not even be a thread waiting.
         *
         * The newly-woken thread will replace the 0 we just set above
         * with 2, which means that when it eventually releases the mutex
         * it will also call FUTEX_WAKE.  This results in one extra wake
         * call whenever a lock is contended, but lets us avoid forgetting
         * anyone without requiring us to track the number of sleepers.
         *
         * It's possible for another thread to sneak in and grab the lock
         * between the zero assignment above and the wake call below.  If
         * the new thread is "slow" and holds the lock for a while, we'll
         * wake up a sleeper, which will swap in a 2 and then go back to
         * sleep since the lock is still held.  If the new thread is "fast",
         * running to completion before we call wake, the thread we
         * eventually wake will find an unlocked mutex and will execute.
         * Either way we have correct behavior and nobody is orphaned on
         * the wait queue.
         */
        __futex_wake_ex(&mutex->value, shared, 1);
    }
}

static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;

static void
_recursive_lock(void)
{
    _normal_lock(&__recursive_lock);
}

static void
_recursive_unlock(void)
{
    _normal_unlock(&__recursive_lock);
}

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    int mtype, tid, new_lock_type, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mtype  = (mutex->value & MUTEX_TYPE_MASK);
    shared = (mutex->value & MUTEX_SHARED_MASK);

    /* Handle the normal case first */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
        _normal_lock(mutex);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex? */
    tid = __get_thread()->kernel_id;
    if ( tid == MUTEX_OWNER(mutex) )
    {
        int  oldv, counter;

        if (mtype == MUTEX_TYPE_ERRORCHECK) {
            /* trying to re-lock a mutex we already acquired */
            return EDEADLK;
        }
        /*
         * We own the mutex, but other threads are able to change
         * the contents (e.g. promoting it to "contended"), so we
         * need to hold the global lock.
         */
        _recursive_lock();
        oldv         = mutex->value;
        counter      = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
        mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
        _recursive_unlock();
        return 0;
    }

    /* We don't own the mutex, so try to get it.
     *
     * First, we try to change its state from 0 to 1; if this
     * doesn't work, try to change it to state 2.
     */
    new_lock_type = 1;

    /* compute the futex wait opcode and restore the shared flag in mtype */
    mtype |= shared;

    for (;;) {
        int  oldv;

        _recursive_lock();
        oldv = mutex->value;
        if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
            mutex->value = ((tid << 16) | mtype | new_lock_type);
        } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
            oldv ^= 3;
            mutex->value = oldv;
        }
        _recursive_unlock();

        if (oldv == mtype)
            break;

        /*
         * The lock was held, possibly contended by others.  From
         * now on, if we manage to acquire the lock, we have to
         * assume that others are still contending for it so that
         * we'll wake them when we unlock it.
         */
        new_lock_type = 2;

        __futex_wait_ex(&mutex->value, shared, oldv, NULL);
    }
    return 0;
}


int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    int mtype, tid, oldv, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mtype  = (mutex->value & MUTEX_TYPE_MASK);
    shared = (mutex->value & MUTEX_SHARED_MASK);

    /* Handle the common case first */
    if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
        _normal_unlock(mutex);
        return 0;
    }

    /* Do we actually own this recursive or error-check mutex? */
    tid = __get_thread()->kernel_id;
    if ( tid != MUTEX_OWNER(mutex) )
        return EPERM;

    /* We do; decrement the counter, or release the mutex if it is 0 */
    _recursive_lock();
    oldv = mutex->value;
    if (oldv & MUTEX_COUNTER_MASK) {
        mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
        oldv = 0;
    } else {
        mutex->value = shared | mtype;
    }
    _recursive_unlock();

    /* Wake one waiting thread, if any */
    if ((oldv & 3) == 2) {
        __futex_wake_ex(&mutex->value, shared, 1);
    }
    return 0;
}


int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    int mtype, tid, oldv, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mtype  = (mutex->value & MUTEX_TYPE_MASK);
    shared = (mutex->value & MUTEX_SHARED_MASK);

    /* Handle the common case first */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
    {
        if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        return EBUSY;
    }

    /* Do we already own this recursive or error-check mutex? */
    tid = __get_thread()->kernel_id;
    if ( tid == MUTEX_OWNER(mutex) )
    {
        int counter;

        if (mtype == MUTEX_TYPE_ERRORCHECK) {
            /* already locked by ourselves */
            return EDEADLK;
        }

        _recursive_lock();
        oldv = mutex->value;
        counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
        mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
        _recursive_unlock();
        return 0;
    }

    /* Restore the sharing bit in mtype */
    mtype |= shared;

    /* Try to lock it, just once. */
    _recursive_lock();
    oldv = mutex->value;
    if (oldv == mtype)  /* uncontended released lock => state 1 */
        mutex->value = ((tid << 16) | mtype | 1);
    _recursive_unlock();

    if (oldv != mtype)
        return EBUSY;

    return 0;
}


/* initialize 'ts' with the difference between 'abstime' and the current time
 * according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise.
 */
static int
__timespec_to_absolute(struct timespec*  ts, const struct timespec*  abstime, clockid_t  clock)
{
    clock_gettime(clock, ts);
    ts->tv_sec  = abstime->tv_sec - ts->tv_sec;
    ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
    if (ts->tv_nsec < 0) {
        ts->tv_sec--;
        ts->tv_nsec += 1000000000;
    }
    if ((ts->tv_nsec < 0) || (ts->tv_sec < 0))
        return -1;

    return 0;
}
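
/* Worked example for the conversion above (hypothetical values): with
 * abstime = {5, 200000000} and a current time of {3, 900000000}, the raw
 * difference is {2, -700000000}, which the borrow step normalizes to
 * {1, 300000000}, i.e. 1.3 seconds from now.
 */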
1209 
1210 /* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
1211  * milliseconds.
1212  */
1213 static void
__timespec_to_relative_msec(struct timespec * abstime,unsigned msecs,clockid_t clock)1214 __timespec_to_relative_msec(struct timespec*  abstime, unsigned  msecs, clockid_t  clock)
1215 {
1216     clock_gettime(clock, abstime);
1217     abstime->tv_sec  += msecs/1000;
1218     abstime->tv_nsec += (msecs%1000)*1000000;
1219     if (abstime->tv_nsec >= 1000000000) {
1220         abstime->tv_sec++;
1221         abstime->tv_nsec -= 1000000000;
1222     }
1223 }
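
/* For example (hypothetical values): msecs = 2500 against a current time of
 * {10, 600000000} adds {2, 500000000}, and the carry step turns the
 * intermediate {12, 1100000000} into the normalized {13, 100000000}.
 */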

int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
{
    clockid_t        clock = CLOCK_MONOTONIC;
    struct timespec  abstime;
    struct timespec  ts;
    int              mtype, tid, new_lock_type, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    /* compute the absolute expiration time */
    __timespec_to_relative_msec(&abstime, msecs, clock);

    mtype  = (mutex->value & MUTEX_TYPE_MASK);
    shared = (mutex->value & MUTEX_SHARED_MASK);

    /* Handle the common case first */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
    {
        /* fast path for the uncontended lock */
        if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* loop while needed */
        while (__atomic_swap(shared|2, &mutex->value) != (shared|0)) {
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            __futex_wait_ex(&mutex->value, shared, shared|2, &ts);
        }
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    /* Do we already own this recursive or error-check mutex? */
    tid = __get_thread()->kernel_id;
    if ( tid == MUTEX_OWNER(mutex) )
    {
        int  oldv, counter;

        if (mtype == MUTEX_TYPE_ERRORCHECK) {
            /* already locked by ourselves */
            return EDEADLK;
        }

        _recursive_lock();
        oldv = mutex->value;
        counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
        mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
        _recursive_unlock();
        return 0;
    }

    /* We don't own the mutex, so try to get it.
     *
     * First, we try to change its state from 0 to 1; if this
     * doesn't work, try to change it to state 2.
     */
    new_lock_type = 1;

    /* Compute the wait op and restore the sharing bit in mtype */
    mtype |= shared;

    for (;;) {
        int  oldv;
        struct timespec  ts;

        _recursive_lock();
        oldv = mutex->value;
        if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
            mutex->value = ((tid << 16) | mtype | new_lock_type);
        } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
            oldv ^= 3;
            mutex->value = oldv;
        }
        _recursive_unlock();

        if (oldv == mtype)
            break;

        /*
         * The lock was held, possibly contended by others.  From
         * now on, if we manage to acquire the lock, we have to
         * assume that others are still contending for it so that
         * we'll wake them when we unlock it.
         */
        new_lock_type = 2;

        if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
            return EBUSY;

        __futex_wait_ex(&mutex->value, shared, oldv, &ts);
    }
    return 0;
}

int pthread_condattr_init(pthread_condattr_t *attr)
{
    if (attr == NULL)
        return EINVAL;

    *attr = PTHREAD_PROCESS_PRIVATE;
    return 0;
}

int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
{
    if (attr == NULL || pshared == NULL)
        return EINVAL;

    *pshared = *attr;
    return 0;
}

int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
    if (attr == NULL)
        return EINVAL;

    if (pshared != PTHREAD_PROCESS_SHARED &&
        pshared != PTHREAD_PROCESS_PRIVATE)
        return EINVAL;

    *attr = pshared;
    return 0;
}

int pthread_condattr_destroy(pthread_condattr_t *attr)
{
    if (attr == NULL)
        return EINVAL;

    *attr = 0xdeada11d;
    return 0;
}

/* We use one bit in condition variable values as the 'shared' flag;
 * the rest is a counter.
 */
#define COND_SHARED_MASK        0x0001
#define COND_COUNTER_INCREMENT  0x0002
#define COND_COUNTER_MASK       (~COND_SHARED_MASK)

#define COND_IS_SHARED(c)  (((c)->value & COND_SHARED_MASK) != 0)

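/* Illustration (hypothetical value): a process-private condition variable
 * whose counter is 3 holds value == 3*COND_COUNTER_INCREMENT == 0x0006.
 * One pthread_cond_signal() decrements the counter, leaving 0x0004, while
 * the shared bit (bit 0) is preserved by the masking in
 * __pthread_cond_pulse() below.
 */
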
/* XXX *technically* there is a race condition that could allow
 * XXX a signal to be missed.  If thread A is preempted in _wait()
 * XXX after unlocking the mutex and before waiting, and if other
 * XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
 * XXX before thread A is scheduled again and calls futex_wait(),
 * XXX then the signal will be lost.
 */

int pthread_cond_init(pthread_cond_t *cond,
                      const pthread_condattr_t *attr)
{
    if (cond == NULL)
        return EINVAL;

    cond->value = 0;

    if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
        cond->value |= COND_SHARED_MASK;

    return 0;
}

int pthread_cond_destroy(pthread_cond_t *cond)
{
    if (cond == NULL)
        return EINVAL;

    cond->value = 0xdeadc04d;
    return 0;
}

/* This function is used by pthread_cond_broadcast and
 * pthread_cond_signal to atomically decrement the counter
 * and then wake up 'counter' threads.
 */
static int
__pthread_cond_pulse(pthread_cond_t *cond, int  counter)
{
    long flags;

    if (__unlikely(cond == NULL))
        return EINVAL;

    flags = (cond->value & ~COND_COUNTER_MASK);
    for (;;) {
        long oldval = cond->value;
        long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
                      | flags;
        if (__atomic_cmpxchg(oldval, newval, &cond->value) == 0)
            break;
    }

    __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
    return 0;
}

int pthread_cond_broadcast(pthread_cond_t *cond)
{
    return __pthread_cond_pulse(cond, INT_MAX);
}

int pthread_cond_signal(pthread_cond_t *cond)
{
    return __pthread_cond_pulse(cond, 1);
}

int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    return pthread_cond_timedwait(cond, mutex, NULL);
}

int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
                                      pthread_mutex_t * mutex,
                                      const struct timespec *reltime)
{
    int  status;
    int  oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), oldvalue, reltime);
    pthread_mutex_lock(mutex);

    if (status == (-ETIMEDOUT)) return ETIMEDOUT;
    return 0;
}

int __pthread_cond_timedwait(pthread_cond_t *cond,
                             pthread_mutex_t * mutex,
                             const struct timespec *abstime,
                             clockid_t clock)
{
    struct timespec ts;
    struct timespec * tsp;

    if (abstime != NULL) {
        if (__timespec_to_absolute(&ts, abstime, clock) < 0)
            return ETIMEDOUT;
        tsp = &ts;
    } else {
        tsp = NULL;
    }

    return __pthread_cond_timedwait_relative(cond, mutex, tsp);
}

int pthread_cond_timedwait(pthread_cond_t *cond,
                           pthread_mutex_t * mutex,
                           const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
}


/* this one exists only for backward binary compatibility */
int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
                                     pthread_mutex_t * mutex,
                                     const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}

int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
                                        pthread_mutex_t * mutex,
                                        const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}

int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
                                       pthread_mutex_t * mutex,
                                       const struct timespec *reltime)
{
    return __pthread_cond_timedwait_relative(cond, mutex, reltime);
}

int pthread_cond_timeout_np(pthread_cond_t *cond,
                            pthread_mutex_t * mutex,
                            unsigned msecs)
{
    struct timespec ts;

    ts.tv_sec = msecs / 1000;
    ts.tv_nsec = (msecs % 1000) * 1000000;

    return __pthread_cond_timedwait_relative(cond, mutex, &ts);
}



/* A technical note regarding our thread-local-storage (TLS) implementation:
 *
 * There can be up to TLSMAP_SIZE independent TLS keys in a given process,
 * though the first TLSMAP_START keys are reserved for Bionic to hold
 * special thread-specific variables like errno or a pointer to
 * the current thread's descriptor.
 *
 * While stored in the TLS area, these entries cannot be accessed through
 * pthread_getspecific(), pthread_setspecific() or pthread_key_delete().
 *
 * Also, some entries in the key table are pre-allocated (see tlsmap_lock)
 * to greatly simplify and speed up some OpenGL-related operations, though
 * the initial value will be NULL on all threads.
 *
 * You can use pthread_getspecific()/pthread_setspecific() on these, and in
 * theory you could also call pthread_key_delete() as well, though this
 * would probably break some apps.
 *
 * The 'tlsmap_t' type defined below implements a shared global map of
 * currently created/allocated TLS keys and the destructors associated
 * with them. You should use tlsmap_lock/unlock to access it to avoid
 * any race condition.
 *
 * The global TLS map simply contains a bitmap of allocated keys and
 * an array of destructors.
 *
 * Each thread has a TLS area that is a simple array of TLSMAP_SIZE void*
 * pointers. The TLS area of the main thread is stack-allocated in
 * __libc_init_common, while the TLS area of other threads is placed at
 * the top of their stack in pthread_create.
 *
 * When pthread_key_create() is called, it finds the first free key in the
 * bitmap, sets its bit to 1, and saves the destructor alongside it.
 *
 * When pthread_key_delete() is called, it erases the key's bitmap bit
 * and its destructor, and also clears the key data in the TLS area of
 * all created threads. As mandated by POSIX, it is the responsibility of
 * the caller of pthread_key_delete() to properly reclaim the objects that
 * were pointed to by these data fields (either before or after the call).
 */
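
/* Usage sketch (illustrative only, not part of the original source): the
 * typical lifecycle of a TLS key as described above. The 'ex_' names are
 * hypothetical.
 */
#if 0   /* example only -- never compiled */
static pthread_key_t  ex_buffer_key;

/* runs at thread exit for every thread whose value is non-NULL */
static void ex_buffer_dtor(void *ptr)
{
    free(ptr);
}

static void ex_init(void)
{
    /* grabs the first free bit in the bitmap and records the destructor */
    pthread_key_create(&ex_buffer_key, ex_buffer_dtor);
}

static void *ex_get_buffer(void)
{
    void *buf = pthread_getspecific(ex_buffer_key);
    if (buf == NULL) {
        buf = malloc(256);
        pthread_setspecific(ex_buffer_key, buf);  /* stored in this thread's TLS area */
    }
    return buf;
}
#endif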

/* TLS Map implementation
 */

#define TLSMAP_START      (TLS_SLOT_MAX_WELL_KNOWN+1)
#define TLSMAP_SIZE       BIONIC_TLS_SLOTS
#define TLSMAP_BITS       32
#define TLSMAP_WORDS      ((TLSMAP_SIZE+TLSMAP_BITS-1)/TLSMAP_BITS)
#define TLSMAP_WORD(m,k)  (m)->map[(k)/TLSMAP_BITS]
#define TLSMAP_MASK(k)    (1U << ((k)&(TLSMAP_BITS-1)))

/* this macro is used to quickly check that a key belongs to a reasonable range */
#define TLSMAP_VALIDATE_KEY(key)  \
    ((key) >= TLSMAP_START && (key) < TLSMAP_SIZE)
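
/* Worked example (illustrative): with TLSMAP_BITS == 32, key 37 lives in
 * map word 37/32 == 1 and is tested with mask 1U << (37 & 31) == 1U << 5.
 * TLSMAP_WORDS rounds up so every key fits: e.g. TLSMAP_SIZE == 64 needs
 * (64+31)/32 == 2 words.
 */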

/* the type of tls key destructor functions */
typedef void (*tls_dtor_t)(void*);

typedef struct {
    int         init;                  /* see comment in tlsmap_lock() */
    uint32_t    map[TLSMAP_WORDS];     /* bitmap of allocated keys */
    tls_dtor_t  dtors[TLSMAP_SIZE];    /* key destructors */
} tlsmap_t;

static pthread_mutex_t  _tlsmap_lock = PTHREAD_MUTEX_INITIALIZER;
static tlsmap_t         _tlsmap;

/* lock the global TLS map lock and return a handle to it */
static __inline__ tlsmap_t* tlsmap_lock(void)
{
    tlsmap_t*   m = &_tlsmap;

    pthread_mutex_lock(&_tlsmap_lock);
    /* we need to initialize the first entry of the 'map' array
     * with the value TLS_DEFAULT_ALLOC_MAP. doing it statically
     * when declaring _tlsmap is a bit awkward and is going to
     * produce warnings, so do it the first time we use the map
     * instead
     */
    if (__unlikely(!m->init)) {
        TLSMAP_WORD(m,0) = TLS_DEFAULT_ALLOC_MAP;
        m->init          = 1;
    }
    return m;
}

/* unlock the global TLS map */
static __inline__ void tlsmap_unlock(tlsmap_t*  m)
{
    pthread_mutex_unlock(&_tlsmap_lock);
    (void)m;  /* a good compiler is a happy compiler */
}

/* test to see whether a key is allocated */
static __inline__ int tlsmap_test(tlsmap_t*  m, int  key)
{
    return (TLSMAP_WORD(m,key) & TLSMAP_MASK(key)) != 0;
}

/* set the destructor and bit flag on a newly allocated key */
static __inline__ void tlsmap_set(tlsmap_t*  m, int  key, tls_dtor_t  dtor)
{
    TLSMAP_WORD(m,key) |= TLSMAP_MASK(key);
    m->dtors[key]       = dtor;
}

/* clear the destructor and bit flag on an existing key */
static __inline__ void tlsmap_clear(tlsmap_t*  m, int  key)
{
    TLSMAP_WORD(m,key) &= ~TLSMAP_MASK(key);
    m->dtors[key]       = NULL;
}

/* allocate a new TLS key, return -1 if no room left */
static int tlsmap_alloc(tlsmap_t*  m, tls_dtor_t  dtor)
{
    int  key;

    for ( key = TLSMAP_START; key < TLSMAP_SIZE; key++ ) {
        if ( !tlsmap_test(m, key) ) {
            tlsmap_set(m, key, dtor);
            return key;
        }
    }
    return -1;
}


int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *))
{
    int        err = ENOMEM;
    tlsmap_t*  map = tlsmap_lock();
    int        k   = tlsmap_alloc(map, destructor_function);

    if (k >= 0) {
        *key = k;
        err  = 0;
    }
    tlsmap_unlock(map);
    return err;
}


/* This deletes a pthread_key_t. Note that the standard mandates that this
 * does not call the destructors of non-NULL key values. Instead, it is the
 * responsibility of the caller to properly dispose of the corresponding data
 * and resources, using any means it finds suitable.
 *
 * On the other hand, this function will clear the corresponding key data
 * values in all known threads. This prevents later (invalid) calls to
 * pthread_getspecific() from receiving invalid/stale values.
 */
int pthread_key_delete(pthread_key_t key)
{
    int                  err;
    pthread_internal_t*  thr;
    tlsmap_t*            map;

    if (!TLSMAP_VALIDATE_KEY(key)) {
        return EINVAL;
    }

    map = tlsmap_lock();

    if (!tlsmap_test(map, key)) {
        err = EINVAL;
        goto err1;
    }

    /* clear value in all threads */
    pthread_mutex_lock(&gThreadListLock);
    for ( thr = gThreadList; thr != NULL; thr = thr->next ) {
        /* avoid zombie threads with a negative 'join_count'. these are really
         * already dead and don't have a TLS area anymore.
         *
         * similarly, it is possible to have thr->tls == NULL for threads that
         * were just recently created through pthread_create() but whose
         * startup trampoline (__thread_entry) hasn't been run yet by the
         * scheduler. so check for this too.
         */
        if (thr->join_count < 0 || !thr->tls)
            continue;

        thr->tls[key] = NULL;
    }
    tlsmap_clear(map, key);

    pthread_mutex_unlock(&gThreadListLock);
    err = 0;

err1:
    tlsmap_unlock(map);
    return err;
}
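
/* Usage sketch (illustrative only, not part of the original source): per the
 * note above, the caller of pthread_key_delete() must reclaim the data
 * itself; the deleted key's destructor never runs. The 'ex_' name is
 * hypothetical, and other threads are assumed to be quiesced first.
 */
#if 0   /* example only -- never compiled */
static void ex_shutdown(pthread_key_t key)
{
    /* free this thread's value first: key_delete() only NULLs the slots */
    free(pthread_getspecific(key));
    pthread_key_delete(key);
}
#endif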


int pthread_setspecific(pthread_key_t key, const void *ptr)
{
    int        err = EINVAL;
    tlsmap_t*  map;

    if (TLSMAP_VALIDATE_KEY(key)) {
        /* check that we're trying to set data for an allocated key */
        map = tlsmap_lock();
        if (tlsmap_test(map, key)) {
            ((uint32_t *)__get_tls())[key] = (uint32_t)ptr;
            err = 0;
        }
        tlsmap_unlock(map);
    }
    return err;
}

void * pthread_getspecific(pthread_key_t key)
{
    if (!TLSMAP_VALIDATE_KEY(key)) {
        return NULL;
    }

    /* for performance reasons, we do not lock/unlock the global TLS map
     * to check that the key is properly allocated. if the key was not
     * allocated, the value read from the TLS should always be NULL
     * due to pthread_key_delete() clearing the values for all threads.
     */
    return (void *)(((unsigned *)__get_tls())[key]);
}

/* POSIX mandates that this be defined in <limits.h> but we don't have
 * it just yet.
 */
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#  define PTHREAD_DESTRUCTOR_ITERATIONS  4
#endif

/* this function is called from pthread_exit() to remove all TLS key data
 * from this thread's TLS area. this must call the destructor of all keys
 * that have a non-NULL data value (and a non-NULL destructor).
 *
 * because destructors can do funky things like deleting/creating other
 * keys, or storing new values under existing keys, we need to implement
 * this in a loop.
 */
static void pthread_key_clean_all(void)
{
    tlsmap_t*    map;
    void**       tls = (void**)__get_tls();
    int          rounds;

    map = tlsmap_lock();

    for (rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; rounds--)
    {
        int  kk, count = 0;

        for (kk = TLSMAP_START; kk < TLSMAP_SIZE; kk++) {
            if ( tlsmap_test(map, kk) )
            {
                void*       data = tls[kk];
                tls_dtor_t  dtor = map->dtors[kk];

                if (data != NULL && dtor != NULL)
                {
                   /* we need to clear the key data now, this will prevent the
                    * destructor (or a later one) from seeing the old value if
                    * it calls pthread_getspecific() for some odd reason
                    *
                    * we only do this when 'dtor' is non-NULL, just in case
                    * another destructor function might be responsible for
                    * manually releasing the corresponding data.
                    */
                    tls[kk] = NULL;

                   /* because the destructor is free to call pthread_key_create
                    * and/or pthread_key_delete, we need to temporarily unlock
                    * the TLS map
                    */
                    tlsmap_unlock(map);
                    (*dtor)(data);
                    map = tlsmap_lock();

                    count += 1;
                }
            }
        }

        /* if we didn't call any destructor, there is no need to check the
         * TLS data again
         */
        if (count == 0)
            break;
    }
    tlsmap_unlock(map);
}
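
/* Usage sketch (illustrative only, not part of the original source): why the
 * bounded loop above is needed. A destructor may store fresh values during
 * cleanup, forcing another round. The 'ex_' names are hypothetical.
 */
#if 0   /* example only -- never compiled */
static pthread_key_t  ex_log_key;   /* assumed to have its own free() destructor */

static void ex_data_dtor(void *data)
{
    /* setting a value here means pthread_key_clean_all() must run at
     * least one more round before this thread's TLS area is empty */
    pthread_setspecific(ex_log_key, malloc(64));
    free(data);
}
#endif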

// man says this should be in <linux/unistd.h>, but it isn't
extern int tkill(int tid, int sig);

int pthread_kill(pthread_t tid, int sig)
{
    int  ret;
    int  old_errno = errno;
    pthread_internal_t * thread = (pthread_internal_t *)tid;

    ret = tkill(thread->kernel_id, sig);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }

    return ret;
}

extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);

int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    /* pthread_sigmask must return the error code, but the syscall
     * will set errno instead and return 0/-1
     */
    int ret, old_errno = errno;

    ret = __rt_sigprocmask(how, set, oset, _NSIG / 8);
    if (ret < 0)
        ret = errno;

    errno = old_errno;
    return ret;
}

int pthread_getcpuclockid(pthread_t  tid, clockid_t  *clockid)
{
    const int            CLOCK_IDTYPE_BITS = 3;
    pthread_internal_t*  thread = (pthread_internal_t*)tid;

    if (!thread)
        return ESRCH;

    /* encode the kernel thread id in the upper bits of the clock id */
    *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
    return 0;
}
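
/* Usage sketch (illustrative only, not part of the original source): reading
 * a thread's CPU time through the per-thread clock id encoded above.
 */
#if 0   /* example only -- never compiled */
static void ex_print_thread_cpu_time(pthread_t t)
{
    clockid_t        cid;
    struct timespec  ts;

    if (pthread_getcpuclockid(t, &cid) == 0 && clock_gettime(cid, &ts) == 0)
        printf("cpu time: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
}
#endif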


/* NOTE: this implementation doesn't support an init function that throws a
 *       C++ exception or calls fork()
 */
int  pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
{
    static pthread_mutex_t   once_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;

    if (*once_control == PTHREAD_ONCE_INIT) {
        pthread_mutex_lock( &once_lock );
        if (*once_control == PTHREAD_ONCE_INIT) {
            (*init_routine)();
            *once_control = ~PTHREAD_ONCE_INIT;
        }
        pthread_mutex_unlock( &once_lock );
    }
    return 0;
}
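
/* Usage sketch (illustrative only, not part of the original source): the
 * double-checked pattern above runs the initializer exactly once no matter
 * how many threads race on first use. The 'ex_' names are hypothetical.
 */
#if 0   /* example only -- never compiled */
static pthread_once_t  ex_once = PTHREAD_ONCE_INIT;
static int             ex_table[256];

static void ex_init_table(void)
{
    int n;
    for (n = 0; n < 256; n++)
        ex_table[n] = n * n;
}

static int ex_lookup(int n)
{
    pthread_once(&ex_once, ex_init_table);  /* safe from any thread */
    return ex_table[n & 255];
}
#endif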

/* This value is not exported by kernel headers, so hardcode it here */
#define MAX_TASK_COMM_LEN	16
#define TASK_COMM_FMT		"/proc/self/task/%u/comm"

int pthread_setname_np(pthread_t thid, const char *thname)
{
    size_t thname_len;
    int saved_errno, ret;

    if (thid == 0 || thname == NULL)
        return EINVAL;

    thname_len = strlen(thname);
    if (thname_len >= MAX_TASK_COMM_LEN)
        return ERANGE;

    saved_errno = errno;
    if (thid == pthread_self())
    {
        ret = prctl(PR_SET_NAME, (unsigned long)thname, 0, 0, 0) ? errno : 0;
    }
    else
    {
        /* Have to change another thread's name */
        pthread_internal_t *thread = (pthread_internal_t *)thid;
        char comm_name[sizeof(TASK_COMM_FMT) + 8];
        ssize_t n;
        int fd;

        snprintf(comm_name, sizeof(comm_name), TASK_COMM_FMT, (unsigned int)thread->kernel_id);
        fd = open(comm_name, O_RDWR);
        if (fd == -1)
        {
            ret = errno;
            goto exit;
        }
        n = TEMP_FAILURE_RETRY(write(fd, thname, thname_len));
        close(fd);

        if (n < 0)
            ret = errno;
        else if ((size_t)n != thname_len)
            ret = EIO;
        else
            ret = 0;
    }
exit:
    errno = saved_errno;
    return ret;
}
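
/* Usage sketch (illustrative only, not part of the original source): naming
 * a worker thread from inside the thread itself; names of MAX_TASK_COMM_LEN
 * (16) bytes or more are rejected with ERANGE above.
 */
#if 0   /* example only -- never compiled */
static void *ex_worker(void *arg)
{
    pthread_setname_np(pthread_self(), "ex-worker");  /* 9 chars, fits */
    /* ... thread body ... */
    return arg;
}
#endif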