/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/atomics.h>
#include <bionic_tls.h>
#include <sys/mman.h>
#include <pthread.h>
#include <time.h>
#include "pthread_internal.h"
#include "thread_private.h"
#include <limits.h>
#include <memory.h>
#include <assert.h>
#include <malloc.h>

extern int  __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
extern void _exit_thread(int retCode);
extern int  __set_errno(int);

void _thread_created_hook(pid_t thread_id) __attribute__((noinline));

#define PTHREAD_ATTR_FLAG_DETACHED      0x00000001
#define PTHREAD_ATTR_FLAG_USER_STACK    0x00000002

#define DEFAULT_STACKSIZE  (1024 * 1024)
#define STACKBASE          0x10000000

static uint8_t * gStackBase = (uint8_t *)STACKBASE;
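/* note: STACKBASE above is only a placement hint passed to mmap() in
 * mkstack() below; since MAP_FIXED is not used, the kernel is free to
 * put thread stacks at a different address. */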

static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;


static const pthread_attr_t gDefaultPthreadAttr = {
    .flags = 0,
    .stack_base = NULL,
    .stack_size = DEFAULT_STACKSIZE,
    .guard_size = PAGE_SIZE,
    .sched_policy = SCHED_NORMAL,
    .sched_priority = 0
};

#define  INIT_THREADS  1

static pthread_internal_t*  gThreadList = NULL;
static pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;


/* we simply malloc/free the internal pthread_internal_t structures. we may
 * want to use a different allocation scheme in the future, but this one
 * should be more than enough for now.
 */
static pthread_internal_t*
_pthread_internal_alloc(void)
{
    pthread_internal_t*  thread;

    thread = calloc( sizeof(*thread), 1 );
    if (thread)
        thread->intern = 1;

    return thread;
}

static void
_pthread_internal_free( pthread_internal_t*  thread )
{
    if (thread && thread->intern) {
        thread->intern = 0;  /* just in case */
        free(thread);
    }
}


static void
_pthread_internal_remove_locked( pthread_internal_t*  thread )
{
    /* the last thread in the list has no successor, so guard against NULL */
    if (thread->next)
        thread->next->pref = thread->pref;
    thread->pref[0] = thread->next;
}

static void
_pthread_internal_remove( pthread_internal_t*  thread )
{
    pthread_mutex_lock(&gThreadListLock);
    _pthread_internal_remove_locked(thread);
    pthread_mutex_unlock(&gThreadListLock);
}

static void
_pthread_internal_add( pthread_internal_t*  thread )
{
    pthread_mutex_lock(&gThreadListLock);
    thread->pref = &gThreadList;
    thread->next = thread->pref[0];
    if (thread->next)
        thread->next->pref = &thread->next;
    thread->pref[0] = thread;
    pthread_mutex_unlock(&gThreadListLock);
}

pthread_internal_t*
__get_thread(void)
{
    void**  tls = (void**)__get_tls();

    return (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
}


void*
__get_stack_base(int *p_stack_size)
{
    pthread_internal_t*  thread = __get_thread();

    *p_stack_size = thread->attr.stack_size;
    return thread->attr.stack_base;
}


void __init_tls(void** tls, void* thread)
{
    int  nn;

    ((pthread_internal_t*)thread)->tls = tls;

    // slot 0 must point to the tls area itself; this is required by the
    // x86 Linux kernel's thread-local-storage implementation
    tls[TLS_SLOT_SELF]      = (void*)tls;
    tls[TLS_SLOT_THREAD_ID] = thread;
    for (nn = TLS_SLOT_ERRNO; nn < BIONIC_TLS_SLOTS; nn++)
        tls[nn] = 0;

    __set_tls( (void*)tls );
}


/*
 * This trampoline is called from the assembly clone() function
 */
void __thread_entry(int (*func)(void*), void *arg, void **tls)
{
    pthread_internal_t * thrInfo;

    // Wait for our creating thread to release us. This gives it time to
    // notify gdb about this thread before we start doing anything.
    pthread_mutex_t * start_mutex = (pthread_mutex_t *)&tls[TLS_SLOT_SELF];
    pthread_mutex_lock(start_mutex);
    pthread_mutex_destroy(start_mutex);

    thrInfo = (pthread_internal_t *) tls[TLS_SLOT_THREAD_ID];

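    /* note: the start mutex above was stored in tls[TLS_SLOT_SELF]; it has
     * been destroyed by now, and __init_tls() below overwrites that slot
     * with the TLS area's self-pointer. */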
    __init_tls( tls, thrInfo );

    pthread_exit( (void*)func(arg) );
}

void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base)
{
    if (attr == NULL) {
        thread->attr = gDefaultPthreadAttr;
    } else {
        thread->attr = *attr;
    }
    thread->attr.stack_base = stack_base;
    thread->kernel_id       = kernel_id;

    // set the scheduling policy/priority of the thread
    if (thread->attr.sched_policy != SCHED_NORMAL) {
        struct sched_param param;
        param.sched_priority = thread->attr.sched_priority;
        sched_setscheduler(kernel_id, thread->attr.sched_policy, &param);
    }

    pthread_cond_init(&thread->join_cond, NULL);
    thread->join_count = 0;

    thread->cleanup_stack = NULL;

    _pthread_internal_add(thread);
}


/* XXX stacks not reclaimed if thread spawn fails */
/* XXX stack address spaces should be reused if available again */

static void *mkstack(size_t size, size_t guard_size)
{
    void * stack;

    pthread_mutex_lock(&mmap_lock);

    stack = mmap((void *)gStackBase, size,
                 PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                 -1, 0);

    if (stack == MAP_FAILED) {
        stack = NULL;
        goto done;
    }

    if (mprotect(stack, guard_size, PROT_NONE)) {
        munmap(stack, size);
        stack = NULL;
        goto done;
    }

done:
    pthread_mutex_unlock(&mmap_lock);
    return stack;
}

/*
 * Create a new thread. The thread's stack is laid out like so:
 *
 * +---------------------------+
 * |     pthread_internal_t    |
 * +---------------------------+
 * |                           |
 * |          TLS area         |
 * |                           |
 * +---------------------------+
 * |                           |
 * .                           .
 * .         stack area        .
 * .                           .
 * |                           |
 * +---------------------------+
 * |         guard page        |
 * +---------------------------+
 *
 * note that TLS[0] must be a pointer to itself; this is required
 * by the thread-local storage implementation of the x86 Linux
 * kernel, where the TLS pointer is read by reading fs:[0]
 */
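/* A clarifying note on the diagram above: in this version of the code the
 * pthread_internal_t is actually calloc()'ed by _pthread_internal_alloc()
 * rather than placed on the stack mapping; only the TLS area (the top
 * BIONIC_TLS_SLOTS words) and the guard page (the first guard_size bytes,
 * mprotect()'ed PROT_NONE in mkstack()) live inside the mapping itself. */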
int pthread_create(pthread_t *thread_out, pthread_attr_t const * attr,
                   void *(*start_routine)(void *), void * arg)
{
    char*   stack;
    void**  tls;
    int     tid;
    pthread_mutex_t * start_mutex;
    pthread_internal_t * thread;
    int     madestack = 0;
    int     old_errno = errno;

    /* this will inform the rest of the C library that at least one thread
     * was created. this will force certain functions to acquire/release
     * locks (e.g. atexit()) to protect shared global structures.
     *
     * this works because pthread_create() is not called by the C library's
     * initialization routine that sets up the main thread's data structures.
     */
    __isthreaded = 1;

    thread = _pthread_internal_alloc();
    if (thread == NULL)
        return ENOMEM;

    if (attr == NULL) {
        attr = &gDefaultPthreadAttr;
    }

    // make sure the stack is PAGE_SIZE aligned
    size_t stackSize = (attr->stack_size +
                        (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);

    if (!attr->stack_base) {
        stack = mkstack(stackSize, attr->guard_size);
        if (stack == NULL) {
            _pthread_internal_free(thread);
            return ENOMEM;
        }
        madestack = 1;
    } else {
        stack = attr->stack_base;
    }

    // Make room for TLS
    tls = (void**)(stack + stackSize - BIONIC_TLS_SLOTS*sizeof(void*));

    // Create a mutex for the thread in TLS_SLOT_SELF to wait on once it
    // starts, so we can keep it from doing anything until after we notify
    // the debugger about it
    start_mutex = (pthread_mutex_t *) &tls[TLS_SLOT_SELF];
    pthread_mutex_init(start_mutex, NULL);
    pthread_mutex_lock(start_mutex);

    tls[TLS_SLOT_THREAD_ID] = thread;

    tid = __pthread_clone((int(*)(void*))start_routine, tls,
                          CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND
                          | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED,
                          arg);

    if (tid < 0) {
        int  result;
        if (madestack)
            munmap(stack, stackSize);
        _pthread_internal_free(thread);
        result = errno;
        errno = old_errno;
        return result;
    }

    _init_thread(thread, tid, (pthread_attr_t*)attr, stack);

    if (!madestack)
        thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_STACK;

    // Notify any debuggers about the new thread
    pthread_mutex_lock(&gDebuggerNotificationLock);
    _thread_created_hook(tid);
    pthread_mutex_unlock(&gDebuggerNotificationLock);

    // Let the thread do its thing
    pthread_mutex_unlock(start_mutex);

    *thread_out = (pthread_t)thread;
    return 0;
}


int pthread_attr_init(pthread_attr_t * attr)
{
    *attr = gDefaultPthreadAttr;
    return 0;
}

int pthread_attr_destroy(pthread_attr_t * attr)
{
    memset(attr, 0x42, sizeof(pthread_attr_t));
    return 0;
}

int pthread_attr_setdetachstate(pthread_attr_t * attr, int state)
{
    if (state == PTHREAD_CREATE_DETACHED) {
        attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
    } else if (state == PTHREAD_CREATE_JOINABLE) {
        attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
    } else {
        return EINVAL;
    }
    return 0;
}

int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state)
{
    *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED)
           ? PTHREAD_CREATE_DETACHED
           : PTHREAD_CREATE_JOINABLE;
    return 0;
}

int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy)
{
    attr->sched_policy = policy;
    return 0;
}

int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy)
{
    *policy = attr->sched_policy;
    return 0;
}

int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param)
{
    attr->sched_priority = param->sched_priority;
    return 0;
}

int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param)
{
    param->sched_priority = attr->sched_priority;
    return 0;
}

int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
        return EINVAL;
    }
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size)
{
    *stack_size = attr->stack_size;
    return 0;
}
int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stack_addr)
{
#if 1
    // It's not clear whether this is supposed to set the top or the bottom
    // of the stack, so don't handle it for now.
    return ENOSYS;
#else
    if ((uint32_t)stack_addr & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_addr;
    return 0;
#endif
}

int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stack_addr)
{
    *stack_addr = (char*)attr->stack_base + attr->stack_size;
    return 0;
}

int pthread_attr_setstack(pthread_attr_t * attr, void * stack_base, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
        return EINVAL;
    }
    if ((uint32_t)stack_base & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_base;
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstack(pthread_attr_t const * attr, void ** stack_base, size_t * stack_size)
{
    *stack_base = attr->stack_base;
    *stack_size = attr->stack_size;
    return 0;
}

int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size)
{
    if (guard_size & (PAGE_SIZE - 1) || guard_size < PAGE_SIZE) {
        return EINVAL;
    }

    attr->guard_size = guard_size;
    return 0;
}

int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size)
{
    *guard_size = attr->guard_size;
    return 0;
}

int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    *attr = thread->attr;
    return 0;
}

int pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
    if (scope == PTHREAD_SCOPE_SYSTEM)
        return 0;
    if (scope == PTHREAD_SCOPE_PROCESS)
        return ENOTSUP;

    return EINVAL;
}

int pthread_attr_getscope(pthread_attr_t const *attr)
{
    return PTHREAD_SCOPE_SYSTEM;
}


/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support
 * C++ exceptions and thread cancellation
 */
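/* Typical usage (illustrative example, not part of this file):
 *
 *     void* buf = malloc(128);
 *     pthread_cleanup_push(free, buf);    // expands to __pthread_cleanup_push
 *     ...code that may call pthread_exit()...
 *     pthread_cleanup_pop(1);             // pops and runs the handler
 */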

void __pthread_cleanup_push( __pthread_cleanup_t*      c,
                             __pthread_cleanup_func_t  routine,
                             void*                     arg )
{
    pthread_internal_t*  thread = __get_thread();

    c->__cleanup_routine  = routine;
    c->__cleanup_arg      = arg;
    c->__cleanup_prev     = thread->cleanup_stack;
    thread->cleanup_stack = c;
}

void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
{
    pthread_internal_t*  thread = __get_thread();

    thread->cleanup_stack = c->__cleanup_prev;
    if (execute)
        c->__cleanup_routine(c->__cleanup_arg);
}

/* used by pthread_exit() to clean all TLS keys of the current thread */
static void pthread_key_clean_all(void);

void pthread_exit(void * retval)
{
    pthread_internal_t*  thread     = __get_thread();
    void*                stack_base = thread->attr.stack_base;
    int                  stack_size = thread->attr.stack_size;
    int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;

    // call the cleanup handlers first
    while (thread->cleanup_stack) {
        __pthread_cleanup_t*  c = thread->cleanup_stack;
        thread->cleanup_stack   = c->__cleanup_prev;
        c->__cleanup_routine(c->__cleanup_arg);
    }

    // call the TLS destructors. it is important to do that before removing
    // this thread from the global list: this ensures that if someone else
    // deletes a TLS key, the corresponding value will be set to NULL in this
    // thread's TLS space (see pthread_key_delete)
    pthread_key_clean_all();

    // if the thread is detached, destroy the pthread_internal_t;
    // otherwise, keep it in memory and signal any joiners
    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        _pthread_internal_remove(thread);
        _pthread_internal_free(thread);
    } else {
        /* the join_count field is used to store the number of threads waiting
         * for the termination of this thread with pthread_join().
         *
         * if it is positive we need to signal the waiters, and we do not touch
         * the count (it will be decremented by the waiters; the last one will
         * also remove/free the thread structure).
         *
         * if it is zero, we set the count value to -1 to indicate that the
         * thread is in 'zombie' state: it has stopped executing, and its stack
         * is gone (as well as its TLS area). when another thread calls
         * pthread_join() on it, it will immediately free the thread and return.
         */
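        /* summary of join_count states:
         *    > 0  threads are blocked in pthread_join() on this thread
         *   == 0  still running, no joiners yet
         *    < 0  zombie: exited, a single pthread_join() will free it
         */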
        pthread_mutex_lock(&gThreadListLock);
        thread->return_value = retval;
        if (thread->join_count > 0) {
            pthread_cond_broadcast(&thread->join_cond);
        } else {
            thread->join_count = -1;  /* zombie thread */
        }
        pthread_mutex_unlock(&gThreadListLock);
    }

    // destroy the thread stack
    if (user_stack)
        _exit_thread((int)retval);
    else
        _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
}

int pthread_join(pthread_t thid, void ** ret_val)
{
    pthread_internal_t*  thread = (pthread_internal_t*)thid;
    int                  count;

    // check that the thread still exists and is not detached
    pthread_mutex_lock(&gThreadListLock);

    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            break;

    if (!thread) {
        pthread_mutex_unlock(&gThreadListLock);
        return ESRCH;
    }

    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        pthread_mutex_unlock(&gThreadListLock);
        return EINVAL;
    }

    /* wait for thread death when needed
     *
     * if the 'join_count' is negative, this is a 'zombie' thread that
     * is already dead and without stack/TLS
     *
     * otherwise, we need to increment 'join_count' and wait to be signaled
     */
    count = thread->join_count;
    if (count >= 0) {
        thread->join_count += 1;
        pthread_cond_wait( &thread->join_cond, &gThreadListLock );
        count = --thread->join_count;
    }
    if (ret_val)
        *ret_val = thread->return_value;

    /* remove thread descriptor when we're the last joiner or when the
     * thread was already a zombie.
     */
    if (count <= 0) {
        _pthread_internal_remove_locked(thread);
        _pthread_internal_free(thread);
    }
    pthread_mutex_unlock(&gThreadListLock);
    return 0;
}

int pthread_detach( pthread_t thid )
{
    pthread_internal_t*  thread;
    int                  result = 0;
    int                  flags;

    pthread_mutex_lock(&gThreadListLock);
    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            goto FoundIt;

    result = ESRCH;
    goto Exit;

FoundIt:
    do {
        flags = thread->attr.flags;

        if ( flags & PTHREAD_ATTR_FLAG_DETACHED ) {
            /* thread is not joinable! */
            result = EINVAL;
            goto Exit;
        }
    }
    while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
                              (volatile int*)&thread->attr.flags ) != 0 );
Exit:
    pthread_mutex_unlock(&gThreadListLock);
    return result;
}

pthread_t pthread_self(void)
{
    return (pthread_t)__get_thread();
}

int pthread_equal(pthread_t one, pthread_t two)
{
    return (one == two ? 1 : 0);
}

int pthread_getschedparam(pthread_t thid, int * policy,
                          struct sched_param * param)
{
    int  old_errno = errno;

    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int err = sched_getparam(thread->kernel_id, param);
    if (!err) {
        *policy = sched_getscheduler(thread->kernel_id);
    } else {
        err = errno;
        errno = old_errno;
    }
    return err;
}

int pthread_setschedparam(pthread_t thid, int policy,
                          struct sched_param const * param)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int                  old_errno = errno;
    int                  ret;

    ret = sched_setscheduler(thread->kernel_id, policy, param);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }
    return ret;
}


int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count);

// mutex lock states
//
// 0: unlocked
// 1: locked, no waiters
// 2: locked, maybe waiters

/* a mutex is implemented as a 32-bit integer holding the following fields
 *
 * bits:   name     description
 * 31-16   tid      owner thread's kernel id (recursive and errorcheck only)
 * 15-14   type     mutex type
 * 13-2    counter  counter of recursive mutexes
 * 1-0     state    lock state (0, 1 or 2)
 */
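/* worked example of the encoding above (illustrative): a recursive mutex
 * owned by the thread with kernel id 5, locked twice (so the re-lock
 * counter is 1) and uncontended (state 1), holds the value
 *
 *     (5 << 16) | MUTEX_TYPE_RECURSIVE | (1 << MUTEX_COUNTER_SHIFT) | 1
 *   = 0x00050000 | 0x4000 | 0x4 | 0x1
 *   = 0x00054005
 */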


#define  MUTEX_OWNER(m)    (((m)->value >> 16) & 0xffff)
#define  MUTEX_COUNTER(m)  (((m)->value >> 2) & 0xfff)

#define  MUTEX_TYPE_MASK        0xc000
#define  MUTEX_TYPE_NORMAL      0x0000
#define  MUTEX_TYPE_RECURSIVE   0x4000
#define  MUTEX_TYPE_ERRORCHECK  0x8000

#define  MUTEX_COUNTER_SHIFT  2
#define  MUTEX_COUNTER_MASK   0x3ffc


int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = PTHREAD_MUTEX_DEFAULT;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = -1;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    if (attr && *attr >= PTHREAD_MUTEX_NORMAL &&
                *attr <= PTHREAD_MUTEX_ERRORCHECK ) {
        *type = *attr;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (attr && type >= PTHREAD_MUTEX_NORMAL &&
                type <= PTHREAD_MUTEX_ERRORCHECK ) {
        *attr = type;
        return 0;
    }
    return EINVAL;
}

/* process-shared mutexes are not supported at the moment */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    if (!attr)
        return EINVAL;

    return (pshared == PTHREAD_PROCESS_PRIVATE) ? 0 : ENOTSUP;
}

int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
{
    if (!attr)
        return EINVAL;

    *pshared = PTHREAD_PROCESS_PRIVATE;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t *mutex,
                       const pthread_mutexattr_t *attr)
{
    if ( mutex ) {
        if (attr == NULL) {
            mutex->value = MUTEX_TYPE_NORMAL;
            return 0;
        }
        switch ( *attr ) {
        case PTHREAD_MUTEX_NORMAL:
            mutex->value = MUTEX_TYPE_NORMAL;
            return 0;

        case PTHREAD_MUTEX_RECURSIVE:
            mutex->value = MUTEX_TYPE_RECURSIVE;
            return 0;

        case PTHREAD_MUTEX_ERRORCHECK:
            mutex->value = MUTEX_TYPE_ERRORCHECK;
            return 0;
        }
    }
    return EINVAL;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    mutex->value = 0xdead10cc;
    return 0;
}


/*
 * Lock a non-recursive mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static __inline__ void
_normal_lock(pthread_mutex_t*  mutex)
{
    /*
     * The common case is an unlocked mutex, so we begin by trying to
     * change the lock's state from 0 to 1.  __atomic_cmpxchg() returns 0
     * if it made the swap successfully.  If the result is nonzero, this
     * lock is already held by another thread.
     */
    if (__atomic_cmpxchg(0, 1, &mutex->value ) != 0) {
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2.  We need to swap in the new
         * state value and then wait until somebody wakes us up.
         *
         * __atomic_swap() returns the previous value.  We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock.  If
         * not, another thread still holds the lock and we wait again.
         *
         * The second argument to the __futex_wait() call is compared
         * against the current value.  If it doesn't match, __futex_wait()
         * returns immediately (otherwise, it sleeps for a time specified
         * by the third argument; 0 means sleep forever).  This ensures
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        while (__atomic_swap(2, &mutex->value ) != 0)
            __futex_wait(&mutex->value, 2, 0);
    }
}

/*
 * Release a non-recursive mutex.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static __inline__ void
_normal_unlock(pthread_mutex_t*  mutex)
{
    /*
     * The mutex value will be 1 or (rarely) 2.  We use an atomic decrement
     * to release the lock.  __atomic_dec() returns the previous value;
     * if it wasn't 1 we have to do some additional work.
     */
    if (__atomic_dec(&mutex->value) != 1) {
        /*
         * Start by releasing the lock.  The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
         * hold it, and anybody who tries to sneak in will push it back
         * to state 2.
         *
         * Once we set it to zero the lock is up for grabs.  We follow
         * this with a __futex_wake() to ensure that one of the waiting
         * threads has a chance to grab it.
         *
         * This doesn't cause a race with the swap/wait pair in
         * _normal_lock(), because the __futex_wait() call there will
         * return immediately if the mutex value isn't 2.
         */
        mutex->value = 0;

        /*
         * Wake up one waiting thread.  We don't know which thread will be
         * woken or when it'll start executing -- futexes make no guarantees
         * here.  There may not even be a thread waiting.
         *
         * The newly-woken thread will replace the 0 we just set above
         * with 2, which means that when it eventually releases the mutex
         * it will also call FUTEX_WAKE.  This results in one extra wake
         * call whenever a lock is contended, but lets us avoid forgetting
         * anyone without requiring us to track the number of sleepers.
         *
         * It's possible for another thread to sneak in and grab the lock
         * between the zero assignment above and the wake call below.  If
         * the new thread is "slow" and holds the lock for a while, we'll
         * wake up a sleeper, which will swap in a 2 and then go back to
         * sleep since the lock is still held.  If the new thread is "fast",
         * running to completion before we call wake, the thread we
         * eventually wake will find an unlocked mutex and will execute.
         * Either way we have correct behavior and nobody is orphaned on
         * the wait queue.
         */
        __futex_wake(&mutex->value, 1);
    }
}

static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;

static void
_recursive_lock(void)
{
    _normal_lock( &__recursive_lock );
}

static void
_recursive_unlock(void)
{
    _normal_unlock( &__recursive_lock );
}

#define  __likely(cond)    __builtin_expect(!!(cond), 1)
#define  __unlikely(cond)  __builtin_expect(!!(cond), 0)

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    if (__likely(mutex != NULL))
    {
        int  mtype = (mutex->value & MUTEX_TYPE_MASK);

        if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
            _normal_lock(mutex);
        }
        else
        {
            int  tid = __get_thread()->kernel_id;

            if ( tid == MUTEX_OWNER(mutex) )
            {
                int  oldv, counter;

                if (mtype == MUTEX_TYPE_ERRORCHECK) {
                    /* trying to re-lock a mutex we already acquired */
                    return EDEADLK;
                }
                /*
                 * We own the mutex, but other threads are able to change
                 * the contents (e.g. promoting it to "contended"), so we
                 * need to hold the global lock.
                 */
                _recursive_lock();
                oldv         = mutex->value;
                counter      = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
                mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
                _recursive_unlock();
            }
            else
            {
                /*
                 * If the new lock is available immediately, we grab it in
                 * the "uncontended" state.
                 */
                int  new_lock_type = 1;

                for (;;) {
                    int  oldv;

                    _recursive_lock();
                    oldv = mutex->value;
                    if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
                        mutex->value = ((tid << 16) | mtype | new_lock_type);
                    } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
                        oldv ^= 3;
                        mutex->value = oldv;
                    }
                    _recursive_unlock();

                    if (oldv == mtype)
                        break;

                    /*
                     * The lock was held, possibly contended by others.  From
                     * now on, if we manage to acquire the lock, we have to
                     * assume that others are still contending for it so that
                     * we'll wake them when we unlock it.
                     */
                    new_lock_type = 2;

                    __futex_wait( &mutex->value, oldv, 0 );
                }
            }
        }
        return 0;
    }
    return EINVAL;
}


int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    if (__likely(mutex != NULL))
    {
        int  mtype = (mutex->value & MUTEX_TYPE_MASK);

        if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
            _normal_unlock(mutex);
        }
        else
        {
            int  tid = __get_thread()->kernel_id;

            if ( tid == MUTEX_OWNER(mutex) )
            {
                int  oldv;

                _recursive_lock();
                oldv = mutex->value;
                if (oldv & MUTEX_COUNTER_MASK) {
                    mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
                    oldv = 0;
                } else {
                    mutex->value = mtype;
                }
                _recursive_unlock();

                if ((oldv & 3) == 2)
                    __futex_wake( &mutex->value, 1 );
            }
            else {
                /* trying to unlock a lock we do not own */
                return EPERM;
            }
        }
        return 0;
    }
    return EINVAL;
}


int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    if (__likely(mutex != NULL))
    {
        int  mtype = (mutex->value & MUTEX_TYPE_MASK);

        if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
        {
            if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
                return 0;

            return EBUSY;
        }
        else
        {
            int  tid = __get_thread()->kernel_id;
            int  oldv;

            if ( tid == MUTEX_OWNER(mutex) )
            {
                int  oldv, counter;

                if (mtype == MUTEX_TYPE_ERRORCHECK) {
                    /* already locked by ourselves */
                    return EDEADLK;
                }

                _recursive_lock();
                oldv = mutex->value;
                counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
                mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
                _recursive_unlock();
                return 0;
            }

            /* try to lock it */
            _recursive_lock();
            oldv = mutex->value;
            if (oldv == mtype)  /* uncontended released lock => state 1 */
                mutex->value = ((tid << 16) | mtype | 1);
            _recursive_unlock();

            if (oldv != mtype)
                return EBUSY;

            return 0;
        }
    }
    return EINVAL;
}


/* XXX *technically* there is a race condition that could allow
 * XXX a signal to be missed.  If thread A is preempted in _wait()
 * XXX after unlocking the mutex and before waiting, and if other
 * XXX threads call signal or broadcast UINT_MAX times (exactly),
 * XXX before thread A is scheduled again and calls futex_wait(),
 * XXX then the signal will be lost.
 */
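/* How the condition variable works (a summary of the code below): the
 * 'value' field is used as a plain sequence counter.  A waiter samples the
 * counter, releases the mutex and calls __futex_wait() with the sampled
 * value; signal/broadcast decrement the counter and wake one/all waiters.
 * A waiter that is preempted between sampling and sleeping will observe a
 * changed value, so its __futex_wait() returns immediately; this is why
 * the wrap-around race above requires exactly UINT_MAX signals. */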

int pthread_cond_init(pthread_cond_t *cond,
                      const pthread_condattr_t *attr)
{
    cond->value = 0;
    return 0;
}

int pthread_cond_destroy(pthread_cond_t *cond)
{
    cond->value = 0xdeadc04d;
    return 0;
}

int pthread_cond_broadcast(pthread_cond_t *cond)
{
    __atomic_dec(&cond->value);
    __futex_wake(&cond->value, INT_MAX);
    return 0;
}

int pthread_cond_signal(pthread_cond_t *cond)
{
    __atomic_dec(&cond->value);
    __futex_wake(&cond->value, 1);
    return 0;
}

int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    return pthread_cond_timedwait(cond, mutex, NULL);
}

int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
                                      pthread_mutex_t * mutex,
                                      const struct timespec *reltime)
{
    int  status;
    int  oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    status = __futex_wait(&cond->value, oldvalue, reltime);
    pthread_mutex_lock(mutex);

    if (status == (-ETIMEDOUT)) return ETIMEDOUT;
    return 0;
}

int __pthread_cond_timedwait(pthread_cond_t *cond,
                             pthread_mutex_t * mutex,
                             const struct timespec *abstime,
                             clockid_t clock)
{
    struct timespec ts;
    struct timespec * tsp;

    if (abstime != NULL) {
        /* convert the absolute deadline into a timeout relative to 'clock' */
        clock_gettime(clock, &ts);
        ts.tv_sec  = abstime->tv_sec - ts.tv_sec;
        ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
        if (ts.tv_nsec < 0) {
            ts.tv_sec--;
            ts.tv_nsec += 1000000000;
        }
        if ((ts.tv_nsec < 0) || (ts.tv_sec < 0)) {
            return ETIMEDOUT;
        }
        tsp = &ts;
    } else {
        tsp = NULL;
    }

    return __pthread_cond_timedwait_relative(cond, mutex, tsp);
}

int pthread_cond_timedwait(pthread_cond_t *cond,
                           pthread_mutex_t * mutex,
                           const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
}


int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
                                     pthread_mutex_t * mutex,
                                     const struct timespec *abstime)
{
    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
}

int pthread_cond_timeout_np(pthread_cond_t *cond,
                            pthread_mutex_t * mutex,
                            unsigned msecs)
{
    int oldvalue;
    struct timespec ts;
    int status;

    ts.tv_sec  = msecs / 1000;
    ts.tv_nsec = (msecs % 1000) * 1000000;

    oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    status = __futex_wait(&cond->value, oldvalue, &ts);
    pthread_mutex_lock(mutex);

    if (status == (-ETIMEDOUT)) return ETIMEDOUT;

    return 0;
}



/* A technical note regarding our thread-local-storage (TLS) implementation:
 *
 * There can be up to TLSMAP_SIZE independent TLS keys in a given process,
 * though the first TLSMAP_START keys are reserved for Bionic to hold
 * special thread-specific variables like errno or a pointer to
 * the current thread's descriptor.
 *
 * while they are stored in the TLS area, these entries cannot be accessed
 * through pthread_getspecific() / pthread_setspecific() or
 * pthread_key_delete()
 *
 * also, some entries in the key table are pre-allocated (see tlsmap_lock)
 * to greatly simplify and speed up some OpenGL-related operations, though
 * the initial value will be NULL on all threads.
 *
 * you can use pthread_getspecific()/setspecific() on these, and in theory
 * you could also call pthread_key_delete() as well, though this would
 * probably break some apps.
 *
 * The 'tlsmap_t' type defined below implements a shared global map of
 * currently created/allocated TLS keys and the destructors associated
 * with them. You should use tlsmap_lock/unlock to access it to avoid
 * any race condition.
 *
 * the global TLS map simply contains a bitmap of allocated keys, and
 * an array of destructors.
 *
 * each thread has a TLS area that is a simple array of TLSMAP_SIZE void*
 * pointers. the TLS area of the main thread is stack-allocated in
 * __libc_init_common, while the TLS area of other threads is placed at
 * the top of their stack in pthread_create.
 *
 * when pthread_key_create() is called, it finds the first free key in the
 * bitmap, sets its bit to 1, and saves the destructor alongside it
 *
 * when pthread_key_delete() is called, it will erase the key's bitmap bit
 * and its destructor, and will also clear the key data in the TLS area of
 * all created threads. As mandated by POSIX, it is the responsibility of
 * the caller of pthread_key_delete() to properly reclaim the objects that
 * were pointed to by these data fields (either before or after the call).
 *
 */
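/* Typical key usage (illustrative example only, not part of this file):
 *
 *     static pthread_key_t  buf_key;
 *
 *     pthread_key_create(&buf_key, free);         // 'free' runs at thread exit
 *     pthread_setspecific(buf_key, malloc(128));  // per-thread value
 *     char* buf = pthread_getspecific(buf_key);
 */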

/* TLS Map implementation
 */

#define TLSMAP_START      (TLS_SLOT_MAX_WELL_KNOWN+1)
#define TLSMAP_SIZE       BIONIC_TLS_SLOTS
#define TLSMAP_BITS       32
#define TLSMAP_WORDS      ((TLSMAP_SIZE+TLSMAP_BITS-1)/TLSMAP_BITS)
#define TLSMAP_WORD(m,k)  (m)->map[(k)/TLSMAP_BITS]
#define TLSMAP_MASK(k)    (1U << ((k)&(TLSMAP_BITS-1)))

/* this macro is used to quickly check that a key belongs to a reasonable range */
#define TLSMAP_VALIDATE_KEY(key)  \
    ((key) >= TLSMAP_START && (key) < TLSMAP_SIZE)

/* the type of tls key destructor functions */
typedef void (*tls_dtor_t)(void*);

typedef struct {
    int         init;                /* see comment in tlsmap_lock() */
    uint32_t    map[TLSMAP_WORDS];   /* bitmap of allocated keys */
    tls_dtor_t  dtors[TLSMAP_SIZE];  /* key destructors */
} tlsmap_t;

static pthread_mutex_t  _tlsmap_lock = PTHREAD_MUTEX_INITIALIZER;
static tlsmap_t         _tlsmap;

/* lock the global TLS map lock and return a handle to it */
static __inline__ tlsmap_t* tlsmap_lock(void)
{
    tlsmap_t*  m = &_tlsmap;

    pthread_mutex_lock(&_tlsmap_lock);
    /* we need to initialize the first entry of the 'map' array
     * with the value TLS_DEFAULT_ALLOC_MAP. doing it statically
     * when declaring _tlsmap is a bit awkward and is going to
     * produce warnings, so do it the first time we use the map
     * instead
     */
    if (__unlikely(!m->init)) {
        TLSMAP_WORD(m,0) = TLS_DEFAULT_ALLOC_MAP;
        m->init          = 1;
    }
    return m;
}

/* unlock the global TLS map */
static __inline__ void tlsmap_unlock(tlsmap_t*  m)
{
    pthread_mutex_unlock(&_tlsmap_lock);
    (void)m;  /* a good compiler is a happy compiler */
}

/* test to see whether a key is allocated */
static __inline__ int tlsmap_test(tlsmap_t*  m, int  key)
{
    return (TLSMAP_WORD(m,key) & TLSMAP_MASK(key)) != 0;
}

/* set the destructor and bit flag on a newly allocated key */
static __inline__ void tlsmap_set(tlsmap_t*  m, int  key, tls_dtor_t  dtor)
{
    TLSMAP_WORD(m,key) |= TLSMAP_MASK(key);
    m->dtors[key]       = dtor;
}

/* clear the destructor and bit flag on an existing key */
static __inline__ void tlsmap_clear(tlsmap_t*  m, int  key)
{
    TLSMAP_WORD(m,key) &= ~TLSMAP_MASK(key);
    m->dtors[key]       = NULL;
}

/* allocate a new TLS key, return -1 if no room left */
static int tlsmap_alloc(tlsmap_t*  m, tls_dtor_t  dtor)
{
    int  key;

    for ( key = TLSMAP_START; key < TLSMAP_SIZE; key++ ) {
        if ( !tlsmap_test(m, key) ) {
            tlsmap_set(m, key, dtor);
            return key;
        }
    }
    return -1;
}


int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *))
{
    uint32_t   err = ENOMEM;
    tlsmap_t*  map = tlsmap_lock();
    int        k   = tlsmap_alloc(map, destructor_function);

    if (k >= 0) {
        *key = k;
        err  = 0;
    }
    tlsmap_unlock(map);
    return err;
}


/* This deletes a pthread_key_t. note that the standard mandates that this does
 * not call the destructors of non-NULL key values. Instead, it is the
 * responsibility of the caller to properly dispose of the corresponding data
 * and resources, using any means it finds suitable.
 *
 * On the other hand, this function will clear the corresponding key data
 * values in all known threads. this prevents later (invalid) calls to
 * pthread_getspecific() from receiving invalid/stale values.
 */
int pthread_key_delete(pthread_key_t key)
{
    uint32_t             err;
    pthread_internal_t*  thr;
    tlsmap_t*            map;

    if (!TLSMAP_VALIDATE_KEY(key)) {
        return EINVAL;
    }

    map = tlsmap_lock();

    if (!tlsmap_test(map, key)) {
        err = EINVAL;
        goto err1;
    }

    /* clear value in all threads */
    pthread_mutex_lock(&gThreadListLock);
    for ( thr = gThreadList; thr != NULL; thr = thr->next ) {
        /* avoid zombie threads with a negative 'join_count'. these are really
         * already dead and don't have a TLS area anymore.
         *
         * similarly, it is possible to have thr->tls == NULL for threads that
         * were just recently created through pthread_create() but whose
         * startup trampoline (__thread_entry) hasn't been run yet by the
         * scheduler. so check for this too.
         */
        if (thr->join_count < 0 || !thr->tls)
            continue;

        thr->tls[key] = NULL;
    }
    tlsmap_clear(map, key);

    pthread_mutex_unlock(&gThreadListLock);
    err = 0;

err1:
    tlsmap_unlock(map);
    return err;
}


int pthread_setspecific(pthread_key_t key, const void *ptr)
{
    int        err = EINVAL;
    tlsmap_t*  map;

    if (TLSMAP_VALIDATE_KEY(key)) {
        /* check that we're trying to set data for an allocated key */
        map = tlsmap_lock();
        if (tlsmap_test(map, key)) {
            ((uint32_t *)__get_tls())[key] = (uint32_t)ptr;
            err = 0;
        }
        tlsmap_unlock(map);
    }
    return err;
}

void * pthread_getspecific(pthread_key_t key)
{
    if (!TLSMAP_VALIDATE_KEY(key)) {
        return NULL;
    }

    /* for performance reasons, we do not lock/unlock the global TLS map
     * to check that the key is properly allocated. if the key was not
     * allocated, the value read from the TLS should always be NULL
     * due to pthread_key_delete() clearing the values for all threads.
     */
    return (void *)(((unsigned *)__get_tls())[key]);
}

/* POSIX mandates that this be defined in <limits.h>, but we don't have
 * it just yet.
 */
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#  define PTHREAD_DESTRUCTOR_ITERATIONS  4
#endif

/* this function is called from pthread_exit() to remove all TLS key data
 * from this thread's TLS area. this must call the destructor of all keys
 * that have a non-NULL data value (and a non-NULL destructor).
 *
 * because destructors can do funky things like deleting/creating other
 * keys, we need to implement this in a loop
 */
static void pthread_key_clean_all(void)
{
    tlsmap_t*  map;
    void**     tls = (void**)__get_tls();
    int        rounds;

    map = tlsmap_lock();

    for (rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; rounds--)
    {
        int  kk, count = 0;

        for (kk = TLSMAP_START; kk < TLSMAP_SIZE; kk++) {
            if ( tlsmap_test(map, kk) )
            {
                void*       data = tls[kk];
                tls_dtor_t  dtor = map->dtors[kk];

                if (data != NULL && dtor != NULL)
                {
                    /* we need to clear the key data now, this will prevent the
                     * destructor (or a later one) from seeing the old value if
                     * it calls pthread_getspecific() for some odd reason
                     *
                     * we do not do this if 'dtor == NULL' just in case another
                     * destructor function might be responsible for manually
                     * releasing the corresponding data.
                     */
                    tls[kk] = NULL;

                    /* because the destructor is free to call pthread_key_create
                     * and/or pthread_key_delete, we need to temporarily unlock
                     * the TLS map
                     */
                    tlsmap_unlock(map);
                    (*dtor)(data);
                    map = tlsmap_lock();

                    count += 1;
                }
            }
        }

        /* if we didn't call any destructor, there is no need to check the
         * TLS data again
         */
        if (count == 0)
            break;
    }
    tlsmap_unlock(map);
}

// man says this should be in <linux/unistd.h>, but it isn't
extern int tkill(int tid, int sig);

int pthread_kill(pthread_t tid, int sig)
{
    int  ret;
    int  old_errno = errno;
    pthread_internal_t * thread = (pthread_internal_t *)tid;

    ret = tkill(thread->kernel_id, sig);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }

    return ret;
}

extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);

int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    return __rt_sigprocmask(how, set, oset, _NSIG / 8);
}


int pthread_getcpuclockid(pthread_t tid, clockid_t *clockid)
{
    const int            CLOCK_IDTYPE_BITS = 3;
    pthread_internal_t*  thread = (pthread_internal_t*)tid;

    if (!thread)
        return ESRCH;

    *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
    return 0;
}


/* NOTE: this implementation doesn't support an init function that throws a
 * C++ exception or calls fork()
 */
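/* pthread_once() below is a double-checked lock: the unlocked read of
 * *once_control is the fast path once initialization has completed, and the
 * second check under 'once_lock' ensures init_routine runs exactly once
 * even when several threads race through the first check. */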
int pthread_once( pthread_once_t*  once_control, void (*init_routine)(void) )
{
    static pthread_mutex_t  once_lock = PTHREAD_MUTEX_INITIALIZER;

    if (*once_control == PTHREAD_ONCE_INIT) {
        _normal_lock( &once_lock );
        if (*once_control == PTHREAD_ONCE_INIT) {
            (*init_routine)();
            *once_control = ~PTHREAD_ONCE_INIT;
        }
        _normal_unlock( &once_lock );
    }
    return 0;
}