#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>
#include <stdarg.h>

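/* Variadic logging helper: formats like printf and writes to stdout. */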
void log_print(const char *info, ...)
{
	va_list ap;
	va_start(ap, info);
	vfprintf(stdout, info, ap);
	va_end(ap);
}

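/* Weak no-op stand-ins; the real definitions in other translation
 * units (e.g. pthread_key_create.c, stdio) override these when those
 * units are linked in. */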
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
weak_alias(dummy_0, __membarrier_init);

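/* Recursion depth for the owner of the thread-list lock, and a flag
 * indicating whether any waiters need a futex wake on unlock. */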
static int tl_lock_count;
static int tl_lock_waiters;

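/* Acquire the global thread-list lock. The lock is reentrant for the
 * owning thread, keyed on its kernel tid. */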
void __tl_lock(void)
{
	int tid = __pthread_self()->tid;
	int val = __thread_list_lock;
	if (val == tid) {
		tl_lock_count++;
		return;
	}
	while ((val = a_cas(&__thread_list_lock, 0, tid)))
		__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
}

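/* Release the thread-list lock, or just drop one level of recursion;
 * wake one waiter if the lock was fully released. */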
void __tl_unlock(void)
{
	if (tl_lock_count) {
		tl_lock_count--;
		return;
	}
	a_store(&__thread_list_lock, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

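/* Wait until any in-progress thread-list operation has released the
 * lock, without acquiring it; used to synchronize with thread exit. */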
void __tl_sync(pthread_t td)
{
	a_barrier();
	int val = __thread_list_lock;
	if (!val) return;
	__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

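/* Terminate the calling thread: run cancellation cleanup handlers and
 * TSD destructors, unlink the thread from the list, and exit. If this
 * is the last thread, exit the whole process instead. */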
_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled. */
	LOCK(self->killlock);

	/* The thread list lock must be AS-safe, and thus requires
	 * application signals to be blocked before it can be taken. */
	__block_app_sigs(&set);
	__tl_lock();

	/* If this is the only thread in the list, don't proceed with
	 * termination of the thread, but restore the previous lock and
	 * signal state to prepare for exit to call atexit handlers. */
	if (self->next == self) {
		__tl_unlock();
		__restore_sigs(&set);
		UNLOCK(self->killlock);
		exit(0);
	}

	/* At this point we are committed to thread termination. Unlink
	 * the thread from the list. This change will not be visible
	 * until the lock is released, which only happens after SYS_exit
	 * has been called, via the exit futex address pointing at the lock. */
	libc.threads_minus_1--;
	self->next->prev = self->prev;
	self->prev->next = self->next;
	self->prev = self->next = self;

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

	if (state==DT_DETACHED && self->map_base) {
		/* Detached threads must block even implementation-internal
		 * signals, since they will not have a stack in their last
		 * moments of existence. */
		__block_all_sigs(&set);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. */
		__vm_wait();

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* Wake any joiner. */
	__wake(&self->detach_state, 1, 1);

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	self->tid = 0;
	UNLOCK(self->killlock);

	for (;;) __syscall(SYS_exit, 0);
}

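/* Push/pop entries on the calling thread's cancellation cleanup stack;
 * these back the pthread_cleanup_push/pop macros. */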
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

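/* Arguments passed to the new thread on its own stack. The control
 * field coordinates explicit-scheduling setup with the parent; the
 * saved signal mask is restored in the child before it runs. */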
struct start_args {
	void *(*start_func)(void *);
	void *start_arg;
	volatile int control;
	unsigned long sig_mask[_NSIG/8/sizeof(long)];
};

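/* Entry point for new threads. The control field implements a small
 * handshake with pthread_create: 0 means no explicit scheduling was
 * requested; 1 means the parent is still applying the requested
 * scheduling, so advance to 2 and wait; 3 means scheduling failed and
 * the thread must exit without ever running the start function. */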
static int start(void *p)
{
	struct start_args *args = p;
	int state = args->control;
	if (state) {
		if (a_cas(&args->control, 1, 2)==1)
			__wait(&args->control, 0, 2, 1);
		if (args->control) {
			__syscall(SYS_set_tid_address, &args->control);
			for (;;) __syscall(SYS_exit, 0);
		}
	}
	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
	__pthread_exit(args->start_func(args->start_arg));
	return 0;
}

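/* C11 threads use a start function returning int; wrap its result in
 * the void pointer that __pthread_exit expects. */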
static int start_c11(void *p)
{
	struct start_args *args = p;
	int (*start)(void*) = (int(*)(void*)) args->start_func;
	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
	return 0;
}

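/* Round up to a whole number of pages. */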
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

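/* Convert a FILE lock from the single-threaded sentinel (negative) to
 * the normal unlocked state (0) so locking is honored once the
 * process becomes multi-threaded. */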
static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

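/* Create a new thread: set up its stack, TLS, and TSD areas (either in
 * a fresh mapping or in caller-provided storage), then clone it and
 * link it into the thread list under the thread-list lock. */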
int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	pthread_attr_t attr = { 0 };
	sigset_t set;

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		__membarrier_init();
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attrp || c11) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	new->robust_list.head = &new->robust_list.head;
	new->CANARY = self->CANARY;
	new->sysinfo = self->sysinfo;

	/* Setup argument structure for the new thread on its stack.
	 * It's safe to access from the caller only until the thread
	 * list is unlocked. */
	stack -= (uintptr_t)stack % sizeof(uintptr_t);
	stack -= sizeof(struct start_args);
	struct start_args *args = (void *)stack;
	args->start_func = entry;
	args->start_arg = arg;
	args->control = attr._a_sched ? 1 : 0;

	/* Application signals (but not the synccall signal) must be
	 * blocked before the thread list lock can be taken, to ensure
	 * that the lock is AS-safe. */
	__block_app_sigs(&set);

	/* Ensure SIGCANCEL is unblocked in new thread. This requires
	 * working with a copy of the set so we can restore the
	 * original mask in the calling thread. */
	memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));

	__tl_lock();
	libc.threads_minus_1++;
	ret = __clone((c11 ? start_c11 : start), stack, flags, args,
		&new->tid, TP_ADJ(new), &__thread_list_lock);

	/* All clone failures translate to EAGAIN. If explicit scheduling
	 * was requested, attempt it before unlocking the thread list so
	 * that the failed thread is never exposed and so that we can
	 * clean up all transient resource usage before returning. */
	if (ret < 0) {
		ret = -EAGAIN;
	} else if (attr._a_sched) {
		ret = __syscall(SYS_sched_setscheduler,
			new->tid, attr._a_policy, &attr._a_prio);
		if (a_swap(&args->control, ret ? 3 : 0)==2)
			__wake(&args->control, 1, 1);
		if (ret)
			__wait(&args->control, 0, 3, 0);
	}

	if (ret >= 0) {
		new->next = self->next;
		new->prev = self;
		new->next->prev = new;
		new->prev->next = new;
	} else {
		libc.threads_minus_1--;
	}
	__tl_unlock();
	__restore_sigs(&set);
	__release_ptc();

	if (ret < 0) {
		if (map) __munmap(map, size);
		return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);

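/* Validate a pthread_t against the live thread list: return the
 * thread if it is the caller or is found on the circular list,
 * otherwise log a diagnostic and return NULL. Callers must hold the
 * thread-list lock for the traversal to be safe. */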
struct pthread* __pthread_list_find(pthread_t thread_id, const char* info)
{
	struct pthread *thread = (struct pthread *)thread_id;
	if (NULL == thread) {
		log_print("invalid pthread_t (0) passed to %s\n", info);
		return NULL;
	}

	struct pthread *self = __pthread_self();
	if (thread == self) {
		return thread;
	}
	struct pthread *t = self;
	t = t->next;
	while (t != self) {
		if (t == thread) return thread;
		t = t->next;
	}
	log_print("invalid pthread_t %p passed to %s\n", thread, info);
	return NULL;
}

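/* Return the kernel tid for a thread, or -1 if the handle does not
 * name a live thread. The thread-list lock is taken so the lookup
 * cannot race with thread creation or exit. Usage sketch ("worker" is
 * a hypothetical start routine):
 *
 *	pthread_t t;
 *	pthread_create(&t, 0, worker, 0);
 *	pid_t tid = pthread_gettid_np(t);
 */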
pid_t __pthread_gettid_np(pthread_t t)
{
	__tl_lock();
	struct pthread* thread = __pthread_list_find(t, "pthread_gettid_np");
	__tl_unlock();
	return thread ? thread->tid : -1;
}
weak_alias(__pthread_gettid_np, pthread_gettid_np);