#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

static void dummy_0()
{
}
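/* Each of these is a no-op unless a strong definition is linked in from
 * the module that implements it (e.g. pthread_key_create.c provides
 * __pthread_tsd_run_dtors). */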
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);

static int tl_lock_count;
static int tl_lock_waiters;

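/* The thread-list lock is reentrant, keyed on the owner's tid: nested
 * acquisitions by the owner just bump tl_lock_count, while contended
 * acquisitions futex-wait on __thread_list_lock. */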
void __tl_lock(void)
{
	int tid = __pthread_self()->tid;
	int val = __thread_list_lock;
	if (val == tid) {
		tl_lock_count++;
		return;
	}
	while ((val = a_cas(&__thread_list_lock, 0, tid)))
		__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
}

void __tl_unlock(void)
{
	if (tl_lock_count) {
		tl_lock_count--;
		return;
	}
	a_store(&__thread_list_lock, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

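/* Synchronize with a concurrent thread-list operation: if the lock is
 * held, wait for it to be released, then pass the wake along to any
 * other waiters. */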
void __tl_sync(pthread_t td)
{
	a_barrier();
	int val = __thread_list_lock;
	if (!val) return;
	__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

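	/* Run any cancellation cleanup handlers registered via
	 * pthread_cleanup_push, most recently pushed first. */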
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__block_app_sigs(&set);

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled.
	 * Signals must be blocked since pthread_kill must be AS-safe. */
	LOCK(self->killlock);

	/* The thread list lock must be AS-safe, and thus depends on
	 * application signals being blocked above. */
	__tl_lock();

	/* If this is the only thread in the list, don't proceed with
	 * termination of the thread, but restore the previous lock and
	 * signal state to prepare for exit to call atexit handlers. */
	if (self->next == self) {
		__tl_unlock();
		UNLOCK(self->killlock);
		__restore_sigs(&set);
		exit(0);
	}

	/* At this point we are committed to thread termination. */

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
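		/* Storing bit 30 (0x40000000) marks the mutex as owner-dead
		 * so the next locker observes EOWNERDEAD; a negative prior
		 * value means the waiters flag was set in the lock word. */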
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* Last, unlink thread from the list. This change will not be visible
	 * until the lock is released, which only happens after SYS_exit
	 * has been called, via the exit futex address pointing at the lock.
	 * This needs to happen after any possible calls to LOCK() that might
	 * skip locking if process appears single-threaded. */
	if (!--libc.threads_minus_1) libc.need_locks = -1;
	self->next->prev = self->prev;
	self->prev->next = self->next;
	self->prev = self->next = self;

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

#if 0
	if (state==DT_DETACHED && self->map_base) {
		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* Wake any joiner. */
	a_store(&self->detach_state, DT_EXITED);
	__wake(&self->detach_state, 1, 1);
#endif

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	if (self->detach_state == DT_DETACHED) {
		/* Detached threads must block even implementation-internal
		 * signals, since they will not have a stack in their last
		 * moments of existence. */
		__block_all_sigs(&set);
		self->tid = 0;
	}

	__tl_unlock();
	UNLOCK(self->killlock);

	for (;;) __syscall(SYS_exit, 0);
}

void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

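/* Arguments for a new thread are placed at the top of its stack by
 * __pthread_create; they remain valid for the creating thread only
 * until the thread list is unlocked. */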
struct start_args {
	void *(*start_func)(void *);
	void *start_arg;
	volatile int control;
	unsigned long sig_mask[_NSIG/8/sizeof(long)];
};

static int start(void *p)
{
	struct start_args *args = (struct start_args *)p;
	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
	__pthread_exit(args->start_func(args->start_arg));
	return 0;
}

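/* C11 entry points (thrd_create) return int rather than void *; the
 * result is widened through uintptr_t so it can travel through the
 * void * exit value. */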
static int start_c11(void *p)
{
	struct start_args *args = (struct start_args *)p;
	int (*start)(void*) = (int(*)(void*)) args->start_func;
	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
	return 0;
}

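/* Round up to a whole number of pages; relies on PAGE_SIZE being a
 * power of two. */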
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

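/* Copy the caller-supplied attribute (or default-initialize one for the
 * NULL and C11 cases), then, if no explicit scheduling was requested,
 * inherit the creating thread's policy and scheduling parameters. */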
int __pthread_init_and_check_attr(const pthread_attr_t *restrict attrp, pthread_attr_t *attr)
{
	int policy = 0;
	struct sched_param param = {0};
	int c11 = (attrp == __ATTRP_C11_THREAD);
	int ret;

	if (attrp && !c11) memcpy(attr, attrp, sizeof(pthread_attr_t));

	if (!attrp || c11) {
		pthread_attr_init(attr);
	}

	if (!attr->_a_sched) {
		ret = pthread_getschedparam(pthread_self(), &policy, &param);
		if (ret) return ret;
		attr->_a_policy = policy;
		attr->_a_prio = param.sched_priority;
		attr->_a_deadline = param.sched_deadline;
		attr->_a_period = param.sched_period;
	}

	return 0;
}

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	pthread_attr_t attr = { 0 };
	sigset_t set;

	if (!libc.can_do_threads) return ENOSYS;
	if (!entry) return EINVAL;
	self = __pthread_self();
	__acquire_ptc();

	ret = __pthread_init_and_check_attr(attrp, &attr);
	if (ret) {
		__release_ptc();
		return ret;
	}

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

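	/* Map a region for whatever could not be placed on an
	 * application-provided stack; when the stack is ours too, the
	 * layout from low to high addresses is [guard][stack][TLS][TSD]. */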
	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	new->robust_list.head = &new->robust_list.head;
	new->canary = self->canary;
	new->sysinfo = self->sysinfo;

	/* Setup argument structure for the new thread on its stack.
	 * It's safe to access from the caller only until the thread
	 * list is unlocked. */
	stack -= (uintptr_t)stack % sizeof(uintptr_t);
	stack -= sizeof(struct start_args);
	struct start_args *args = (void *)stack;
	args->start_func = entry;
	args->start_arg = arg;
	args->control = attr._a_sched ? 1 : 0;

	/* Application signals (but not the synccall signal) must be
	 * blocked before the thread list lock can be taken, to ensure
	 * that the lock is AS-safe. */
	__block_app_sigs(&set);

	/* Ensure SIGCANCEL is unblocked in new thread. This requires
	 * working with a copy of the set so we can restore the
	 * original mask in the calling thread. */
	memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));

	__tl_lock();
	if (!libc.threads_minus_1++) libc.need_locks = 1;
	ret = __thread_clone((c11 ? start_c11 : start), flags, new, stack);

	/* All clone failures translate to EAGAIN. The failed thread is
	 * cleaned up before the thread list is unlocked, so it is never
	 * exposed to the application and all transient resource usage is
	 * released before returning. */
	if (ret < 0) {
		if (!--libc.threads_minus_1) libc.need_locks = 0;
		__tl_unlock();
		__restore_sigs(&set);
		__release_ptc();
		if (map) __munmap(map, size);
		return EAGAIN;
	}

	new->next = self->next;
	new->prev = self;
	new->next->prev = new;
	new->prev->next = new;

	*res = new;
	__tl_unlock();
	__restore_sigs(&set);
	__release_ptc();

	/* The new thread is already exposed at this point; a scheduling
	 * failure is reported to the caller without tearing the thread
	 * down. */
	ret = __syscall(SYS_sched_setscheduler,
		new->tid, attr._a_policy, &attr._a_prio, MUSL_TYPE_THREAD);

	return ret < 0 ? -ret : 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);
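
/* Usage sketch (illustrative, not part of this file): creating and
 * joining a thread through the public aliases above.
 *
 *	#include <pthread.h>
 *
 *	static void *worker(void *arg)
 *	{
 *		return arg;
 *	}
 *
 *	int main(void)
 *	{
 *		pthread_t t;
 *		void *res;
 *		if (pthread_create(&t, 0, worker, (void *)1)) return 1;
 *		pthread_join(t, &res);
 *		return res != (void *)1;
 *	}
 */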