#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

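/* No-op placeholders for cross-cutting hooks. Each weak alias below is
 * replaced by a strong definition when the translation unit providing
 * it (e.g. pthread_key_create.c for __pthread_tsd_run_dtors) is linked. */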
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
weak_alias(dummy_0, __membarrier_init);

static int tl_lock_count;
static int tl_lock_waiters;

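/* The global thread list lock is a futex-based lock word holding the
 * owner's tid. It is effectively recursive for its owner: a re-entrant
 * acquisition just increments tl_lock_count instead of blocking. */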
void __tl_lock(void)
{
	int tid = __pthread_self()->tid;
	int val = __thread_list_lock;
	if (val == tid) {
		tl_lock_count++;
		return;
	}
	while ((val = a_cas(&__thread_list_lock, 0, tid)))
		__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
}

void __tl_unlock(void)
{
	if (tl_lock_count) {
		tl_lock_count--;
		return;
	}
	a_store(&__thread_list_lock, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

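/* Synchronize with a concurrent holder of the thread list lock: wait
 * until the lock word is observed clear, and pass the futex wake on
 * to any remaining waiters. */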
void __tl_sync(pthread_t td)
{
	a_barrier();
	int val = __thread_list_lock;
	if (!val) return;
	__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

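	/* Run cancellation cleanup handlers pushed with
	 * pthread_cleanup_push, most recently pushed first. */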
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__block_app_sigs(&set);

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

	if (state==DT_DETACHED && self->map_base) {
		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. This must be
		 * done before any locks are taken, to avoid lock ordering
		 * issues that could lead to deadlock. */
		__vm_wait();
	}

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled.
	 * Signals must be blocked since pthread_kill must be AS-safe. */
	LOCK(self->killlock);

	/* The thread list lock must be AS-safe, and thus depends on
	 * application signals being blocked above. */
	__tl_lock();

	/* If this is the only thread in the list, don't proceed with
	 * termination of the thread, but restore the previous lock and
	 * signal state to prepare for exit to call atexit handlers. */
	if (self->next == self) {
		__tl_unlock();
		UNLOCK(self->killlock);
		self->detach_state = state;
		__restore_sigs(&set);
		exit(0);
	}

	/* At this point we are committed to thread termination. */

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		/* Bit 128 of the mutex type marks it process-shared; priv is
		 * nonzero (private-futex wake) only when that bit is clear. */
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		/* 0x40000000 is FUTEX_OWNER_DIED; a negative previous value
		 * indicates the waiters flag was set. */
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* Last, unlink thread from the list. This change will not be visible
	 * until the lock is released, which only happens after SYS_exit
	 * has been called, via the exit futex address pointing at the lock.
	 * This needs to happen after any possible calls to LOCK() that might
	 * skip locking if process appears single-threaded. */
	if (!--libc.threads_minus_1) libc.need_locks = -1;
	self->next->prev = self->prev;
	self->prev->next = self->next;
	self->prev = self->next = self;

	if (state==DT_DETACHED && self->map_base) {
		/* Detached threads must block even implementation-internal
		 * signals, since they will not have a stack in their last
		 * moments of existence. */
		__block_all_sigs(&set);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* Wake any joiner. */
	a_store(&self->detach_state, DT_EXITED);
	__wake(&self->detach_state, 1, 1);

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	self->tid = 0;
	UNLOCK(self->killlock);

	for (;;) __syscall(SYS_exit, 0);
}

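/* Maintain the LIFO list of cancellation cleanup handlers recorded in
 * the thread descriptor; reached via the pthread_cleanup_push and
 * pthread_cleanup_pop macros. */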
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

struct start_args {
	void *(*start_func)(void *);
	void *start_arg;
	volatile int control;
	unsigned long sig_mask[_NSIG/8/sizeof(long)];
};
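/* The control member coordinates explicit scheduling between the
 * creating and new threads: 0 means no explicit scheduling was
 * requested, 1 means the creator will apply it, 2 means the new
 * thread is blocked waiting for it, and 3 means the scheduling
 * attempt failed and the new thread must exit without running the
 * start function. */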

static int start(void *p)
{
	struct start_args *args = p;
	int state = args->control;
	if (state) {
		/* Signal the creator that we are waiting, then sleep
		 * until the scheduling attempt has completed. */
		if (a_cas(&args->control, 1, 2)==1)
			__wait(&args->control, 0, 2, 1);
		if (args->control) {
			/* Scheduling failed: exit without running the
			 * start function. Pointing the exit futex at the
			 * control word lets the creator observe the exit. */
			__syscall(SYS_set_tid_address, &args->control);
			for (;;) __syscall(SYS_exit, 0);
		}
	}
	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
	__pthread_exit(args->start_func(args->start_arg));
	return 0;
}

static int start_c11(void *p)
{
	struct start_args *args = p;
	/* C11 thread entry points return int; recover the real type and
	 * widen the result for __pthread_exit. */
	int (*start)(void*) = (int(*)(void*)) args->start_func;
	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
	return 0;
}

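/* Round up to a whole number of pages. */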
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

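/* A FILE lock value of -1 means locking is skipped because the process
 * is single-threaded; resetting it to 0 enables normal locking before
 * the first thread is created. */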
static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	pthread_attr_t attr = { 0 };
	sigset_t set;

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		__membarrier_init();
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attrp || c11) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

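	/* When the implementation allocates the mapping, its layout from
	 * low to high addresses is: guard pages (PROT_NONE), the stack
	 * growing down from its high end, the TLS block, then the TSD
	 * slots at the very top. */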
	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	/* __copy_tls installs the TLS image and returns the address of
	 * the new thread descriptor within it. */
	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	new->robust_list.head = &new->robust_list.head;
	new->canary = self->canary;
	new->sysinfo = self->sysinfo;

	/* Set up the argument structure for the new thread on its stack.
	 * It's safe to access from the caller only until the thread
	 * list is unlocked. */
	stack -= (uintptr_t)stack % sizeof(uintptr_t);
	stack -= sizeof(struct start_args);
	struct start_args *args = (void *)stack;
	args->start_func = entry;
	args->start_arg = arg;
	args->control = attr._a_sched ? 1 : 0;

	/* Application signals (but not the synccall signal) must be
	 * blocked before the thread list lock can be taken, to ensure
	 * that the lock is AS-safe. */
	__block_app_sigs(&set);

	/* Ensure SIGCANCEL is unblocked in new thread. This requires
	 * working with a copy of the set so we can restore the
	 * original mask in the calling thread. */
	memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));

	__tl_lock();
	if (!libc.threads_minus_1++) libc.need_locks = 1;
	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);

	/* All clone failures translate to EAGAIN. If explicit scheduling
	 * was requested, attempt it before unlocking the thread list so
	 * that the failed thread is never exposed and so that we can
	 * clean up all transient resource usage before returning. */
	if (ret < 0) {
		ret = -EAGAIN;
	} else if (attr._a_sched) {
		ret = __syscall(SYS_sched_setscheduler,
			new->tid, attr._a_policy, &attr._a_prio);
		if (a_swap(&args->control, ret ? 3 : 0)==2)
			__wake(&args->control, 1, 1);
		if (ret)
			__wait(&args->control, 0, 3, 0);
	}

	if (ret >= 0) {
		new->next = self->next;
		new->prev = self;
		new->next->prev = new;
		new->prev->next = new;
	} else {
		if (!--libc.threads_minus_1) libc.need_locks = 0;
	}
	__tl_unlock();
	__restore_sigs(&set);
	__release_ptc();

	if (ret < 0) {
		if (map) __munmap(map, size);
		return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

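/* The standard names are weak aliases so that libc-internal callers
 * (e.g. thrd_create, which passes __ATTRP_C11_THREAD) can use the
 * __-prefixed symbols directly. */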
weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);