#define _GNU_SOURCE
#include "usr_lib_define.h"
#include "pthread_impl.h"
#include "libc.h"
#include "lock.h"
#include "string.h"
#include "stdbool.h"
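
/* Exit path for a thread: run any pending cancellation cleanup
 * handlers, publish the result, settle the detach-state race with
 * pthread_detach, clear the tid of detached threads, and finally
 * invoke SYS_exit, which does not return. */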
_LIBC_TEXT_SECTION _Noreturn void __pthread_exit(void *result)
{
	pthread_t self = pthread_self();

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	LOCK(self->killlock);
	if (state == DT_DETACHED) {
		/* Detached threads must block even implementation-internal
		 * signals, since they will not have a stack in their last
		 * moments of existence. */
		self->tid = 0;
	}
	UNLOCK(self->killlock);

	for (;;) __syscall(SYS_exit, 0);
}
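
/* Entry trampolines for new threads. The kernel starts the thread in
 * one of these wrappers, which unpack the start_args block that
 * __pthread_create placed on the new stack and feed the entry
 * function's return value to __pthread_exit. start_c11 adapts C11
 * thrd_create entry functions, which return int rather than void *. */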
struct start_args {
	void *(*start_func)(void *);
	void *start_arg;
};

_LIBC_TEXT_SECTION static int start(void *p)
{
	struct start_args *args = (struct start_args *)p;
	__pthread_exit(args->start_func(args->start_arg));
	return 0;
}

_LIBC_TEXT_SECTION static int start_c11(void *p)
{
	struct start_args *args = (struct start_args *)p;
	int (*start)(void *) = (int (*)(void *))args->start_func;
	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
	return 0;
}
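
/* Resolve the effective attributes for a new thread: copy the
 * caller's attributes (or initialize defaults for a NULL or C11
 * attrp), inherit the creator's scheduling parameters when none were
 * set explicitly, and reject any policy other than SCHED_RR or
 * SCHED_FIFO or any priority outside [0, PTHREAD_PRIORITY_LOWEST]. */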
_LIBC_TEXT_SECTION static int __pthread_init_and_check_attr(const pthread_attr_t *restrict attrp, pthread_attr_t *attr)
{
	int policy = 0;
	struct sched_param param = { 0 };
	int c11 = (attrp == __ATTRP_C11_THREAD);
	int ret;

	if (attrp && !c11) memcpy(attr, attrp, sizeof(pthread_attr_t));

	if (!attrp || c11) {
		pthread_attr_init(attr);
	}

	if (!attr->_a_sched) {
		ret = pthread_getschedparam(pthread_self(), &policy, &param);
		if (ret) return ret;
		attr->_a_policy = policy;
		attr->_a_prio = param.sched_priority;
	}

	if (attr->_a_policy != SCHED_RR && attr->_a_policy != SCHED_FIFO) {
		return EINVAL;
	}

	if (attr->_a_prio < 0 || attr->_a_prio > PTHREAD_PRIORITY_LOWEST) {
		return EINVAL;
	}

	return 0;
}
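
/* Create the kernel-side thread. Unlike a raw clone(), this port asks
 * the kernel for a user thread directly: TP_ADJ(thread) becomes the
 * new thread's thread-pointer area, sp its initial stack pointer, and
 * join_flag tells the kernel whether the thread must be joined. The
 * flags argument is currently unused. The returned tid is recorded in
 * the pthread structure. */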
_LIBC_TEXT_SECTION static int __thread_clone(int (*func)(void *), int flags, struct pthread *thread, unsigned char *sp)
{
	int ret;
	bool join_flag = false;
	unsigned long user_area, user_sp;

	if (thread->detach_state == DT_JOINABLE) {
		join_flag = true;
	}

	user_area = (unsigned long)TP_ADJ(thread);
	user_sp = (unsigned long)sp;
	ret = __syscall(SYS_create_user_thread, func, user_area, user_sp, join_flag);
	if (ret < 0) {
		return ret;
	}

	thread->tid = (unsigned long)ret;
	return 0;
}
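
/* Create a new thread. The pthread structure lives at the top of the
 * thread's stack (caller-provided or malloc'd here); the start_args
 * block is pushed just below it, and the remaining space becomes the
 * thread's stack. On clone failure the stack mapping is freed and
 * EAGAIN is returned. */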
_LIBC_TEXT_SECTION int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size;
	struct pthread *self, *new;
	unsigned char *stack = 0, *stack_limit, *map_base = 0;
	pthread_attr_t attr = { 0 };

	if (!entry) return EINVAL;
	self = pthread_self();

	ret = __pthread_init_and_check_attr(attrp, &attr);
	if (ret) {
		return ret;
	}

	size = attr._a_stacksize;
	if (attr._a_stackaddr) {
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
	} else {
		map_base = malloc(size);
		if (map_base == NULL) goto fail;
		stack_limit = map_base;
		stack = map_base + size;
	}
	new = (struct pthread *)(((uintptr_t)stack - sizeof(struct pthread)) & -16);
	new->map_base = stack_limit;
	new->map_size = size;
	new->stack = new;
	new->stack_size = (uintptr_t)new->stack - (uintptr_t)stack_limit;
	new->guard_size = 0;
	new->self = new;
	new->tsd = (void *)NULL;
	new->locale = NULL;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	new->robust_list.head = &new->robust_list.head;
	new->CANARY = self->CANARY;
	new->sysinfo = self->sysinfo;

	/* Setup argument structure for the new thread on its stack,
	 * just below the pthread structure. It's safe to access from
	 * the caller only until the thread list is unlocked. */
	stack = (unsigned char *)new;
	stack -= (uintptr_t)stack % sizeof(uintptr_t);
	stack -= sizeof(struct start_args);
	struct start_args *args = (void *)stack;
	args->start_func = entry;
	args->start_arg = arg;

	ret = __thread_clone((c11 ? start_c11 : start), 0, new, stack);

	/* All clone failures translate to EAGAIN. If explicit scheduling
	 * was requested, attempt it before unlocking the thread list so
	 * that the failed thread is never exposed and so that we can
	 * clean up all transient resource usage before returning. */
	if (ret < 0) {
		ret = -EAGAIN;
	} else {
		*res = new;
		ret = __syscall(SYS_sched_setscheduler,
			new->tid, attr._a_policy, attr._a_prio);
	}

	if (ret < 0) {
		if (map_base) free(map_base);
		return -ret;
	}

	return 0;
fail:
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);
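
/* Usage sketch (illustrative only, not part of this file): creating
 * and joining a thread through the aliases exported above. Error
 * handling is abbreviated.
 *
 *     static void *worker(void *arg)
 *     {
 *         return arg;
 *     }
 *
 *     pthread_t t;
 *     void *ret;
 *     if (pthread_create(&t, NULL, worker, (void *)1) == 0)
 *         pthread_join(t, &ret);
 *
 * With attrp == NULL the thread inherits the creator's scheduling
 * parameters; an attribute object with explicit scheduling must
 * specify SCHED_RR or SCHED_FIFO and a priority within
 * [0, PTHREAD_PRIORITY_LOWEST], or pthread_create fails with EINVAL. */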