/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

#include <async_safe/log.h>

#include "private/bionic_defs.h"
#include "private/ErrnoRestorer.h"
#include "pthread_internal.h"

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_init(pthread_attr_t* attr) {
  attr->flags = 0;
  attr->stack_base = nullptr;
  attr->stack_size = PTHREAD_STACK_SIZE_DEFAULT;
  attr->guard_size = PTHREAD_GUARD_SIZE;
  attr->sched_policy = SCHED_NORMAL;
  attr->sched_priority = 0;
  return 0;
}

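// There is nothing to free here; the attribute is deliberately filled with the
// poison value 0x42, presumably so that accidental use after destruction is
// easier to notice.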
__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_destroy(pthread_attr_t* attr) {
  memset(attr, 0x42, sizeof(pthread_attr_t));
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_setinheritsched(pthread_attr_t* attr, int flag) {
  if (flag == PTHREAD_EXPLICIT_SCHED) {
    attr->flags &= ~PTHREAD_ATTR_FLAG_INHERIT;
    attr->flags |= PTHREAD_ATTR_FLAG_EXPLICIT;
  } else if (flag == PTHREAD_INHERIT_SCHED) {
    attr->flags |= PTHREAD_ATTR_FLAG_INHERIT;
    attr->flags &= ~PTHREAD_ATTR_FLAG_EXPLICIT;
  } else {
    return EINVAL;
  }
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_getinheritsched(const pthread_attr_t* attr, int* flag) {
  if ((attr->flags & PTHREAD_ATTR_FLAG_INHERIT) != 0) {
    *flag = PTHREAD_INHERIT_SCHED;
  } else if ((attr->flags & PTHREAD_ATTR_FLAG_EXPLICIT) != 0) {
    *flag = PTHREAD_EXPLICIT_SCHED;
  } else {
    // Historical behavior before P, when pthread_attr_setinheritsched was added.
    *flag = (attr->sched_policy != SCHED_NORMAL) ? PTHREAD_EXPLICIT_SCHED : PTHREAD_INHERIT_SCHED;
  }
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_setdetachstate(pthread_attr_t* attr, int state) {
  if (state == PTHREAD_CREATE_DETACHED) {
    attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
  } else if (state == PTHREAD_CREATE_JOINABLE) {
    attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
  } else {
    return EINVAL;
  }
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_getdetachstate(const pthread_attr_t* attr, int* state) {
  *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED) ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE;
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_setschedpolicy(pthread_attr_t* attr, int policy) {
  attr->sched_policy = policy;
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_getschedpolicy(const pthread_attr_t* attr, int* policy) {
  *policy = attr->sched_policy;
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_setschedparam(pthread_attr_t* attr, const sched_param* param) {
  attr->sched_priority = param->sched_priority;
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_getschedparam(const pthread_attr_t* attr, sched_param* param) {
  param->sched_priority = attr->sched_priority;
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_setstacksize(pthread_attr_t* attr, size_t stack_size) {
  if (stack_size < PTHREAD_STACK_MIN) {
    return EINVAL;
  }
  attr->stack_size = stack_size;
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_getstacksize(const pthread_attr_t* attr, size_t* stack_size) {
  void* unused;
  return pthread_attr_getstack(attr, &unused, stack_size);
}

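// Illustrative usage, not part of this file: callers are expected to supply a
// page-aligned base and a page-multiple size of at least PTHREAD_STACK_MIN,
// typically an mmap'd region, e.g.
//   void* base = mmap(nullptr, size, PROT_READ | PROT_WRITE,
//                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   pthread_attr_setstack(&attr, base, size);
// Anything else is rejected with EINVAL below.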
__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_setstack(pthread_attr_t* attr, void* stack_base, size_t stack_size) {
  if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
    return EINVAL;
  }
  if (reinterpret_cast<uintptr_t>(stack_base) & (PAGE_SIZE - 1)) {
    return EINVAL;
  }
  attr->stack_base = stack_base;
  attr->stack_size = stack_size;
  return 0;
}

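// Returns the kernel's idea of where the main thread's stack starts, taken from
// the "startstack" field of /proc/self/stat.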
static uintptr_t __get_main_stack_startstack() {
  FILE* fp = fopen("/proc/self/stat", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/stat: %s", strerror(errno));
  }

  char line[BUFSIZ];
  if (fgets(line, sizeof(line), fp) == nullptr) {
    async_safe_fatal("couldn't read /proc/self/stat: %s", strerror(errno));
  }

  fclose(fp);

  // See man 5 proc. There's no reason comm can't contain ' ' or ')',
  // so we search backwards for the end of it. We're looking for this field:
  //
  //  startstack %lu (28) The address of the start (i.e., bottom) of the stack.
  uintptr_t startstack = 0;
  const char* end_of_comm = strrchr(line, ')');
  if (sscanf(end_of_comm + 1, " %*c "
             "%*d %*d %*d %*d %*d "
             "%*u %*u %*u %*u %*u %*u %*u "
             "%*d %*d %*d %*d %*d %*d "
             "%*u %*u %*d %*u %*u %*u %" SCNuPTR, &startstack) != 1) {
    async_safe_fatal("couldn't parse /proc/self/stat");
  }

  return startstack;
}

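// The main thread's stack isn't created by pthread_create, so its bounds are
// reconstructed at query time: the size comes from RLIMIT_STACK and the base is
// derived from the end of the /proc/self/maps region containing startstack.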
static int __pthread_attr_getstack_main_thread(void** stack_base, size_t* stack_size) {
  ErrnoRestorer errno_restorer;

  rlimit stack_limit;
  if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
    return errno;
  }

  // If the current RLIMIT_STACK is RLIM_INFINITY, only admit to an 8MiB stack
  // in case callers such as ART take infinity too literally.
  if (stack_limit.rlim_cur == RLIM_INFINITY) {
    stack_limit.rlim_cur = 8 * 1024 * 1024;
  }

  // Ask the kernel where our main thread's stack started.
  uintptr_t startstack = __get_main_stack_startstack();

  // Hunt for the region that contains that address.
  FILE* fp = fopen("/proc/self/maps", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/maps: %s", strerror(errno));
  }
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != nullptr) {
    uintptr_t lo, hi;
    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &lo, &hi) == 2) {
      if (lo <= startstack && startstack <= hi) {
        *stack_size = stack_limit.rlim_cur;
        *stack_base = reinterpret_cast<void*>(hi - *stack_size);
        fclose(fp);
        return 0;
      }
    }
  }
  async_safe_fatal("Stack not found in /proc/self/maps");
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_getstack(const pthread_attr_t* attr, void** stack_base, size_t* stack_size) {
  *stack_base = attr->stack_base;
  *stack_size = attr->stack_size;
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_setguardsize(pthread_attr_t* attr, size_t guard_size) {
  attr->guard_size = guard_size;
  return 0;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_getguardsize(const pthread_attr_t* attr, size_t* guard_size) {
  *guard_size = attr->guard_size;
  return 0;
}

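// Illustrative usage, not part of this file: a thread can discover its own stack
// bounds with
//   pthread_attr_t attr;
//   pthread_getattr_np(pthread_self(), &attr);
//   void* base;
//   size_t size;
//   pthread_attr_getstack(&attr, &base, &size);
//   pthread_attr_destroy(&attr);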
__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_getattr_np(pthread_t t, pthread_attr_t* attr) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(t);
  *attr = thread->attr;
  // We prefer reading join_state here rather than having pthread_detach set
  // thread->attr.flags, because the latter would be a data race.
  if (atomic_load(&thread->join_state) == THREAD_DETACHED) {
    attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
  }
  // The main thread's stack information is not stored in thread->attr, so we need to
  // collect that at runtime.
  if (thread->tid == getpid()) {
    return __pthread_attr_getstack_main_thread(&attr->stack_base, &attr->stack_size);
  }
  return 0;
}

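// Only PTHREAD_SCOPE_SYSTEM is supported: PTHREAD_SCOPE_PROCESS is reported as
// unsupported, and anything else is invalid.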
__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_setscope(pthread_attr_t*, int scope) {
  if (scope == PTHREAD_SCOPE_SYSTEM) {
    return 0;
  }
  if (scope == PTHREAD_SCOPE_PROCESS) {
    return ENOTSUP;
  }
  return EINVAL;
}

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_attr_getscope(const pthread_attr_t*, int* scope) {
  *scope = PTHREAD_SCOPE_SYSTEM;
  return 0;
}