/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#pragma once

/**
 * @file pthread.h
 * @brief POSIX threads.
 */

#include <sys/cdefs.h>

#include <limits.h>
#include <bits/page_size.h>
#include <bits/pthread_types.h>
#include <sched.h>
#include <sys/types.h>
#include <time.h>

__BEGIN_DECLS

enum {
  PTHREAD_MUTEX_NORMAL = 0,
  PTHREAD_MUTEX_RECURSIVE = 1,
  PTHREAD_MUTEX_ERRORCHECK = 2,

  PTHREAD_MUTEX_ERRORCHECK_NP = PTHREAD_MUTEX_ERRORCHECK,
  PTHREAD_MUTEX_RECURSIVE_NP  = PTHREAD_MUTEX_RECURSIVE,

  PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL
};

#define PTHREAD_MUTEX_INITIALIZER { { ((PTHREAD_MUTEX_NORMAL & 3) << 14) } }
#define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP { { ((PTHREAD_MUTEX_RECURSIVE & 3) << 14) } }
#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP { { ((PTHREAD_MUTEX_ERRORCHECK & 3) << 14) } }

#define PTHREAD_COND_INITIALIZER  { { 0 } }
#define PTHREAD_COND_INITIALIZER_MONOTONIC_NP  { { 1 << 1 } }

#define PTHREAD_RWLOCK_INITIALIZER  { { 0 } }

enum {
  PTHREAD_RWLOCK_PREFER_READER_NP = 0,
  PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP = 1,
};

#define PTHREAD_ONCE_INIT 0

#define PTHREAD_BARRIER_SERIAL_THREAD (-1)

#if defined(__LP64__)
#define PTHREAD_STACK_MIN 16384
#else
#define PTHREAD_STACK_MIN 8192
#endif

#define PTHREAD_CREATE_DETACHED 1
#define PTHREAD_CREATE_JOINABLE 0

#define PTHREAD_EXPLICIT_SCHED 0
#define PTHREAD_INHERIT_SCHED 1

#define PTHREAD_PRIO_NONE 0
#define PTHREAD_PRIO_INHERIT 1

#define PTHREAD_PROCESS_PRIVATE 0
#define PTHREAD_PROCESS_SHARED 1

#define PTHREAD_SCOPE_SYSTEM 0
#define PTHREAD_SCOPE_PROCESS 1

int pthread_atfork(void (* _Nullable __prepare)(void), void (* _Nullable __parent)(void), void (* _Nullable __child)(void));

int pthread_attr_destroy(pthread_attr_t* _Nonnull __attr);
int pthread_attr_getdetachstate(const pthread_attr_t* _Nonnull __attr, int* _Nonnull __state);
int pthread_attr_getguardsize(const pthread_attr_t* _Nonnull __attr, size_t* _Nonnull __size);

#if __BIONIC_AVAILABILITY_GUARD(28)
int pthread_attr_getinheritsched(const pthread_attr_t* _Nonnull __attr, int* _Nonnull __flag) __INTRODUCED_IN(28);
#endif /* __BIONIC_AVAILABILITY_GUARD(28) */

int pthread_attr_getschedparam(const pthread_attr_t* _Nonnull __attr, struct sched_param* _Nonnull __param);
int pthread_attr_getschedpolicy(const pthread_attr_t* _Nonnull __attr, int* _Nonnull __policy);
int pthread_attr_getscope(const pthread_attr_t* _Nonnull __attr, int* _Nonnull __scope);
int pthread_attr_getstack(const pthread_attr_t* _Nonnull __attr, void* _Nullable * _Nonnull __addr, size_t* _Nonnull __size);
int pthread_attr_getstacksize(const pthread_attr_t* _Nonnull __attr, size_t* _Nonnull __size);
int pthread_attr_init(pthread_attr_t* _Nonnull __attr);
int pthread_attr_setdetachstate(pthread_attr_t* _Nonnull __attr, int __state);
int pthread_attr_setguardsize(pthread_attr_t* _Nonnull __attr, size_t __size);

#if __BIONIC_AVAILABILITY_GUARD(28)
int pthread_attr_setinheritsched(pthread_attr_t* _Nonnull __attr, int __flag) __INTRODUCED_IN(28);
#endif /* __BIONIC_AVAILABILITY_GUARD(28) */

int pthread_attr_setschedparam(pthread_attr_t* _Nonnull __attr, const struct sched_param* _Nonnull __param);
int pthread_attr_setschedpolicy(pthread_attr_t* _Nonnull __attr, int __policy);
int pthread_attr_setscope(pthread_attr_t* _Nonnull __attr, int __scope);
int pthread_attr_setstack(pthread_attr_t* _Nonnull __attr, void* _Nonnull __addr, size_t __size);
int pthread_attr_setstacksize(pthread_attr_t* _Nonnull __attr, size_t __size);

int pthread_condattr_destroy(pthread_condattr_t* _Nonnull __attr);
int pthread_condattr_getclock(const pthread_condattr_t* _Nonnull __attr, clockid_t* _Nonnull __clock);
int pthread_condattr_getpshared(const pthread_condattr_t* _Nonnull __attr, int* _Nonnull __shared);
int pthread_condattr_init(pthread_condattr_t* _Nonnull __attr);
int pthread_condattr_setclock(pthread_condattr_t* _Nonnull __attr, clockid_t __clock);
int pthread_condattr_setpshared(pthread_condattr_t* _Nonnull __attr, int __shared);

int pthread_cond_broadcast(pthread_cond_t* _Nonnull __cond);

#if __BIONIC_AVAILABILITY_GUARD(30)
int pthread_cond_clockwait(pthread_cond_t* _Nonnull __cond, pthread_mutex_t* _Nonnull __mutex, clockid_t __clock,
                           const struct timespec* _Nullable __timeout) __INTRODUCED_IN(30);
#endif /* __BIONIC_AVAILABILITY_GUARD(30) */

int pthread_cond_destroy(pthread_cond_t* _Nonnull __cond);
int pthread_cond_init(pthread_cond_t* _Nonnull __cond, const pthread_condattr_t* _Nullable __attr);
int pthread_cond_signal(pthread_cond_t* _Nonnull __cond);
int pthread_cond_timedwait(pthread_cond_t* _Nonnull __cond, pthread_mutex_t* _Nonnull __mutex, const struct timespec* _Nullable __timeout);
/*
 * Condition variables use CLOCK_REALTIME by default for their timeouts. That is typically
 * inappropriate, since that clock can change dramatically, causing the timeout to expire
 * either earlier or much later than intended.
 * Condition variables have an initialization option to use CLOCK_MONOTONIC, and, in addition,
 * Android provides pthread_cond_timedwait_monotonic_np to use CLOCK_MONOTONIC on a condition
 * variable for this single wait no matter how it was initialized.
 * Note that pthread_cond_clockwait() allows specifying an arbitrary clock and has superseded this
 * function.
 */

#if (!defined(__LP64__)) || (defined(__LP64__) && __ANDROID_API__ >= 28)
int pthread_cond_timedwait_monotonic_np(pthread_cond_t* _Nonnull __cond, pthread_mutex_t* _Nonnull __mutex,
                                        const struct timespec* _Nullable __timeout) __INTRODUCED_IN_64(28);
#endif /* (!defined(__LP64__)) || (defined(__LP64__) && __ANDROID_API__ >= 28) */
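
/*
 * Illustrative sketch (not part of this header): one way to get a monotonic timeout is to pick
 * the condition variable's clock at initialization and compute the deadline from CLOCK_MONOTONIC;
 * pthread_cond_timedwait() then measures the deadline against that clock. The names g_lock,
 * g_cond, g_flag, init_cond() and wait_for_flag_ms() are hypothetical.
 *
 *   #include <errno.h>
 *   #include <pthread.h>
 *   #include <time.h>
 *
 *   static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
 *   static pthread_cond_t g_cond;
 *   static int g_flag = 0;
 *
 *   static void init_cond(void) {
 *     pthread_condattr_t attr;
 *     pthread_condattr_init(&attr);
 *     pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);  // Timeouts measured on CLOCK_MONOTONIC.
 *     pthread_cond_init(&g_cond, &attr);
 *     pthread_condattr_destroy(&attr);
 *   }
 *
 *   // Returns 0 once g_flag is set, or ETIMEDOUT.
 *   static int wait_for_flag_ms(long ms) {
 *     struct timespec deadline;
 *     clock_gettime(CLOCK_MONOTONIC, &deadline);  // Absolute deadline on the same clock.
 *     deadline.tv_sec += ms / 1000;
 *     deadline.tv_nsec += (ms % 1000) * 1000000;
 *     if (deadline.tv_nsec >= 1000000000) { deadline.tv_sec++; deadline.tv_nsec -= 1000000000; }
 *     pthread_mutex_lock(&g_lock);
 *     int rc = 0;
 *     while (!g_flag && rc == 0) rc = pthread_cond_timedwait(&g_cond, &g_lock, &deadline);
 *     pthread_mutex_unlock(&g_lock);
 *     return g_flag ? 0 : rc;
 *   }
 */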

int pthread_cond_wait(pthread_cond_t* _Nonnull __cond, pthread_mutex_t* _Nonnull __mutex);

int pthread_create(pthread_t* _Nonnull __pthread_ptr, pthread_attr_t const* _Nullable __attr, void* _Nullable (* _Nonnull __start_routine)(void* _Nullable), void* _Nullable);

int pthread_detach(pthread_t __pthread);
void pthread_exit(void* _Nullable __return_value) __noreturn;

int pthread_equal(pthread_t __lhs, pthread_t __rhs);

int pthread_getattr_np(pthread_t __pthread, pthread_attr_t* _Nonnull __attr);

int pthread_getcpuclockid(pthread_t __pthread, clockid_t* _Nonnull __clock);

pid_t pthread_gettid_np(pthread_t __pthread);

int pthread_join(pthread_t __pthread, void* _Nullable * _Nullable __return_value_ptr);

/**
 * [pthread_key_create(3)](https://man7.org/linux/man-pages/man3/pthread_key_create.3p.html)
 * creates a key for thread-specific data.
 *
 * There is a limit of `PTHREAD_KEYS_MAX` keys per process, but most callers
 * should just use the C or C++ `thread_local` storage specifier anyway. When
 * targeting new enough OS versions, the compiler will automatically use
 * ELF TLS; when targeting old OS versions the emutls implementation will
 * multiplex pthread keys behind the scenes, using one per library rather than
 * one per thread-local variable. If you are implementing the runtime for a
 * different language, you should consider similar implementation choices and
 * avoid a direct one-to-one mapping from thread locals to pthread keys.
 *
 * The destructor function is only called for non-null values.
 *
 * Returns 0 on success and returns an error number on failure.
 */
int pthread_key_create(pthread_key_t* _Nonnull __key_ptr, void (* _Nullable __key_destructor)(void* _Nullable));

/**
 * [pthread_key_delete(3)](https://man7.org/linux/man-pages/man3/pthread_key_delete.3p.html)
 * deletes a key for thread-specific data.
 *
 * Note that pthread_key_delete() does _not_ run destructor functions:
 * callers must take care of any necessary cleanup of thread-specific data themselves.
 * This function only deletes the key itself.
 *
 * Returns 0 on success and returns an error number on failure.
 */
int pthread_key_delete(pthread_key_t __key);

/**
 * [pthread_getspecific(3)](https://man7.org/linux/man-pages/man3/pthread_getspecific.3p.html)
 * returns the calling thread's thread-specific value for the given key.
 */
void* _Nullable pthread_getspecific(pthread_key_t __key);

/**
 * [pthread_setspecific(3)](https://man7.org/linux/man-pages/man3/pthread_setspecific.3p.html)
 * sets the calling thread's thread-specific value for the given key.
 *
 * Returns 0 on success and returns an error number on failure.
 */
int pthread_setspecific(pthread_key_t __key, const void* _Nullable __value);
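
/*
 * Illustrative sketch (not part of this header): typical use of the thread-specific data API,
 * with free() as the destructor so each thread's buffer is released at thread exit. The names
 * g_key, g_key_once and get_thread_buffer() are hypothetical.
 *
 *   #include <pthread.h>
 *   #include <stdlib.h>
 *
 *   static pthread_key_t g_key;
 *   static pthread_once_t g_key_once = PTHREAD_ONCE_INIT;
 *
 *   static void make_key(void) {
 *     // The destructor runs at thread exit for each thread whose value is non-null.
 *     pthread_key_create(&g_key, free);
 *   }
 *
 *   static char* get_thread_buffer(void) {
 *     pthread_once(&g_key_once, make_key);
 *     char* buf = pthread_getspecific(g_key);
 *     if (buf == NULL) {
 *       buf = malloc(128);
 *       pthread_setspecific(g_key, buf);  // Visible only to the calling thread.
 *     }
 *     return buf;
 *   }
 */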

int pthread_mutexattr_destroy(pthread_mutexattr_t* _Nonnull __attr);
int pthread_mutexattr_getpshared(const pthread_mutexattr_t* _Nonnull __attr, int* _Nonnull __shared);
int pthread_mutexattr_gettype(const pthread_mutexattr_t* _Nonnull __attr, int* _Nonnull __type);

#if __BIONIC_AVAILABILITY_GUARD(28)
int pthread_mutexattr_getprotocol(const pthread_mutexattr_t* _Nonnull __attr, int* _Nonnull __protocol) __INTRODUCED_IN(28);
#endif /* __BIONIC_AVAILABILITY_GUARD(28) */

int pthread_mutexattr_init(pthread_mutexattr_t* _Nonnull __attr);
int pthread_mutexattr_setpshared(pthread_mutexattr_t* _Nonnull __attr, int __shared);
int pthread_mutexattr_settype(pthread_mutexattr_t* _Nonnull __attr, int __type);

#if __BIONIC_AVAILABILITY_GUARD(28)
int pthread_mutexattr_setprotocol(pthread_mutexattr_t* _Nonnull __attr, int __protocol) __INTRODUCED_IN(28);
#endif /* __BIONIC_AVAILABILITY_GUARD(28) */



#if __BIONIC_AVAILABILITY_GUARD(30)
int pthread_mutex_clocklock(pthread_mutex_t* _Nonnull __mutex, clockid_t __clock,
                            const struct timespec* _Nullable __abstime) __INTRODUCED_IN(30);
#endif /* __BIONIC_AVAILABILITY_GUARD(30) */

int pthread_mutex_destroy(pthread_mutex_t* _Nonnull __mutex);
int pthread_mutex_init(pthread_mutex_t* _Nonnull __mutex, const pthread_mutexattr_t* _Nullable __attr);
int pthread_mutex_lock(pthread_mutex_t* _Nonnull __mutex);
int pthread_mutex_timedlock(pthread_mutex_t* _Nonnull __mutex, const struct timespec* _Nullable __timeout);

/*
 * POSIX historically only supported using pthread_mutex_timedlock() with CLOCK_REALTIME. That is
 * typically inappropriate, since that clock can change dramatically, causing the timeout to expire
 * either earlier or much later than intended.
 * This function was added to take a timespec based on CLOCK_MONOTONIC, which does not suffer
 * from this issue.
 * Note that pthread_mutex_clocklock() allows specifying an arbitrary clock and has superseded this
 * function.
 */

#if __BIONIC_AVAILABILITY_GUARD(28)
int pthread_mutex_timedlock_monotonic_np(pthread_mutex_t* _Nonnull __mutex, const struct timespec* _Nullable __timeout)
    __INTRODUCED_IN(28);
#endif /* __BIONIC_AVAILABILITY_GUARD(28) */
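
/*
 * Illustrative sketch (assumes the call is only reached on API level 28 or later): acquiring a
 * mutex with a 100ms timeout measured on CLOCK_MONOTONIC. try_lock_100ms() is a hypothetical
 * helper.
 *
 *   #include <errno.h>
 *   #include <pthread.h>
 *   #include <time.h>
 *
 *   // Returns 0 if the lock was acquired, or ETIMEDOUT.
 *   static int try_lock_100ms(pthread_mutex_t* mutex) {
 *     struct timespec deadline;
 *     clock_gettime(CLOCK_MONOTONIC, &deadline);  // Absolute deadline on CLOCK_MONOTONIC.
 *     deadline.tv_nsec += 100 * 1000000;
 *     if (deadline.tv_nsec >= 1000000000) { deadline.tv_sec++; deadline.tv_nsec -= 1000000000; }
 *     return pthread_mutex_timedlock_monotonic_np(mutex, &deadline);
 *   }
 */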

int pthread_mutex_trylock(pthread_mutex_t* _Nonnull __mutex);
int pthread_mutex_unlock(pthread_mutex_t* _Nonnull __mutex);

int pthread_once(pthread_once_t* _Nonnull __once, void (* _Nonnull __init_routine)(void));

int pthread_rwlockattr_init(pthread_rwlockattr_t* _Nonnull __attr);
int pthread_rwlockattr_destroy(pthread_rwlockattr_t* _Nonnull __attr);
int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* _Nonnull __attr, int* _Nonnull __shared);
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* _Nonnull __attr, int __shared);

#if __BIONIC_AVAILABILITY_GUARD(23)
int pthread_rwlockattr_getkind_np(const pthread_rwlockattr_t* _Nonnull __attr, int* _Nonnull __kind)
  __INTRODUCED_IN(23);
int pthread_rwlockattr_setkind_np(pthread_rwlockattr_t* _Nonnull __attr, int __kind) __INTRODUCED_IN(23);
#endif /* __BIONIC_AVAILABILITY_GUARD(23) */



#if __BIONIC_AVAILABILITY_GUARD(30)
int pthread_rwlock_clockrdlock(pthread_rwlock_t* _Nonnull __rwlock, clockid_t __clock,
                               const struct timespec* _Nullable __timeout) __INTRODUCED_IN(30);
int pthread_rwlock_clockwrlock(pthread_rwlock_t* _Nonnull __rwlock, clockid_t __clock,
                               const struct timespec* _Nullable __timeout) __INTRODUCED_IN(30);
#endif /* __BIONIC_AVAILABILITY_GUARD(30) */

int pthread_rwlock_destroy(pthread_rwlock_t* _Nonnull __rwlock);
int pthread_rwlock_init(pthread_rwlock_t* _Nonnull __rwlock, const pthread_rwlockattr_t* _Nullable __attr);
int pthread_rwlock_rdlock(pthread_rwlock_t* _Nonnull __rwlock);
int pthread_rwlock_timedrdlock(pthread_rwlock_t* _Nonnull __rwlock, const struct timespec* _Nullable __timeout);
/* See the comment on pthread_mutex_timedlock_monotonic_np for usage of this function. */

#if __BIONIC_AVAILABILITY_GUARD(28)
int pthread_rwlock_timedrdlock_monotonic_np(pthread_rwlock_t* _Nonnull __rwlock,
                                            const struct timespec* _Nullable __timeout) __INTRODUCED_IN(28);
#endif /* __BIONIC_AVAILABILITY_GUARD(28) */

int pthread_rwlock_timedwrlock(pthread_rwlock_t* _Nonnull __rwlock, const struct timespec* _Nullable __timeout);
/* See the comment on pthread_mutex_timedlock_monotonic_np for usage of this function. */

#if __BIONIC_AVAILABILITY_GUARD(28)
int pthread_rwlock_timedwrlock_monotonic_np(pthread_rwlock_t* _Nonnull __rwlock,
                                            const struct timespec* _Nullable __timeout) __INTRODUCED_IN(28);
#endif /* __BIONIC_AVAILABILITY_GUARD(28) */

int pthread_rwlock_tryrdlock(pthread_rwlock_t* _Nonnull __rwlock);
int pthread_rwlock_trywrlock(pthread_rwlock_t* _Nonnull __rwlock);
int pthread_rwlock_unlock(pthread_rwlock_t* _Nonnull __rwlock);
int pthread_rwlock_wrlock(pthread_rwlock_t* _Nonnull __rwlock);


#if __BIONIC_AVAILABILITY_GUARD(24)
int pthread_barrierattr_init(pthread_barrierattr_t* _Nonnull __attr) __INTRODUCED_IN(24);
int pthread_barrierattr_destroy(pthread_barrierattr_t* _Nonnull __attr) __INTRODUCED_IN(24);
int pthread_barrierattr_getpshared(const pthread_barrierattr_t* _Nonnull __attr, int* _Nonnull __shared) __INTRODUCED_IN(24);
int pthread_barrierattr_setpshared(pthread_barrierattr_t* _Nonnull __attr, int __shared) __INTRODUCED_IN(24);

int pthread_barrier_init(pthread_barrier_t* _Nonnull __barrier, const pthread_barrierattr_t* _Nullable __attr, unsigned __count) __INTRODUCED_IN(24);
int pthread_barrier_destroy(pthread_barrier_t* _Nonnull __barrier) __INTRODUCED_IN(24);
int pthread_barrier_wait(pthread_barrier_t* _Nonnull __barrier) __INTRODUCED_IN(24);

int pthread_spin_destroy(pthread_spinlock_t* _Nonnull __spinlock) __INTRODUCED_IN(24);
int pthread_spin_init(pthread_spinlock_t* _Nonnull __spinlock, int __shared) __INTRODUCED_IN(24);
int pthread_spin_lock(pthread_spinlock_t* _Nonnull __spinlock) __INTRODUCED_IN(24);
int pthread_spin_trylock(pthread_spinlock_t* _Nonnull __spinlock) __INTRODUCED_IN(24);
int pthread_spin_unlock(pthread_spinlock_t* _Nonnull __spinlock) __INTRODUCED_IN(24);
#endif /* __BIONIC_AVAILABILITY_GUARD(24) */


pthread_t pthread_self(void) __attribute_const__;

#if defined(__USE_GNU) && __BIONIC_AVAILABILITY_GUARD(26)
/**
 * [pthread_getname_np(3)](https://man7.org/linux/man-pages/man3/pthread_getname_np.3.html)
 * gets the name of the given thread.
 * Names are at most 16 bytes (including '\0').
 *
 * Returns 0 on success and returns an error number on failure.
 *
 * Available since API level 26.
 */
int pthread_getname_np(pthread_t __pthread, char* _Nonnull __buf, size_t __n) __INTRODUCED_IN(26);
#endif

/**
 * [pthread_setname_np(3)](https://man7.org/linux/man-pages/man3/pthread_setname_np.3.html)
 * sets the name of the given thread.
 * Names are at most 16 bytes (including '\0').
 * Truncation must be done by the caller;
 * calls with longer names will fail with ERANGE.
 *
 * Returns 0 on success and returns an error number on failure.
 *
 * This should only have been available under _GNU_SOURCE,
 * but is always available on Android by historical accident.
 */
int pthread_setname_np(pthread_t __pthread, const char* _Nonnull __name);
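
/*
 * Illustrative sketch: since names longer than 15 characters plus the terminating '\0' fail with
 * ERANGE, truncate before calling. set_worker_name() is a hypothetical helper.
 *
 *   #include <pthread.h>
 *   #include <string.h>
 *
 *   static void set_worker_name(pthread_t t, const char* long_name) {
 *     char name[16];
 *     strlcpy(name, long_name, sizeof(name));  // Truncate to at most 15 characters plus '\0'.
 *     pthread_setname_np(t, name);
 *   }
 */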

/**
 * [pthread_getaffinity_np(3)](https://man7.org/linux/man-pages/man3/pthread_getaffinity_np.3.html)
 * gets the CPU affinity mask for the given thread.
 *
 * Returns 0 on success and returns an error number on failure.
 *
 * Available since API level 36.
 * See sched_getaffinity() and pthread_gettid_np() for greater portability.
 */
#if defined(__USE_GNU) && __BIONIC_AVAILABILITY_GUARD(36)
int pthread_getaffinity_np(pthread_t __pthread, size_t __cpu_set_size, cpu_set_t* __cpu_set) __INTRODUCED_IN(36);
#endif
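
/*
 * Illustrative sketch: as the doc comment above suggests, sched_getaffinity() plus
 * pthread_gettid_np() is the more portable (and pre-API 36) way to read a thread's affinity
 * mask. log_cpu_count() is a hypothetical helper.
 *
 *   #define _GNU_SOURCE
 *   #include <pthread.h>
 *   #include <sched.h>
 *   #include <stdio.h>
 *
 *   static void log_cpu_count(pthread_t t) {
 *     cpu_set_t set;
 *     CPU_ZERO(&set);
 *     // sched_getaffinity() takes a tid; pthread_gettid_np() maps a pthread_t to one.
 *     if (sched_getaffinity(pthread_gettid_np(t), sizeof(set), &set) == 0) {
 *       printf("thread may run on %d CPUs\n", CPU_COUNT(&set));
 *     }
 *   }
 */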

/**
 * [pthread_setaffinity_np(3)](https://man7.org/linux/man-pages/man3/pthread_setaffinity_np.3.html)
 * sets the CPU affinity mask for the given thread.
 *
 * Returns 0 on success and returns an error number on failure.
 *
 * Available since API level 36.
 * See sched_setaffinity() and pthread_gettid_np() for greater portability.
 */
#if defined(__USE_GNU) && __BIONIC_AVAILABILITY_GUARD(36)
int pthread_setaffinity_np(pthread_t __pthread, size_t __cpu_set_size, const cpu_set_t* __cpu_set) __INTRODUCED_IN(36);
#endif

/**
 * [pthread_setschedparam(3)](https://man7.org/linux/man-pages/man3/pthread_setschedparam.3.html)
 * sets the scheduler policy and parameters of the given thread.
 *
 * This call is not useful to applications on Android, because they don't
 * have permission to set their scheduling policy, and the only priority
 * for their policy is 0 anyway. If you only need to set your scheduling
 * priority, see setpriority() instead.
 *
 * Returns 0 on success and returns an error number on failure.
 */
int pthread_setschedparam(pthread_t __pthread, int __policy, const struct sched_param* _Nonnull __param);
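
/*
 * Illustrative sketch: as the doc comment above notes, an app that only wants to change a
 * thread's priority can use setpriority() with that thread's tid instead.
 * lower_current_thread_priority() is a hypothetical helper.
 *
 *   #include <sys/resource.h>
 *   #include <unistd.h>
 *
 *   static int lower_current_thread_priority(void) {
 *     // On Linux, PRIO_PROCESS with a tid adjusts that single thread's nice value.
 *     return setpriority(PRIO_PROCESS, gettid(), 10);
 *   }
 */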

/**
 * [pthread_getschedparam(3)](https://man7.org/linux/man-pages/man3/pthread_getschedparam.3.html)
 * gets the scheduler policy and parameters of the given thread.
 *
 * Returns 0 on success and returns an error number on failure.
 */
int pthread_getschedparam(pthread_t __pthread, int* _Nonnull __policy, struct sched_param* _Nonnull __param);

/**
 * [pthread_setschedprio(3)](https://man7.org/linux/man-pages/man3/pthread_setschedprio.3.html)
 * sets the scheduler priority of the given thread.
 *
 * This call is not useful to applications on Android, because they don't
 * have permission to set their scheduling policy, and the only priority
 * for their policy is 0 anyway. If you only need to set your scheduling
 * priority, see setpriority() instead.
 *
 * Returns 0 on success and returns an error number on failure.
 *
 * Available since API level 28.
 */

#if __BIONIC_AVAILABILITY_GUARD(28)
int pthread_setschedprio(pthread_t __pthread, int __priority) __INTRODUCED_IN(28);
#endif /* __BIONIC_AVAILABILITY_GUARD(28) */

typedef void (* _Nullable __pthread_cleanup_func_t)(void* _Nullable);

typedef struct __pthread_cleanup_t {
  struct __pthread_cleanup_t*   _Nullable __cleanup_prev;
  __pthread_cleanup_func_t      _Nullable __cleanup_routine;
  void*                         _Nullable __cleanup_arg;
} __pthread_cleanup_t;

void __pthread_cleanup_push(__pthread_cleanup_t* _Nonnull c, __pthread_cleanup_func_t _Nullable, void* _Nullable);
void __pthread_cleanup_pop(__pthread_cleanup_t* _Nonnull, int);

/* Believe it or not, the definitions of pthread_cleanup_push and
 * pthread_cleanup_pop below are correct. POSIX states that these
 * can be implemented as macros that might introduce opening and
 * closing braces, and that using setjmp/longjmp/return/break/continue
 * between them results in undefined behavior.
 */
#define  pthread_cleanup_push(routine, arg)                      \
    do {                                                         \
        __pthread_cleanup_t  __cleanup;                          \
        __pthread_cleanup_push( &__cleanup, (routine), (arg) );  \

#define  pthread_cleanup_pop(execute)                  \
        __pthread_cleanup_pop( &__cleanup, (execute)); \
    } while (0);                                       \

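/*
 * Illustrative sketch: the push/pop pair must appear in the same lexical scope, since together
 * the macros expand to a do/while block. The handler runs if the thread calls pthread_exit()
 * between the pair, or immediately when popped with a non-zero argument. close_on_exit() and
 * copy_fd() are hypothetical.
 *
 *   #include <pthread.h>
 *   #include <unistd.h>
 *
 *   static void close_on_exit(void* arg) {
 *     close(*(int*) arg);
 *   }
 *
 *   static void copy_fd(int fd) {
 *     pthread_cleanup_push(close_on_exit, &fd);  // Registered until the matching pop.
 *     char buf[4096];
 *     while (read(fd, buf, sizeof(buf)) > 0) {
 *       // ... an early pthread_exit() here would still close fd ...
 *     }
 *     pthread_cleanup_pop(1);  // Non-zero: run close_on_exit(&fd) now as well.
 *   }
 */
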
__END_DECLS