1 #include "pycore_interp.h" // _PyInterpreterState.pythread_stacksize
2
3 /* Posix threads interface */
4
5 #include <stdlib.h>
6 #include <string.h>
7 #if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
8 #define destructor xxdestructor
9 #endif
10 #include <pthread.h>
11 #if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
12 #undef destructor
13 #endif
14 #include <signal.h>
15
16 #if defined(__linux__)
17 # include <sys/syscall.h> /* syscall(SYS_gettid) */
18 #elif defined(__FreeBSD__)
19 # include <pthread_np.h> /* pthread_getthreadid_np() */
20 #elif defined(__OpenBSD__)
21 # include <unistd.h> /* getthrid() */
22 #elif defined(_AIX)
23 # include <sys/thread.h> /* thread_self() */
24 #elif defined(__NetBSD__)
25 # include <lwp.h> /* _lwp_self() */
26 #endif
27
28 /* The POSIX spec requires that use of pthread_attr_setstacksize
29 be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */
30 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
31 #ifndef THREAD_STACK_SIZE
32 #define THREAD_STACK_SIZE 0 /* use default stack size */
33 #endif
34
/* The default stack size for new threads on BSD is small enough that
 * we'll get hard crashes instead of 'maximum recursion depth exceeded'
 * exceptions.
 *
 * The default stack sizes below are the empirically determined minimal
 * sizes at which a simple recursive function doesn't cause a hard crash.
 *
 * For macOS the value of THREAD_STACK_SIZE is determined in configure.ac,
 * as it also depends on other configure options such as the chosen
 * sanitizer runtimes.
 */
#if defined(__FreeBSD__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef  THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0x400000
#endif
#if defined(_AIX) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef  THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0x200000
#endif
/* bpo-38852: test_threading.test_recursion_limit() checks that 1000 recursive
   Python calls (the default recursion limit) don't crash, but instead raise a
   regular RecursionError exception. In debug mode, Python function calls
   allocate more memory on the stack, so use a stack of 8 MiB. */
#if defined(__ANDROID__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#  ifdef Py_DEBUG
#  undef  THREAD_STACK_SIZE
#  define THREAD_STACK_SIZE    0x800000
#  endif
#endif
#if defined(__VXWORKS__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef  THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0x100000
#endif
/* for safety, ensure a viable minimum stacksize */
#define THREAD_STACK_MIN        0x8000  /* 32 KiB */
#else  /* !_POSIX_THREAD_ATTR_STACKSIZE */
#ifdef THREAD_STACK_SIZE
#error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
#endif
#endif

/* The POSIX spec says that implementations supporting the sem_*
   family of functions must indicate this by defining
   _POSIX_SEMAPHORES. */
#ifdef _POSIX_SEMAPHORES
/* On FreeBSD 4.x, _POSIX_SEMAPHORES is defined empty, so
   we need to add 0 to make it work there as well. */
#if (_POSIX_SEMAPHORES+0) == -1
#define HAVE_BROKEN_POSIX_SEMAPHORES
#else
#include <semaphore.h>
#include <errno.h>
#endif
#endif


/* Whether or not to use semaphores directly rather than emulating them with
 * mutexes and condition variables:
 */
#if (defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES) && \
     (defined(HAVE_SEM_TIMEDWAIT) || defined(HAVE_SEM_CLOCKWAIT)))
#  define USE_SEMAPHORES
#else
#  undef USE_SEMAPHORES
#endif

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
// The monotonic clock is supported at compile time; that does not
// guarantee it works at runtime.
#define CONDATTR_MONOTONIC
#endif


/* On platforms that don't use standard POSIX threads, pthread_sigmask()
 * isn't present. DEC threads uses sigprocmask() instead, as do most
 * other UNIX International compliant systems that don't have the full
 * pthread implementation.
 */
#if defined(HAVE_PTHREAD_SIGMASK) && !defined(HAVE_BROKEN_PTHREAD_SIGMASK)
#  define SET_THREAD_SIGMASK pthread_sigmask
#else
#  define SET_THREAD_SIGMASK sigprocmask
#endif


#define MICROSECONDS_TO_TIMESPEC(microseconds, ts) \
do { \
    struct timeval tv; \
    gettimeofday(&tv, NULL); \
    tv.tv_usec += microseconds % 1000000; \
    tv.tv_sec += microseconds / 1000000; \
    tv.tv_sec += tv.tv_usec / 1000000; \
    tv.tv_usec %= 1000000; \
    ts.tv_sec = tv.tv_sec; \
    ts.tv_nsec = tv.tv_usec * 1000; \
} while(0)

#if defined(CONDATTR_MONOTONIC) || defined(HAVE_SEM_CLOCKWAIT)
static void
monotonic_abs_timeout(long long us, struct timespec *abs)
{
    clock_gettime(CLOCK_MONOTONIC, abs);
    abs->tv_sec  += us / 1000000;
    abs->tv_nsec += (us % 1000000) * 1000;
    abs->tv_sec  += abs->tv_nsec / 1000000000;
    abs->tv_nsec %= 1000000000;
}
#endif
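
/* Illustrative sketch (kept out of the build): a minimal, hypothetical
 * program showing how a relative timeout in microseconds becomes an absolute
 * deadline, exactly as monotonic_abs_timeout() does it.  For example, with
 * CLOCK_MONOTONIC currently at {tv_sec=100, tv_nsec=900000000}, a 1500000 us
 * timeout yields {tv_sec=102, tv_nsec=400000000}: the nanosecond overflow is
 * carried into tv_sec.  Assumes a POSIX system with clock_gettime(). */
#if 0
#include <stdio.h>
#include <time.h>

int
main(void)
{
    struct timespec abs;
    long long us = 1500000;     /* 1.5 s relative timeout */

    clock_gettime(CLOCK_MONOTONIC, &abs);
    abs.tv_sec  += us / 1000000;
    abs.tv_nsec += (us % 1000000) * 1000;
    abs.tv_sec  += abs.tv_nsec / 1000000000;    /* carry the overflow */
    abs.tv_nsec %= 1000000000;

    printf("deadline: %lld.%09ld\n", (long long)abs.tv_sec, abs.tv_nsec);
    return 0;
}
#endif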


/*
 * pthread_cond support
 */

// NULL when pthread_condattr_setclock(CLOCK_MONOTONIC) is not supported.
static pthread_condattr_t *condattr_monotonic = NULL;

static void
init_condattr(void)
{
#ifdef CONDATTR_MONOTONIC
    static pthread_condattr_t ca;
    pthread_condattr_init(&ca);
    if (pthread_condattr_setclock(&ca, CLOCK_MONOTONIC) == 0) {
        condattr_monotonic = &ca;  // Use monotonic clock
    }
#endif
}

int
_PyThread_cond_init(PyCOND_T *cond)
{
    return pthread_cond_init(cond, condattr_monotonic);
}


void
_PyThread_cond_after(long long us, struct timespec *abs)
{
#ifdef CONDATTR_MONOTONIC
    if (condattr_monotonic) {
        monotonic_abs_timeout(us, abs);
        return;
    }
#endif

    struct timespec ts;
    MICROSECONDS_TO_TIMESPEC(us, ts);
    *abs = ts;
}
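
/* Illustrative sketch (kept out of the build): how a caller is expected to
 * combine _PyThread_cond_init() and _PyThread_cond_after().  The deadline is
 * computed against the same clock the condition variable was initialized
 * with (monotonic when available, wall clock otherwise), which is the
 * pattern the lock implementation below relies on.  PyCOND_T is assumed to
 * be pthread-compatible here; the names example_* are hypothetical. */
#if 0
/* example_cond is assumed to have been set up once with
 * _PyThread_cond_init(&example_cond) before any waiter runs. */
static pthread_mutex_t example_mut = PTHREAD_MUTEX_INITIALIZER;
static PyCOND_T example_cond;
static int example_ready = 0;

static int
wait_ready_with_timeout(long long timeout_us)
{
    struct timespec deadline;
    int status = 0;

    /* Absolute deadline on the clock the condvar was created with. */
    _PyThread_cond_after(timeout_us, &deadline);

    pthread_mutex_lock(&example_mut);
    while (!example_ready && status != ETIMEDOUT) {
        /* Re-checking the predicate in a loop also handles spurious wakeups. */
        status = pthread_cond_timedwait(&example_cond, &example_mut, &deadline);
    }
    pthread_mutex_unlock(&example_mut);
    return example_ready;
}
#endif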


/* A pthread mutex isn't sufficient to model the Python lock type
 * because, according to Draft 5 of the docs (P1003.4a/D5), both of the
 * following are undefined:
 *  -> a thread tries to lock a mutex it already has locked
 *  -> a thread tries to unlock a mutex locked by a different thread
 * pthread mutexes are designed for serializing threads over short pieces
 * of code anyway, so wouldn't be an appropriate implementation of
 * Python's locks regardless.
 *
 * The pthread_lock struct implements a Python lock as a "locked?" bit
 * and a <condition, mutex> pair.  In general, if the bit can be acquired
 * instantly, it is, else the pair is used to block the thread until the
 * bit is cleared.     9 May 1994 tim@ksr.com
 */

typedef struct {
    char             locked; /* 0=unlocked, 1=locked */
    /* a <cond, mutex> pair to handle an acquire of a locked lock */
    pthread_cond_t   lock_released;
    pthread_mutex_t  mut;
} pthread_lock;

#define CHECK_STATUS(name)  if (status != 0) { perror(name); error = 1; }
#define CHECK_STATUS_PTHREAD(name)  if (status != 0) { fprintf(stderr, \
    "%s: %s\n", name, strerror(status)); error = 1; }

/*
 * Initialization.
 */
static void
PyThread__init_thread(void)
{
#if defined(_AIX) && defined(__GNUC__)
    extern void pthread_init(void);
    pthread_init();
#endif
    init_condattr();
}

/*
 * Thread support.
 */

/* bpo-33015: pythread_callback struct and pythread_wrapper() cast
   "void func(void *)" to "void* func(void *)": always return NULL.

   PyThread_start_new_thread() uses the "void func(void *)" type, whereas
   pthread_create() requires a void* return value. */
typedef struct {
    void (*func) (void *);
    void *arg;
} pythread_callback;

static void *
pythread_wrapper(void *arg)
{
    /* copy func and func_arg and free the temporary structure */
    pythread_callback *callback = arg;
    void (*func)(void *) = callback->func;
    void *func_arg = callback->arg;
    PyMem_RawFree(arg);

    func(func_arg);
    return NULL;
}

unsigned long
PyThread_start_new_thread(void (*func)(void *), void *arg)
{
    pthread_t th;
    int status;
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_t attrs;
#endif
#if defined(THREAD_STACK_SIZE)
    size_t tss;
#endif

    dprintf(("PyThread_start_new_thread called\n"));
    if (!initialized)
        PyThread_init_thread();

#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    if (pthread_attr_init(&attrs) != 0)
        return PYTHREAD_INVALID_THREAD_ID;
#endif
#if defined(THREAD_STACK_SIZE)
    PyThreadState *tstate = _PyThreadState_GET();
    size_t stacksize = tstate ? tstate->interp->pythread_stacksize : 0;
    tss = (stacksize != 0) ? stacksize : THREAD_STACK_SIZE;
    if (tss != 0) {
        if (pthread_attr_setstacksize(&attrs, tss) != 0) {
            pthread_attr_destroy(&attrs);
            return PYTHREAD_INVALID_THREAD_ID;
        }
    }
#endif
#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
#endif

    pythread_callback *callback = PyMem_RawMalloc(sizeof(pythread_callback));

    if (callback == NULL) {
        return PYTHREAD_INVALID_THREAD_ID;
    }

    callback->func = func;
    callback->arg = arg;

    status = pthread_create(&th,
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
                            &attrs,
#else
                            (pthread_attr_t*)NULL,
#endif
                            pythread_wrapper, callback);

#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_destroy(&attrs);
#endif

    if (status != 0) {
        PyMem_RawFree(callback);
        return PYTHREAD_INVALID_THREAD_ID;
    }

    pthread_detach(th);

#if SIZEOF_PTHREAD_T <= SIZEOF_LONG
    return (unsigned long) th;
#else
    return (unsigned long) *(unsigned long *) &th;
#endif
}
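
/* Illustrative sketch (kept out of the build): how runtime or embedding code
 * typically drives PyThread_start_new_thread().  The worker takes a single
 * void* argument and returns nothing; the thread is detached, so there is no
 * join step and the returned identifier is only useful for bookkeeping.  The
 * struct job and the helper names here are hypothetical. */
#if 0
struct job {
    int payload;
};

static void
worker(void *arg)
{
    struct job *job = (struct job *)arg;
    /* ... do the work ... */
    PyMem_RawFree(job);   /* the worker owns and frees its argument */
}

static int
spawn_worker(int payload)
{
    struct job *job = PyMem_RawMalloc(sizeof(struct job));
    if (job == NULL) {
        return -1;
    }
    job->payload = payload;

    unsigned long ident = PyThread_start_new_thread(worker, job);
    if (ident == PYTHREAD_INVALID_THREAD_ID) {
        PyMem_RawFree(job);
        return -1;
    }
    return 0;
}
#endif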

/* XXX This implementation is considered (to quote Tim Peters) "inherently
   hosed" because:
     - It does not guarantee the promise that a non-zero integer is returned.
     - The cast to unsigned long is inherently unsafe.
     - It is not clear that the 'volatile' (for AIX?) is any longer necessary.
*/
unsigned long
PyThread_get_thread_ident(void)
{
    volatile pthread_t threadid;
    if (!initialized)
        PyThread_init_thread();
    threadid = pthread_self();
    return (unsigned long) threadid;
}

#ifdef PY_HAVE_THREAD_NATIVE_ID
unsigned long
PyThread_get_thread_native_id(void)
{
    if (!initialized)
        PyThread_init_thread();
#ifdef __APPLE__
    uint64_t native_id;
    (void) pthread_threadid_np(NULL, &native_id);
#elif defined(__linux__)
    pid_t native_id;
    native_id = syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    int native_id;
    native_id = pthread_getthreadid_np();
#elif defined(__OpenBSD__)
    pid_t native_id;
    native_id = getthrid();
#elif defined(_AIX)
    tid_t native_id;
    native_id = thread_self();
#elif defined(__NetBSD__)
    lwpid_t native_id;
    native_id = _lwp_self();
#endif
    return (unsigned long) native_id;
}
#endif

void _Py_NO_RETURN
PyThread_exit_thread(void)
{
    dprintf(("PyThread_exit_thread called\n"));
    if (!initialized)
        exit(0);
    pthread_exit(0);
}

#ifdef USE_SEMAPHORES

/*
 * Lock support.
 */

PyThread_type_lock
PyThread_allocate_lock(void)
{
    sem_t *lock;
    int status, error = 0;

    dprintf(("PyThread_allocate_lock called\n"));
    if (!initialized)
        PyThread_init_thread();

    lock = (sem_t *)PyMem_RawMalloc(sizeof(sem_t));

    if (lock) {
        status = sem_init(lock, 0, 1);
        CHECK_STATUS("sem_init");

        if (error) {
            PyMem_RawFree((void *)lock);
            lock = NULL;
        }
    }

    dprintf(("PyThread_allocate_lock() -> %p\n", (void *)lock));
    return (PyThread_type_lock)lock;
}

void
PyThread_free_lock(PyThread_type_lock lock)
{
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_free_lock(%p) called\n", lock));

    if (!thelock)
        return;

    status = sem_destroy(thelock);
    CHECK_STATUS("sem_destroy");

    PyMem_RawFree((void *)thelock);
}

/*
 * As of February 2002, Cygwin thread implementations mistakenly report error
 * codes in the return value of the sem_ calls (like the pthread_ functions).
 * Correct implementations return -1 and put the code in errno. This supports
 * either.
 */
static int
fix_status(int status)
{
    return (status == -1) ? errno : status;
}

PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
                            int intr_flag)
{
    PyLockStatus success;
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;
    struct timespec ts;
#ifndef HAVE_SEM_CLOCKWAIT
    _PyTime_t deadline = 0;
#endif

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n",
             lock, microseconds, intr_flag));

    if (microseconds > PY_TIMEOUT_MAX) {
        Py_FatalError("Timeout larger than PY_TIMEOUT_MAX");
    }

    if (microseconds > 0) {
#ifdef HAVE_SEM_CLOCKWAIT
        monotonic_abs_timeout(microseconds, &ts);
#else
        MICROSECONDS_TO_TIMESPEC(microseconds, ts);

        if (!intr_flag) {
            /* cannot overflow thanks to (microseconds > PY_TIMEOUT_MAX)
               check done above */
            _PyTime_t timeout = _PyTime_FromNanoseconds(microseconds * 1000);
            deadline = _PyTime_GetMonotonicClock() + timeout;
        }
#endif
    }

    while (1) {
        if (microseconds > 0) {
#ifdef HAVE_SEM_CLOCKWAIT
            status = fix_status(sem_clockwait(thelock, CLOCK_MONOTONIC,
                                              &ts));
#else
            status = fix_status(sem_timedwait(thelock, &ts));
#endif
        }
        else if (microseconds == 0) {
            status = fix_status(sem_trywait(thelock));
        }
        else {
            status = fix_status(sem_wait(thelock));
        }

        /* Retry if interrupted by a signal, unless the caller wants to be
           notified. */
        if (intr_flag || status != EINTR) {
            break;
        }

        // sem_clockwait() uses an absolute timeout, there is no need
        // to recompute the relative timeout.
#ifndef HAVE_SEM_CLOCKWAIT
        if (microseconds > 0) {
            /* wait interrupted by a signal (EINTR): recompute the timeout */
            _PyTime_t dt = deadline - _PyTime_GetMonotonicClock();
            if (dt < 0) {
                status = ETIMEDOUT;
                break;
            }
            else if (dt > 0) {
                _PyTime_t realtime_deadline = _PyTime_GetSystemClock() + dt;
                if (_PyTime_AsTimespec(realtime_deadline, &ts) < 0) {
                    /* Cannot occur thanks to (microseconds > PY_TIMEOUT_MAX)
                       check done above */
                    Py_UNREACHABLE();
                }
                /* There is no need to update the microseconds value:
                   the code below only cares whether microseconds is
                   > 0 or == 0. */
            }
            else {
                microseconds = 0;
            }
        }
#endif
    }

    /* Don't check the status if we're stopping because of an interrupt. */
    if (!(intr_flag && status == EINTR)) {
        if (microseconds > 0) {
            if (status != ETIMEDOUT) {
#ifdef HAVE_SEM_CLOCKWAIT
                CHECK_STATUS("sem_clockwait");
#else
                CHECK_STATUS("sem_timedwait");
#endif
            }
        }
        else if (microseconds == 0) {
            if (status != EAGAIN)
                CHECK_STATUS("sem_trywait");
        }
        else {
            CHECK_STATUS("sem_wait");
        }
    }

    if (status == 0) {
        success = PY_LOCK_ACQUIRED;
    } else if (intr_flag && status == EINTR) {
        success = PY_LOCK_INTR;
    } else {
        success = PY_LOCK_FAILURE;
    }

    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
             lock, microseconds, intr_flag, success));
    return success;
}

void
PyThread_release_lock(PyThread_type_lock lock)
{
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_release_lock(%p) called\n", lock));

    status = sem_post(thelock);
    CHECK_STATUS("sem_post");
}

#else /* USE_SEMAPHORES */

/*
 * Lock support.
 */
PyThread_type_lock
PyThread_allocate_lock(void)
{
    pthread_lock *lock;
    int status, error = 0;

    dprintf(("PyThread_allocate_lock called\n"));
    if (!initialized)
        PyThread_init_thread();

    lock = (pthread_lock *) PyMem_RawCalloc(1, sizeof(pthread_lock));
    if (lock) {
        lock->locked = 0;

        status = pthread_mutex_init(&lock->mut, NULL);
        CHECK_STATUS_PTHREAD("pthread_mutex_init");
        /* Mark the pthread mutex underlying a Python mutex as
           pure happens-before.  We can't simply mark the
           Python-level mutex as a mutex because it can be
           acquired and released in different threads, which
           will cause errors. */
        _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut);

        status = _PyThread_cond_init(&lock->lock_released);
        CHECK_STATUS_PTHREAD("pthread_cond_init");

        if (error) {
            PyMem_RawFree((void *)lock);
            lock = 0;
        }
    }

    dprintf(("PyThread_allocate_lock() -> %p\n", (void *)lock));
    return (PyThread_type_lock) lock;
}

void
PyThread_free_lock(PyThread_type_lock lock)
{
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_free_lock(%p) called\n", lock));

    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    status = pthread_cond_destroy( &thelock->lock_released );
    CHECK_STATUS_PTHREAD("pthread_cond_destroy");

    status = pthread_mutex_destroy( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_destroy");

    PyMem_RawFree((void *)thelock);
}

PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
                            int intr_flag)
{
    PyLockStatus success = PY_LOCK_FAILURE;
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n",
             lock, microseconds, intr_flag));

    if (microseconds == 0) {
        status = pthread_mutex_trylock( &thelock->mut );
        if (status != EBUSY)
            CHECK_STATUS_PTHREAD("pthread_mutex_trylock[1]");
    }
    else {
        status = pthread_mutex_lock( &thelock->mut );
        CHECK_STATUS_PTHREAD("pthread_mutex_lock[1]");
    }
    if (status == 0) {
        if (thelock->locked == 0) {
            success = PY_LOCK_ACQUIRED;
        }
        else if (microseconds != 0) {
            struct timespec abs;
            if (microseconds > 0) {
                _PyThread_cond_after(microseconds, &abs);
            }
            /* continue trying until we get the lock */

            /* mut must be locked by me -- part of the condition
             * protocol */
            while (success == PY_LOCK_FAILURE) {
                if (microseconds > 0) {
                    status = pthread_cond_timedwait(
                        &thelock->lock_released,
                        &thelock->mut, &abs);
                    if (status == 1) {
                        break;
                    }
                    if (status == ETIMEDOUT)
                        break;
                    CHECK_STATUS_PTHREAD("pthread_cond_timedwait");
                }
                else {
                    status = pthread_cond_wait(
                        &thelock->lock_released,
                        &thelock->mut);
                    CHECK_STATUS_PTHREAD("pthread_cond_wait");
                }

                if (intr_flag && status == 0 && thelock->locked) {
                    /* We were woken up, but didn't get the lock.  We probably
                     * received a signal.  Return PY_LOCK_INTR to allow the
                     * caller to handle it and retry. */
                    success = PY_LOCK_INTR;
                    break;
                }
                else if (status == 0 && !thelock->locked) {
                    success = PY_LOCK_ACQUIRED;
                }
            }
        }
        if (success == PY_LOCK_ACQUIRED) thelock->locked = 1;
        status = pthread_mutex_unlock( &thelock->mut );
        CHECK_STATUS_PTHREAD("pthread_mutex_unlock[1]");
    }

    if (error) success = PY_LOCK_FAILURE;
    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
             lock, microseconds, intr_flag, success));
    return success;
}

void
PyThread_release_lock(PyThread_type_lock lock)
{
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_release_lock(%p) called\n", lock));

    status = pthread_mutex_lock( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_lock[3]");

    thelock->locked = 0;

    /* wake up someone (anyone, if any) waiting on the lock */
    status = pthread_cond_signal( &thelock->lock_released );
    CHECK_STATUS_PTHREAD("pthread_cond_signal");

    status = pthread_mutex_unlock( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_unlock[3]");
}

#endif /* USE_SEMAPHORES */

int
_PyThread_at_fork_reinit(PyThread_type_lock *lock)
{
    PyThread_type_lock new_lock = PyThread_allocate_lock();
    if (new_lock == NULL) {
        return -1;
    }

    /* bpo-6721, bpo-40089: The old lock can be in an inconsistent state.
       fork() can be called in the middle of an operation on the lock done by
       another thread.  So don't call PyThread_free_lock(*lock).

       Leak memory on purpose.  Don't release the memory either since the
       address of a mutex is relevant.  Putting two mutexes at the same
       address can lead to problems. */

    *lock = new_lock;
    return 0;
}

int
PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
{
    return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
}
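
/* Illustrative sketch (kept out of the build): the lock API as seen by a
 * caller.  The same code works whether the build uses POSIX semaphores or
 * the <locked bit, cond, mutex> emulation above.  The 2-second timeout and
 * the guarded_update() helper are hypothetical. */
#if 0
static int
guarded_update(void)
{
    PyThread_type_lock lock = PyThread_allocate_lock();
    if (lock == NULL) {
        return -1;
    }

    /* Blocking acquire; PyThread_acquire_lock(lock, 0) would try just once. */
    PyThread_acquire_lock(lock, 1);
    /* ... critical section ... */
    PyThread_release_lock(lock);

    /* Timed acquire: PY_LOCK_ACQUIRED, PY_LOCK_FAILURE on timeout, or
       PY_LOCK_INTR when intr_flag is non-zero and a signal interrupted it. */
    PyLockStatus st = PyThread_acquire_lock_timed(lock, 2000000, /*intr_flag=*/1);
    if (st == PY_LOCK_ACQUIRED) {
        /* ... critical section ... */
        PyThread_release_lock(lock);
    }

    PyThread_free_lock(lock);
    return 0;
}
#endif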

/* set the thread stack size.
 * Return 0 if size is valid, -1 if size is invalid,
 * -2 if setting stack size is not supported.
 */
static int
_pythread_pthread_set_stacksize(size_t size)
{
#if defined(THREAD_STACK_SIZE)
    pthread_attr_t attrs;
    size_t tss_min;
    int rc = 0;
#endif

    /* set to default */
    if (size == 0) {
        _PyInterpreterState_GET()->pythread_stacksize = 0;
        return 0;
    }

#if defined(THREAD_STACK_SIZE)
#if defined(PTHREAD_STACK_MIN)
    tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN
                                                   : THREAD_STACK_MIN;
#else
    tss_min = THREAD_STACK_MIN;
#endif
    if (size >= tss_min) {
        /* validate stack size by setting thread attribute */
        if (pthread_attr_init(&attrs) == 0) {
            rc = pthread_attr_setstacksize(&attrs, size);
            pthread_attr_destroy(&attrs);
            if (rc == 0) {
                _PyInterpreterState_GET()->pythread_stacksize = size;
                return 0;
            }
        }
    }
    return -1;
#else
    return -2;
#endif
}

#define THREAD_SET_STACKSIZE(x) _pythread_pthread_set_stacksize(x)

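/* Illustrative sketch (kept out of the build): how a caller might drive the
 * stack-size setter through the THREAD_SET_STACKSIZE macro and interpret its
 * return codes (0 = accepted, -1 = rejected as too small or invalid,
 * -2 = unsupported).  The 1 MiB value and request_stack_size() helper are
 * hypothetical; Python-level code reaches this path via threading.stack_size(). */
#if 0
static const char *
request_stack_size(size_t size)
{
    switch (THREAD_SET_STACKSIZE(size)) {
    case 0:
        return "stack size accepted (0 requests the platform default)";
    case -1:
        return "invalid stack size (below the minimum or rejected by pthreads)";
    default:
        return "changing the stack size is not supported here";
    }
}

/* e.g. request_stack_size(1024 * 1024); */
#endif
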

/* Thread Local Storage (TLS) API

   This API is DEPRECATED since Python 3.7.  See PEP 539 for details.
*/

/* Issue #25658: On platforms where native TLS key is defined in a way that
   cannot be safely cast to int, PyThread_create_key returns immediately a
   failure status and other TLS functions all are no-ops.  This indicates
   clearly that the old API is not supported on platforms where it cannot be
   used reliably, and that no effort will be made to add such support.

   Note: PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT will be unnecessary after
   removing this API.
*/

int
PyThread_create_key(void)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_key_t key;
    int fail = pthread_key_create(&key, NULL);
    if (fail)
        return -1;
    if (key > INT_MAX) {
        /* Issue #22206: handle integer overflow */
        pthread_key_delete(key);
        errno = ENOMEM;
        return -1;
    }
    return (int)key;
#else
    return -1;  /* never return valid key value. */
#endif
}

void
PyThread_delete_key(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_key_delete(key);
#endif
}

void
PyThread_delete_key_value(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_setspecific(key, NULL);
#endif
}

int
PyThread_set_key_value(int key, void *value)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    int fail = pthread_setspecific(key, value);
    return fail ? -1 : 0;
#else
    return -1;
#endif
}

void *
PyThread_get_key_value(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    return pthread_getspecific(key);
#else
    return NULL;
#endif
}


void
PyThread_ReInitTLS(void)
{
}


/* Thread Specific Storage (TSS) API

   Platform-specific components of the TSS API implementation.
*/

int
PyThread_tss_create(Py_tss_t *key)
{
    assert(key != NULL);
    /* If the key has already been created, the function is silently skipped. */
    if (key->_is_initialized) {
        return 0;
    }

    int fail = pthread_key_create(&(key->_key), NULL);
    if (fail) {
        return -1;
    }
    key->_is_initialized = 1;
    return 0;
}

void
PyThread_tss_delete(Py_tss_t *key)
{
    assert(key != NULL);
    /* If the key has not been created, the function is silently skipped. */
    if (!key->_is_initialized) {
        return;
    }

    pthread_key_delete(key->_key);
    /* pthread does not define an invalid value for the key, so track
       validity with _is_initialized instead. */
    key->_is_initialized = 0;
}

int
PyThread_tss_set(Py_tss_t *key, void *value)
{
    assert(key != NULL);
    int fail = pthread_setspecific(key->_key, value);
    return fail ? -1 : 0;
}

void *
PyThread_tss_get(Py_tss_t *key)
{
    assert(key != NULL);
    return pthread_getspecific(key->_key);
}
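
/* Illustrative sketch (kept out of the build): typical use of the TSS API
 * from runtime code.  Py_tss_NEEDS_INIT gives a key in the "not created"
 * state, PyThread_tss_create() is a no-op once it has succeeded, and stored
 * values are per-thread.  The key name, helper, and stored pointer here are
 * hypothetical stand-ins for a real per-thread context. */
#if 0
static Py_tss_t example_key = Py_tss_NEEDS_INIT;

static void *
get_thread_context(void)
{
    if (PyThread_tss_create(&example_key) != 0) {
        return NULL;    /* key creation failed */
    }

    void *ctx = PyThread_tss_get(&example_key);
    if (ctx == NULL) {
        ctx = PyMem_RawCalloc(1, sizeof(int));  /* stand-in for real context */
        if (ctx != NULL && PyThread_tss_set(&example_key, ctx) != 0) {
            PyMem_RawFree(ctx);
            ctx = NULL;
        }
    }
    return ctx;
}
#endif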