• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 
2 /* Posix threads interface */
3 
4 #include <stdlib.h>
5 #include <string.h>
6 #if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
7 #define destructor xxdestructor
8 #endif
9 #include <pthread.h>
10 #if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
11 #undef destructor
12 #endif
13 #include <signal.h>
14 
15 #if defined(__linux__)
16 #   include <sys/syscall.h>     /* syscall(SYS_gettid) */
17 #elif defined(__FreeBSD__)
18 #   include <pthread_np.h>      /* pthread_getthreadid_np() */
19 #elif defined(__OpenBSD__)
20 #   include <unistd.h>          /* getthrid() */
21 #elif defined(_AIX)
22 #   include <sys/thread.h>      /* thread_self() */
23 #elif defined(__NetBSD__)
24 #   include <lwp.h>             /* _lwp_self() */
25 #endif
26 
27 /* The POSIX spec requires that use of pthread_attr_setstacksize
28    be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */
29 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
30 #ifndef THREAD_STACK_SIZE
31 #define THREAD_STACK_SIZE       0       /* use default stack size */
32 #endif
33 
34 /* The default stack size for new threads on OSX and BSD is small enough that
35  * we'll get hard crashes instead of 'maximum recursion depth exceeded'
36  * exceptions.
37  *
38  * The default stack sizes below are the empirically determined minimal stack
39  * sizes where a simple recursive function doesn't cause a hard crash.
40  */
41 #if defined(__APPLE__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
42 #undef  THREAD_STACK_SIZE
43 /* Note: This matches the value of -Wl,-stack_size in configure.ac */
44 #define THREAD_STACK_SIZE       0x1000000
45 #endif
46 #if defined(__FreeBSD__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
47 #undef  THREAD_STACK_SIZE
48 #define THREAD_STACK_SIZE       0x400000
49 #endif
50 #if defined(_AIX) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
51 #undef  THREAD_STACK_SIZE
52 #define THREAD_STACK_SIZE       0x200000
53 #endif
54 /* for safety, ensure a viable minimum stacksize */
55 #define THREAD_STACK_MIN        0x8000  /* 32 KiB */
56 #else  /* !_POSIX_THREAD_ATTR_STACKSIZE */
57 #ifdef THREAD_STACK_SIZE
58 #error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
59 #endif
60 #endif
61 
62 /* The POSIX spec says that implementations supporting the sem_*
63    family of functions must indicate this by defining
64    _POSIX_SEMAPHORES. */
65 #ifdef _POSIX_SEMAPHORES
66 /* On FreeBSD 4.x, _POSIX_SEMAPHORES is defined empty, so
67    we need to add 0 to make it work there as well. */
68 #if (_POSIX_SEMAPHORES+0) == -1
69 #define HAVE_BROKEN_POSIX_SEMAPHORES
70 #else
71 #include <semaphore.h>
72 #include <errno.h>
73 #endif
74 #endif
75 
76 
77 /* Whether or not to use semaphores directly rather than emulating them with
78  * mutexes and condition variables:
79  */
80 #if (defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES) && \
81      defined(HAVE_SEM_TIMEDWAIT))
82 #  define USE_SEMAPHORES
83 #else
84 #  undef USE_SEMAPHORES
85 #endif
86 
87 
88 /* On platforms that don't use standard POSIX threads pthread_sigmask()
89  * isn't present.  DEC threads uses sigprocmask() instead as do most
90  * other UNIX International compliant systems that don't have the full
91  * pthread implementation.
92  */
93 #if defined(HAVE_PTHREAD_SIGMASK) && !defined(HAVE_BROKEN_PTHREAD_SIGMASK)
94 #  define SET_THREAD_SIGMASK pthread_sigmask
95 #else
96 #  define SET_THREAD_SIGMASK sigprocmask
97 #endif
98 
99 
100 /* We assume all modern POSIX systems have gettimeofday() */
101 #ifdef GETTIMEOFDAY_NO_TZ
102 #define GETTIMEOFDAY(ptv) gettimeofday(ptv)
103 #else
104 #define GETTIMEOFDAY(ptv) gettimeofday(ptv, (struct timezone *)NULL)
105 #endif
106 
107 #define MICROSECONDS_TO_TIMESPEC(microseconds, ts) \
108 do { \
109     struct timeval tv; \
110     GETTIMEOFDAY(&tv); \
111     tv.tv_usec += microseconds % 1000000; \
112     tv.tv_sec += microseconds / 1000000; \
113     tv.tv_sec += tv.tv_usec / 1000000; \
114     tv.tv_usec %= 1000000; \
115     ts.tv_sec = tv.tv_sec; \
116     ts.tv_nsec = tv.tv_usec * 1000; \
117 } while(0)
118 
119 
120 /*
121  * pthread_cond support
122  */
123 
124 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
125 // monotonic is supported statically.  It doesn't mean it works on runtime.
126 #define CONDATTR_MONOTONIC
127 #endif
128 
129 // NULL when pthread_condattr_setclock(CLOCK_MONOTONIC) is not supported.
130 static pthread_condattr_t *condattr_monotonic = NULL;
131 
/* One-time setup of the condition-variable attribute used by
   _PyThread_cond_init().  If the runtime accepts
   pthread_condattr_setclock(CLOCK_MONOTONIC), publish the configured
   attribute through condattr_monotonic; otherwise it stays NULL and
   condition variables fall back to the default (realtime) clock. */
static void
init_condattr(void)
{
#ifdef CONDATTR_MONOTONIC
    /* static: the attribute object must outlive every condition
       variable that is later initialized with it. */
    static pthread_condattr_t ca;
    pthread_condattr_init(&ca);
    if (pthread_condattr_setclock(&ca, CLOCK_MONOTONIC) == 0) {
        condattr_monotonic = &ca;  // Use monotonic clock
    }
#endif
}
143 
144 int
_PyThread_cond_init(PyCOND_T * cond)145 _PyThread_cond_init(PyCOND_T *cond)
146 {
147     return pthread_cond_init(cond, condattr_monotonic);
148 }
149 
/* Compute the absolute timespec 'us' microseconds in the future, for use
   as the deadline of a pthread_cond_timedwait() on a condition variable
   created by _PyThread_cond_init().  Uses CLOCK_MONOTONIC when the
   condition variables were configured for it, the realtime clock
   otherwise (so both sides agree on the clock). */
void
_PyThread_cond_after(long long us, struct timespec *abs)
{
#ifdef CONDATTR_MONOTONIC
    if (condattr_monotonic) {
        clock_gettime(CLOCK_MONOTONIC, abs);
        abs->tv_sec  += us / 1000000;
        abs->tv_nsec += (us % 1000000) * 1000;
        /* carry nanosecond overflow into tv_sec and renormalize */
        abs->tv_sec  += abs->tv_nsec / 1000000000;
        abs->tv_nsec %= 1000000000;
        return;
    }
#endif

    /* Fallback: realtime clock via gettimeofday(). */
    struct timespec ts;
    MICROSECONDS_TO_TIMESPEC(us, ts);
    *abs = ts;
}
168 
169 
170 /* A pthread mutex isn't sufficient to model the Python lock type
171  * because, according to Draft 5 of the docs (P1003.4a/D5), both of the
172  * following are undefined:
173  *  -> a thread tries to lock a mutex it already has locked
174  *  -> a thread tries to unlock a mutex locked by a different thread
175  * pthread mutexes are designed for serializing threads over short pieces
176  * of code anyway, so wouldn't be an appropriate implementation of
177  * Python's locks regardless.
178  *
179  * The pthread_lock struct implements a Python lock as a "locked?" bit
180  * and a <condition, mutex> pair.  In general, if the bit can be acquired
181  * instantly, it is, else the pair is used to block the thread until the
182  * bit is cleared.     9 May 1994 tim@ksr.com
183  */
184 
/* Emulated Python lock: a "locked?" flag protected by a mutex, plus a
   condition variable signalled whenever the flag is cleared.  Used only
   when native semaphores are unavailable (see USE_SEMAPHORES). */
typedef struct {
    char             locked; /* 0=unlocked, 1=locked */
    /* a <cond, mutex> pair to handle an acquire of a locked lock */
    pthread_cond_t   lock_released;
    pthread_mutex_t  mut;
} pthread_lock;
191 
192 #define CHECK_STATUS(name)  if (status != 0) { perror(name); error = 1; }
193 #define CHECK_STATUS_PTHREAD(name)  if (status != 0) { fprintf(stderr, \
194     "%s: %s\n", name, strerror(status)); error = 1; }
195 
196 /*
197  * Initialization.
198  */
/* Platform hook called once by PyThread_init_thread(): run any
   library-level thread initialization, then probe for monotonic-clock
   condition variable support. */
static void
PyThread__init_thread(void)
{
#if defined(_AIX) && defined(__GNUC__)
    /* AIX/gcc needs an explicit pthread library bootstrap call. */
    extern void pthread_init(void);
    pthread_init();
#endif
    init_condattr();
}
208 
209 /*
210  * Thread support.
211  */
212 
213 /* bpo-33015: pythread_callback struct and pythread_wrapper() cast
214    "void func(void *)" to "void* func(void *)": always return NULL.
215 
216    PyThread_start_new_thread() uses "void func(void *)" type, whereas
217    pthread_create() requires a void* return value. */
/* Heap-allocated trampoline argument: carries the user's entry point and
   argument from PyThread_start_new_thread() into pythread_wrapper(),
   which frees it. */
typedef struct {
    void (*func) (void *);  /* thread entry point, returns nothing */
    void *arg;              /* opaque argument passed to func */
} pythread_callback;
222 
223 static void *
pythread_wrapper(void * arg)224 pythread_wrapper(void *arg)
225 {
226     /* copy func and func_arg and free the temporary structure */
227     pythread_callback *callback = arg;
228     void (*func)(void *) = callback->func;
229     void *func_arg = callback->arg;
230     PyMem_RawFree(arg);
231 
232     func(func_arg);
233     return NULL;
234 }
235 
236 unsigned long
PyThread_start_new_thread(void (* func)(void *),void * arg)237 PyThread_start_new_thread(void (*func)(void *), void *arg)
238 {
239     pthread_t th;
240     int status;
241 #if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
242     pthread_attr_t attrs;
243 #endif
244 #if defined(THREAD_STACK_SIZE)
245     size_t      tss;
246 #endif
247 
248     dprintf(("PyThread_start_new_thread called\n"));
249     if (!initialized)
250         PyThread_init_thread();
251 
252 #if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
253     if (pthread_attr_init(&attrs) != 0)
254         return PYTHREAD_INVALID_THREAD_ID;
255 #endif
256 #if defined(THREAD_STACK_SIZE)
257     PyThreadState *tstate = _PyThreadState_GET();
258     size_t stacksize = tstate ? tstate->interp->pythread_stacksize : 0;
259     tss = (stacksize != 0) ? stacksize : THREAD_STACK_SIZE;
260     if (tss != 0) {
261         if (pthread_attr_setstacksize(&attrs, tss) != 0) {
262             pthread_attr_destroy(&attrs);
263             return PYTHREAD_INVALID_THREAD_ID;
264         }
265     }
266 #endif
267 #if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
268     pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
269 #endif
270 
271     pythread_callback *callback = PyMem_RawMalloc(sizeof(pythread_callback));
272 
273     if (callback == NULL) {
274       return PYTHREAD_INVALID_THREAD_ID;
275     }
276 
277     callback->func = func;
278     callback->arg = arg;
279 
280     status = pthread_create(&th,
281 #if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
282                              &attrs,
283 #else
284                              (pthread_attr_t*)NULL,
285 #endif
286                              pythread_wrapper, callback);
287 
288 #if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
289     pthread_attr_destroy(&attrs);
290 #endif
291 
292     if (status != 0) {
293         PyMem_RawFree(callback);
294         return PYTHREAD_INVALID_THREAD_ID;
295     }
296 
297     pthread_detach(th);
298 
299 #if SIZEOF_PTHREAD_T <= SIZEOF_LONG
300     return (unsigned long) th;
301 #else
302     return (unsigned long) *(unsigned long *) &th;
303 #endif
304 }
305 
306 /* XXX This implementation is considered (to quote Tim Peters) "inherently
307    hosed" because:
308      - It does not guarantee the promise that a non-zero integer is returned.
309      - The cast to unsigned long is inherently unsafe.
310      - It is not clear that the 'volatile' (for AIX?) are any longer necessary.
311 */
/* Return the calling thread's identifier as an unsigned long (see the
   caveats in the comment above about casting pthread_t). */
unsigned long
PyThread_get_thread_ident(void)
{
    /* 'volatile' kept from the original code (possibly for AIX; see the
       note above -- it is not clearly still required). */
    volatile pthread_t threadid;
    if (!initialized)
        PyThread_init_thread();
    threadid = pthread_self();
    return (unsigned long) threadid;
}
321 
#ifdef PY_HAVE_THREAD_NATIVE_ID
/* Return the OS-level (kernel) thread id of the calling thread, via the
   platform-specific API selected below.  Only compiled on platforms
   where PY_HAVE_THREAD_NATIVE_ID is defined; if none of the branches
   matches, native_id is undeclared and compilation fails loudly. */
unsigned long
PyThread_get_thread_native_id(void)
{
    if (!initialized)
        PyThread_init_thread();
#ifdef __APPLE__
    uint64_t native_id;
    (void) pthread_threadid_np(NULL, &native_id);
#elif defined(__linux__)
    pid_t native_id;
    native_id = syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    int native_id;
    native_id = pthread_getthreadid_np();
#elif defined(__OpenBSD__)
    pid_t native_id;
    native_id = getthrid();
#elif defined(_AIX)
    tid_t native_id;
    native_id = thread_self();
#elif defined(__NetBSD__)
    lwpid_t native_id;
    native_id = _lwp_self();
#endif
    return (unsigned long) native_id;
}
#endif
350 
351 void _Py_NO_RETURN
PyThread_exit_thread(void)352 PyThread_exit_thread(void)
353 {
354     dprintf(("PyThread_exit_thread called\n"));
355     if (!initialized)
356         exit(0);
357     pthread_exit(0);
358 }
359 
360 #ifdef USE_SEMAPHORES
361 
362 /*
363  * Lock support.
364  */
365 
366 PyThread_type_lock
PyThread_allocate_lock(void)367 PyThread_allocate_lock(void)
368 {
369     sem_t *lock;
370     int status, error = 0;
371 
372     dprintf(("PyThread_allocate_lock called\n"));
373     if (!initialized)
374         PyThread_init_thread();
375 
376     lock = (sem_t *)PyMem_RawMalloc(sizeof(sem_t));
377 
378     if (lock) {
379         status = sem_init(lock,0,1);
380         CHECK_STATUS("sem_init");
381 
382         if (error) {
383             PyMem_RawFree((void *)lock);
384             lock = NULL;
385         }
386     }
387 
388     dprintf(("PyThread_allocate_lock() -> %p\n", (void *)lock));
389     return (PyThread_type_lock)lock;
390 }
391 
392 void
PyThread_free_lock(PyThread_type_lock lock)393 PyThread_free_lock(PyThread_type_lock lock)
394 {
395     sem_t *thelock = (sem_t *)lock;
396     int status, error = 0;
397 
398     (void) error; /* silence unused-but-set-variable warning */
399     dprintf(("PyThread_free_lock(%p) called\n", lock));
400 
401     if (!thelock)
402         return;
403 
404     status = sem_destroy(thelock);
405     CHECK_STATUS("sem_destroy");
406 
407     PyMem_RawFree((void *)thelock);
408 }
409 
410 /*
411  * As of February 2002, Cygwin thread implementations mistakenly report error
412  * codes in the return value of the sem_ calls (like the pthread_ functions).
413  * Correct implementations return -1 and put the code in errno. This supports
414  * either.
415  */
/* Normalize a sem_* return value to an error code.  Correct
   implementations return -1 and set errno; some (old Cygwin) return the
   error code directly.  Either way, the caller gets the error code, or
   0 on success. */
static int
fix_status(int status)
{
    if (status == -1) {
        return errno;
    }
    return status;
}
421 
/* Acquire a semaphore-backed lock.
   microseconds > 0: wait at most that long (sem_timedwait);
   microseconds == 0: poll once (sem_trywait);
   microseconds < 0: block until acquired (sem_wait).
   intr_flag non-zero: return PY_LOCK_INTR when interrupted by a signal
   instead of retrying.  Returns PY_LOCK_ACQUIRED, PY_LOCK_FAILURE, or
   PY_LOCK_INTR. */
PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
                            int intr_flag)
{
    PyLockStatus success;
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;
    struct timespec ts;
    _PyTime_t deadline = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n",
             lock, microseconds, intr_flag));

    if (microseconds > PY_TIMEOUT_MAX) {
        Py_FatalError("Timeout larger than PY_TIMEOUT_MAX");
    }

    if (microseconds > 0) {
        /* sem_timedwait() takes an absolute realtime deadline. */
        MICROSECONDS_TO_TIMESPEC(microseconds, ts);

        if (!intr_flag) {
            /* cannot overflow thanks to (microseconds > PY_TIMEOUT_MAX)
               check done above */
            /* Monotonic deadline used to recompute the remaining time
               after an EINTR, immune to system clock changes. */
            _PyTime_t timeout = _PyTime_FromNanoseconds(microseconds * 1000);
            deadline = _PyTime_GetMonotonicClock() + timeout;
        }
    }

    while (1) {
        if (microseconds > 0) {
            status = fix_status(sem_timedwait(thelock, &ts));
        }
        else if (microseconds == 0) {
            status = fix_status(sem_trywait(thelock));
        }
        else {
            status = fix_status(sem_wait(thelock));
        }

        /* Retry if interrupted by a signal, unless the caller wants to be
           notified.  */
        if (intr_flag || status != EINTR) {
            break;
        }

        if (microseconds > 0) {
            /* wait interrupted by a signal (EINTR): recompute the timeout */
            _PyTime_t dt = deadline - _PyTime_GetMonotonicClock();
            if (dt < 0) {
                status = ETIMEDOUT;
                break;
            }
            else if (dt > 0) {
                /* Convert the remaining monotonic time back to an
                   absolute realtime timespec for sem_timedwait(). */
                _PyTime_t realtime_deadline = _PyTime_GetSystemClock() + dt;
                if (_PyTime_AsTimespec(realtime_deadline, &ts) < 0) {
                    /* Cannot occur thanks to (microseconds > PY_TIMEOUT_MAX)
                       check done above */
                    Py_UNREACHABLE();
                }
                /* no need to update microseconds value, the code only care
                   if (microseconds > 0 or (microseconds == 0). */
            }
            else {
                microseconds = 0;
            }
        }
    }

    /* Don't check the status if we're stopping because of an interrupt.  */
    if (!(intr_flag && status == EINTR)) {
        if (microseconds > 0) {
            /* ETIMEDOUT is an expected outcome, not an error. */
            if (status != ETIMEDOUT)
                CHECK_STATUS("sem_timedwait");
        }
        else if (microseconds == 0) {
            /* EAGAIN just means the semaphore was already taken. */
            if (status != EAGAIN)
                CHECK_STATUS("sem_trywait");
        }
        else {
            CHECK_STATUS("sem_wait");
        }
    }

    if (status == 0) {
        success = PY_LOCK_ACQUIRED;
    } else if (intr_flag && status == EINTR) {
        success = PY_LOCK_INTR;
    } else {
        success = PY_LOCK_FAILURE;
    }

    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
             lock, microseconds, intr_flag, success));
    return success;
}
518 
519 void
PyThread_release_lock(PyThread_type_lock lock)520 PyThread_release_lock(PyThread_type_lock lock)
521 {
522     sem_t *thelock = (sem_t *)lock;
523     int status, error = 0;
524 
525     (void) error; /* silence unused-but-set-variable warning */
526     dprintf(("PyThread_release_lock(%p) called\n", lock));
527 
528     status = sem_post(thelock);
529     CHECK_STATUS("sem_post");
530 }
531 
532 #else /* USE_SEMAPHORES */
533 
534 /*
535  * Lock support.
536  */
/* Allocate a Python lock built from a <condition, mutex> pair plus a
   "locked" flag (see the pthread_lock comment above).  Returns NULL on
   allocation or initialization failure.
   NOTE(review): on the error path the struct is freed but a mutex/cond
   that was successfully initialized before the failure is not destroyed
   first -- presumably harmless on these platforms, but worth
   confirming. */
PyThread_type_lock
PyThread_allocate_lock(void)
{
    pthread_lock *lock;
    int status, error = 0;

    dprintf(("PyThread_allocate_lock called\n"));
    if (!initialized)
        PyThread_init_thread();

    lock = (pthread_lock *) PyMem_RawMalloc(sizeof(pthread_lock));
    if (lock) {
        memset((void *)lock, '\0', sizeof(pthread_lock));
        lock->locked = 0;

        status = pthread_mutex_init(&lock->mut, NULL);
        CHECK_STATUS_PTHREAD("pthread_mutex_init");
        /* Mark the pthread mutex underlying a Python mutex as
           pure happens-before.  We can't simply mark the
           Python-level mutex as a mutex because it can be
           acquired and released in different threads, which
           will cause errors. */
        _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut);

        status = _PyThread_cond_init(&lock->lock_released);
        CHECK_STATUS_PTHREAD("pthread_cond_init");

        if (error) {
            PyMem_RawFree((void *)lock);
            lock = 0;
        }
    }

    dprintf(("PyThread_allocate_lock() -> %p\n", (void *)lock));
    return (PyThread_type_lock) lock;
}
573 
574 void
PyThread_free_lock(PyThread_type_lock lock)575 PyThread_free_lock(PyThread_type_lock lock)
576 {
577     pthread_lock *thelock = (pthread_lock *)lock;
578     int status, error = 0;
579 
580     (void) error; /* silence unused-but-set-variable warning */
581     dprintf(("PyThread_free_lock(%p) called\n", lock));
582 
583     /* some pthread-like implementations tie the mutex to the cond
584      * and must have the cond destroyed first.
585      */
586     status = pthread_cond_destroy( &thelock->lock_released );
587     CHECK_STATUS_PTHREAD("pthread_cond_destroy");
588 
589     status = pthread_mutex_destroy( &thelock->mut );
590     CHECK_STATUS_PTHREAD("pthread_mutex_destroy");
591 
592     PyMem_RawFree((void *)thelock);
593 }
594 
/* Acquire an emulated (mutex+condvar) lock.
   microseconds > 0: wait at most that long; == 0: poll once;
   < 0: block until acquired.  With intr_flag, a wakeup that did not
   yield the lock is reported as PY_LOCK_INTR so the caller can handle
   a possible signal and retry. */
PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
                            int intr_flag)
{
    PyLockStatus success = PY_LOCK_FAILURE;
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n",
             lock, microseconds, intr_flag));

    if (microseconds == 0) {
        /* Poll: don't block on the mutex either. */
        status = pthread_mutex_trylock( &thelock->mut );
        if (status != EBUSY)
            CHECK_STATUS_PTHREAD("pthread_mutex_trylock[1]");
    }
    else {
        status = pthread_mutex_lock( &thelock->mut );
        CHECK_STATUS_PTHREAD("pthread_mutex_lock[1]");
    }
    if (status == 0) {
        if (thelock->locked == 0) {
            success = PY_LOCK_ACQUIRED;
        }
        else if (microseconds != 0) {
            struct timespec abs;
            if (microseconds > 0) {
                /* Absolute deadline on the clock the condvar uses. */
                _PyThread_cond_after(microseconds, &abs);
            }
            /* continue trying until we get the lock */

            /* mut must be locked by me -- part of the condition
             * protocol */
            while (success == PY_LOCK_FAILURE) {
                if (microseconds > 0) {
                    status = pthread_cond_timedwait(
                        &thelock->lock_released,
                        &thelock->mut, &abs);
                    /* NOTE(review): pthread_cond_timedwait() returns 0
                       or an errno code; a literal 1 is EPERM on Linux
                       (mutex not owned).  This magic-number check looks
                       suspicious -- confirm its intent. */
                    if (status == 1) {
                        break;
                    }
                    if (status == ETIMEDOUT)
                        break;
                    CHECK_STATUS_PTHREAD("pthread_cond_timedwait");
                }
                else {
                    status = pthread_cond_wait(
                        &thelock->lock_released,
                        &thelock->mut);
                    CHECK_STATUS_PTHREAD("pthread_cond_wait");
                }

                if (intr_flag && status == 0 && thelock->locked) {
                    /* We were woken up, but didn't get the lock.  We probably received
                     * a signal.  Return PY_LOCK_INTR to allow the caller to handle
                     * it and retry.  */
                    success = PY_LOCK_INTR;
                    break;
                }
                else if (status == 0 && !thelock->locked) {
                    success = PY_LOCK_ACQUIRED;
                }
            }
        }
        if (success == PY_LOCK_ACQUIRED) thelock->locked = 1;
        status = pthread_mutex_unlock( &thelock->mut );
        CHECK_STATUS_PTHREAD("pthread_mutex_unlock[1]");
    }

    if (error) success = PY_LOCK_FAILURE;
    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
             lock, microseconds, intr_flag, success));
    return success;
}
669 
670 void
PyThread_release_lock(PyThread_type_lock lock)671 PyThread_release_lock(PyThread_type_lock lock)
672 {
673     pthread_lock *thelock = (pthread_lock *)lock;
674     int status, error = 0;
675 
676     (void) error; /* silence unused-but-set-variable warning */
677     dprintf(("PyThread_release_lock(%p) called\n", lock));
678 
679     status = pthread_mutex_lock( &thelock->mut );
680     CHECK_STATUS_PTHREAD("pthread_mutex_lock[3]");
681 
682     thelock->locked = 0;
683 
684     /* wake up someone (anyone, if any) waiting on the lock */
685     status = pthread_cond_signal( &thelock->lock_released );
686     CHECK_STATUS_PTHREAD("pthread_cond_signal");
687 
688     status = pthread_mutex_unlock( &thelock->mut );
689     CHECK_STATUS_PTHREAD("pthread_mutex_unlock[3]");
690 }
691 
692 #endif /* USE_SEMAPHORES */
693 
694 int
PyThread_acquire_lock(PyThread_type_lock lock,int waitflag)695 PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
696 {
697     return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
698 }
699 
700 /* set the thread stack size.
701  * Return 0 if size is valid, -1 if size is invalid,
702  * -2 if setting stack size is not supported.
703  */
/* set the thread stack size.
 * Return 0 if size is valid, -1 if size is invalid,
 * -2 if setting stack size is not supported.
 */
static int
_pythread_pthread_set_stacksize(size_t size)
{
#if defined(THREAD_STACK_SIZE)
    pthread_attr_t attrs;
    size_t tss_min;
    int rc = 0;
#endif

    /* set to default */
    if (size == 0) {
        _PyInterpreterState_GET_UNSAFE()->pythread_stacksize = 0;
        return 0;
    }

#if defined(THREAD_STACK_SIZE)
#if defined(PTHREAD_STACK_MIN)
    /* Enforce the stricter of the pthread and Python minimums. */
    tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN
                                                   : THREAD_STACK_MIN;
#else
    tss_min = THREAD_STACK_MIN;
#endif
    if (size >= tss_min) {
        /* validate stack size by setting thread attribute */
        if (pthread_attr_init(&attrs) == 0) {
            rc = pthread_attr_setstacksize(&attrs, size);
            pthread_attr_destroy(&attrs);
            if (rc == 0) {
                /* Only record the size once pthread accepted it. */
                _PyInterpreterState_GET_UNSAFE()->pythread_stacksize = size;
                return 0;
            }
        }
    }
    return -1;
#else
    return -2;
#endif
}
742 
743 #define THREAD_SET_STACKSIZE(x) _pythread_pthread_set_stacksize(x)
744 
745 
746 /* Thread Local Storage (TLS) API
747 
748    This API is DEPRECATED since Python 3.7.  See PEP 539 for details.
749 */
750 
751 /* Issue #25658: On platforms where native TLS key is defined in a way that
752    cannot be safely cast to int, PyThread_create_key returns immediately a
753    failure status and other TLS functions all are no-ops.  This indicates
754    clearly that the old API is not supported on platforms where it cannot be
755    used reliably, and that no effort will be made to add such support.
756 
757    Note: PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT will be unnecessary after
758    removing this API.
759 */
760 
/* Create a TLS key and return it as an int, or -1 on failure.  On
   platforms where pthread_key_t cannot be safely represented as an int,
   always fail (see the deprecation note above). */
int
PyThread_create_key(void)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_key_t key;
    if (pthread_key_create(&key, NULL) != 0) {
        return -1;
    }
    if (key > INT_MAX) {
        /* Issue #22206: handle integer overflow */
        pthread_key_delete(key);
        errno = ENOMEM;
        return -1;
    }
    return (int)key;
#else
    return -1;  /* never return valid key value. */
#endif
}
780 
/* Delete a TLS key created by PyThread_create_key().  No-op on
   platforms where the old TLS API is unsupported. */
void
PyThread_delete_key(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_key_delete((pthread_key_t)key);
#else
    (void)key;
#endif
}
788 
/* Clear the calling thread's value for 'key' (sets it to NULL).  No-op
   on platforms where the old TLS API is unsupported. */
void
PyThread_delete_key_value(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_setspecific((pthread_key_t)key, NULL);
#else
    (void)key;
#endif
}
796 
/* Associate 'value' with 'key' for the calling thread.  Returns 0 on
   success, -1 on failure (always -1 where the old TLS API is
   unsupported). */
int
PyThread_set_key_value(int key, void *value)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    if (pthread_setspecific((pthread_key_t)key, value) != 0) {
        return -1;
    }
    return 0;
#else
    (void)key;
    (void)value;
    return -1;
#endif
}
807 
/* Return the calling thread's value for 'key', or NULL if none is set
   (always NULL where the old TLS API is unsupported). */
void *
PyThread_get_key_value(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    return pthread_getspecific((pthread_key_t)key);
#else
    (void)key;
    return NULL;
#endif
}
817 
818 
/* Intentionally empty: this pthread implementation keeps no extra
   per-key bookkeeping, so there is nothing to re-initialize here.
   (Presumably invoked after fork() by the platform-independent layer --
   confirm against the caller.) */
void
PyThread_ReInitTLS(void)
{
}
823 
824 
825 /* Thread Specific Storage (TSS) API
826 
827    Platform-specific components of TSS API implementation.
828 */
829 
830 int
PyThread_tss_create(Py_tss_t * key)831 PyThread_tss_create(Py_tss_t *key)
832 {
833     assert(key != NULL);
834     /* If the key has been created, function is silently skipped. */
835     if (key->_is_initialized) {
836         return 0;
837     }
838 
839     int fail = pthread_key_create(&(key->_key), NULL);
840     if (fail) {
841         return -1;
842     }
843     key->_is_initialized = 1;
844     return 0;
845 }
846 
847 void
PyThread_tss_delete(Py_tss_t * key)848 PyThread_tss_delete(Py_tss_t *key)
849 {
850     assert(key != NULL);
851     /* If the key has not been created, function is silently skipped. */
852     if (!key->_is_initialized) {
853         return;
854     }
855 
856     pthread_key_delete(key->_key);
857     /* pthread has not provided the defined invalid value for the key. */
858     key->_is_initialized = 0;
859 }
860 
861 int
PyThread_tss_set(Py_tss_t * key,void * value)862 PyThread_tss_set(Py_tss_t *key, void *value)
863 {
864     assert(key != NULL);
865     int fail = pthread_setspecific(key->_key, value);
866     return fail ? -1 : 0;
867 }
868 
869 void *
PyThread_tss_get(Py_tss_t * key)870 PyThread_tss_get(Py_tss_t *key)
871 {
872     assert(key != NULL);
873     return pthread_getspecific(key->_key);
874 }
875