1
2 /* Thread and interpreter state structures and their interfaces */
3
4 #include "Python.h"
5 #include "pycore_abstract.h" // _PyIndex_Check()
6 #include "pycore_ceval.h"
7 #include "pycore_code.h" // stats
8 #include "pycore_critical_section.h" // _PyCriticalSection_Resume()
9 #include "pycore_dtoa.h" // _dtoa_state_INIT()
10 #include "pycore_emscripten_trampoline.h" // _Py_EmscriptenTrampoline_Init()
11 #include "pycore_frame.h"
12 #include "pycore_initconfig.h" // _PyStatus_OK()
13 #include "pycore_object.h" // _PyType_InitCache()
14 #include "pycore_object_stack.h" // _PyObjectStackChunk_ClearFreeList()
15 #include "pycore_parking_lot.h" // _PyParkingLot_AfterFork()
16 #include "pycore_pyerrors.h" // _PyErr_Clear()
17 #include "pycore_pylifecycle.h" // _PyAST_Fini()
18 #include "pycore_pymem.h" // _PyMem_SetDefaultAllocator()
19 #include "pycore_pystate.h"
20 #include "pycore_runtime_init.h" // _PyRuntimeState_INIT
21 #include "pycore_sysmodule.h" // _PySys_Audit()
22 #include "pycore_obmalloc.h" // _PyMem_obmalloc_state_on_heap()
23
24 /* --------------------------------------------------------------------------
25 CAUTION
26
27 Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A
28 number of these functions are advertised as safe to call when the GIL isn't
29 held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
30 debugging obmalloc functions. Those aren't thread-safe (they rely on the GIL
31 to avoid the expense of doing their own locking).
32 -------------------------------------------------------------------------- */
33
34 #ifdef HAVE_DLOPEN
35 # ifdef HAVE_DLFCN_H
36 # include <dlfcn.h>
37 # endif
38 # if !HAVE_DECL_RTLD_LAZY
39 # define RTLD_LAZY 1
40 # endif
41 #endif
42
43
44 /****************************************/
45 /* helpers for the current thread state */
46 /****************************************/
47
48 // API for the current thread state is further down.
49
50 /* "current" means one of:
51 - bound to the current OS thread
52 - holds the GIL
53 */
54
55 //-------------------------------------------------
56 // a highly efficient lookup for the current thread
57 //-------------------------------------------------
58
59 /*
60 The stored thread state is set by PyThreadState_Swap().
61
62 For each of these functions, the GIL must be held by the current thread.
63 */
64
65
66 #ifdef HAVE_THREAD_LOCAL
67 _Py_thread_local PyThreadState *_Py_tss_tstate = NULL;
68 #endif
69
70 static inline PyThreadState *
current_fast_get(void)71 current_fast_get(void)
72 {
73 #ifdef HAVE_THREAD_LOCAL
74 return _Py_tss_tstate;
75 #else
76 // XXX Fall back to the PyThread_tss_*() API.
77 # error "no supported thread-local variable storage classifier"
78 #endif
79 }
80
/* Store `tstate` in the fast thread-local slot for the current OS thread.
   `tstate` must not be NULL; use current_fast_clear() to unset.
   The `runtime` parameter is unused (kept for a uniform call signature). */
static inline void
current_fast_set(_PyRuntimeState *Py_UNUSED(runtime), PyThreadState *tstate)
{
    assert(tstate != NULL);
#ifdef HAVE_THREAD_LOCAL
    _Py_tss_tstate = tstate;
#else
    // XXX Fall back to the PyThread_tss_*() API.
#   error "no supported thread-local variable storage classifier"
#endif
}
92
/* Reset the fast thread-local slot for the current OS thread to NULL. */
static inline void
current_fast_clear(_PyRuntimeState *Py_UNUSED(runtime))
{
#ifdef HAVE_THREAD_LOCAL
    _Py_tss_tstate = NULL;
#else
    // XXX Fall back to the PyThread_tss_*() API.
#   error "no supported thread-local variable storage classifier"
#endif
}
103
/* Abort with a fatal error if `tstate` is still the "current" thread state
   (i.e. the one in the fast thread-local slot).  Used before operations
   that require the thread state to already be deactivated. */
#define tstate_verify_not_active(tstate) \
    if (tstate == current_fast_get()) { \
        _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate); \
    }
108
109 PyThreadState *
_PyThreadState_GetCurrent(void)110 _PyThreadState_GetCurrent(void)
111 {
112 return current_fast_get();
113 }
114
115
116 //------------------------------------------------
117 // the thread state bound to the current OS thread
118 //------------------------------------------------
119
120 static inline int
tstate_tss_initialized(Py_tss_t * key)121 tstate_tss_initialized(Py_tss_t *key)
122 {
123 return PyThread_tss_is_created(key);
124 }
125
126 static inline int
tstate_tss_init(Py_tss_t * key)127 tstate_tss_init(Py_tss_t *key)
128 {
129 assert(!tstate_tss_initialized(key));
130 return PyThread_tss_create(key);
131 }
132
/* Destroy the TSS key.  The key must currently exist. */
static inline void
tstate_tss_fini(Py_tss_t *key)
{
    assert(tstate_tss_initialized(key));
    PyThread_tss_delete(key);
}
139
140 static inline PyThreadState *
tstate_tss_get(Py_tss_t * key)141 tstate_tss_get(Py_tss_t *key)
142 {
143 assert(tstate_tss_initialized(key));
144 return (PyThreadState *)PyThread_tss_get(key);
145 }
146
147 static inline int
tstate_tss_set(Py_tss_t * key,PyThreadState * tstate)148 tstate_tss_set(Py_tss_t *key, PyThreadState *tstate)
149 {
150 assert(tstate != NULL);
151 assert(tstate_tss_initialized(key));
152 return PyThread_tss_set(key, (void *)tstate);
153 }
154
155 static inline int
tstate_tss_clear(Py_tss_t * key)156 tstate_tss_clear(Py_tss_t *key)
157 {
158 assert(tstate_tss_initialized(key));
159 return PyThread_tss_set(key, (void *)NULL);
160 }
161
#ifdef HAVE_FORK
/* Reset the TSS key - called by PyOS_AfterFork_Child().
 * This should not be necessary, but some - buggy - pthread implementations
 * don't reset TSS upon fork(), see issue #10517.
 *
 * Destroys and re-creates `key`, then re-associates the surviving thread's
 * value (if any) with the fresh key.  Returns a PyStatus: OK, NO_MEMORY if
 * the key cannot be re-created, or ERR if the value cannot be re-set.
 */
static PyStatus
tstate_tss_reinit(Py_tss_t *key)
{
    // Nothing to do if the key was never created.
    if (!tstate_tss_initialized(key)) {
        return _PyStatus_OK();
    }
    // Save the current thread's value before destroying the key.
    PyThreadState *tstate = tstate_tss_get(key);

    tstate_tss_fini(key);
    if (tstate_tss_init(key) != 0) {
        return _PyStatus_NO_MEMORY();
    }

    /* If the thread had an associated auto thread state, reassociate it with
     * the new key. */
    if (tstate && tstate_tss_set(key, tstate) != 0) {
        return _PyStatus_ERR("failed to re-set autoTSSkey");
    }
    return _PyStatus_OK();
}
#endif
188
189
/*
The stored thread state is set by bind_tstate() (AKA PyThreadState_Bind()).

The GIL does not need to be held for these.
*/
195
/* Convenience wrappers binding the generic tstate_tss_*() helpers to the
   runtime's autoTSSkey (the per-OS-thread "gilstate" association).
   The underscore-prefixed set/clear variants return an error code; the
   fatal-error-on-failure versions are defined just below. */
#define gilstate_tss_initialized(runtime) \
    tstate_tss_initialized(&(runtime)->autoTSSkey)
#define gilstate_tss_init(runtime) \
    tstate_tss_init(&(runtime)->autoTSSkey)
#define gilstate_tss_fini(runtime) \
    tstate_tss_fini(&(runtime)->autoTSSkey)
#define gilstate_tss_get(runtime) \
    tstate_tss_get(&(runtime)->autoTSSkey)
#define _gilstate_tss_set(runtime, tstate) \
    tstate_tss_set(&(runtime)->autoTSSkey, tstate)
#define _gilstate_tss_clear(runtime) \
    tstate_tss_clear(&(runtime)->autoTSSkey)
#define gilstate_tss_reinit(runtime) \
    tstate_tss_reinit(&(runtime)->autoTSSkey)
210
211 static inline void
gilstate_tss_set(_PyRuntimeState * runtime,PyThreadState * tstate)212 gilstate_tss_set(_PyRuntimeState *runtime, PyThreadState *tstate)
213 {
214 assert(tstate != NULL && tstate->interp->runtime == runtime);
215 if (_gilstate_tss_set(runtime, tstate) != 0) {
216 Py_FatalError("failed to set current tstate (TSS)");
217 }
218 }
219
220 static inline void
gilstate_tss_clear(_PyRuntimeState * runtime)221 gilstate_tss_clear(_PyRuntimeState *runtime)
222 {
223 if (_gilstate_tss_clear(runtime) != 0) {
224 Py_FatalError("failed to clear current tstate (TSS)");
225 }
226 }
227
228
#ifndef NDEBUG
static inline int tstate_is_alive(PyThreadState *tstate);

/* Debug-only: true if the thread state was bound to an OS thread
   (bind_tstate()) and has not since been unbound (unbind_tstate()). */
static inline int
tstate_is_bound(PyThreadState *tstate)
{
    return tstate->_status.bound && !tstate->_status.unbound;
}
#endif  // !NDEBUG
238
239 static void bind_gilstate_tstate(PyThreadState *);
240 static void unbind_gilstate_tstate(PyThreadState *);
241
242 static void tstate_mimalloc_bind(PyThreadState *);
243
/* Associate `tstate` with the current OS thread.
   Records the OS thread ID(s) and initializes per-thread machinery
   (biased refcounting queue, mimalloc state) that must be set up from
   the thread that will use this tstate.  Must be called at most once
   per thread state, from the thread being bound, and before the tstate
   becomes active. */
static void
bind_tstate(PyThreadState *tstate)
{
    assert(tstate != NULL);
    assert(tstate_is_alive(tstate) && !tstate->_status.bound);
    assert(!tstate->_status.unbound);  // just in case
    assert(!tstate->_status.bound_gilstate);
    assert(tstate != gilstate_tss_get(tstate->interp->runtime));
    assert(!tstate->_status.active);
    assert(tstate->thread_id == 0);
    assert(tstate->native_thread_id == 0);

    // Currently we don't necessarily store the thread state
    // in thread-local storage (e.g. per-interpreter).

    tstate->thread_id = PyThread_get_thread_ident();
#ifdef PY_HAVE_THREAD_NATIVE_ID
    tstate->native_thread_id = PyThread_get_thread_native_id();
#endif

#ifdef Py_GIL_DISABLED
    // Initialize biased reference counting inter-thread queue. Note that this
    // needs to be initialized from the active thread.
    _Py_brc_init_thread(tstate);
#endif

    // mimalloc state needs to be initialized from the active thread.
    tstate_mimalloc_bind(tstate);

    tstate->_status.bound = 1;
}
275
/* Dissociate `tstate` from its OS thread.
   Only flips the `unbound` status bit; the recorded thread IDs (and the
   `bound` bit) are intentionally preserved for debugging. */
static void
unbind_tstate(PyThreadState *tstate)
{
    assert(tstate != NULL);
    assert(tstate_is_bound(tstate));
#ifndef HAVE_PTHREAD_STUBS
    assert(tstate->thread_id > 0);
#endif
#ifdef PY_HAVE_THREAD_NATIVE_ID
    assert(tstate->native_thread_id > 0);
#endif

    // We leave thread_id and native_thread_id alone
    // since they can be useful for debugging.
    // Check the `_status` field to know if these values
    // are still valid.

    // We leave tstate->_status.bound set to 1
    // to indicate it was previously bound.
    tstate->_status.unbound = 1;
}
297
298
299 /* Stick the thread state for this thread in thread specific storage.
300
301 When a thread state is created for a thread by some mechanism
302 other than PyGILState_Ensure(), it's important that the GILState
303 machinery knows about it so it doesn't try to create another
304 thread state for the thread.
305 (This is a better fix for SF bug #1010677 than the first one attempted.)
306
307 The only situation where you can legitimately have more than one
308 thread state for an OS level thread is when there are multiple
309 interpreters.
310
311 Before 3.12, the PyGILState_*() APIs didn't work with multiple
312 interpreters (see bpo-10915 and bpo-15751), so this function used
313 to set TSS only once. Thus, the first thread state created for that
314 given OS level thread would "win", which seemed reasonable behaviour.
315 */
316
/* Make `tstate` the gilstate (autoTSSkey) thread state for the current
   OS thread, displacing any previously bound tstate (whose
   bound_gilstate flag is cleared first).  `tstate` must already be
   bound to this OS thread via bind_tstate(). */
static void
bind_gilstate_tstate(PyThreadState *tstate)
{
    assert(tstate != NULL);
    assert(tstate_is_alive(tstate));
    assert(tstate_is_bound(tstate));
    // XXX assert(!tstate->_status.active);
    assert(!tstate->_status.bound_gilstate);

    _PyRuntimeState *runtime = tstate->interp->runtime;
    PyThreadState *tcur = gilstate_tss_get(runtime);
    assert(tstate != tcur);

    // Displace whichever tstate was previously bound for this OS thread.
    if (tcur != NULL) {
        tcur->_status.bound_gilstate = 0;
    }
    gilstate_tss_set(runtime, tstate);
    tstate->_status.bound_gilstate = 1;
}
336
/* Undo bind_gilstate_tstate(): clear the autoTSSkey slot for the current
   OS thread and reset the tstate's bound_gilstate flag.  `tstate` must be
   the one currently stored in the slot. */
static void
unbind_gilstate_tstate(PyThreadState *tstate)
{
    assert(tstate != NULL);
    // XXX assert(tstate_is_alive(tstate));
    assert(tstate_is_bound(tstate));
    // XXX assert(!tstate->_status.active);
    assert(tstate->_status.bound_gilstate);
    assert(tstate == gilstate_tss_get(tstate->interp->runtime));

    gilstate_tss_clear(tstate->interp->runtime);
    tstate->_status.bound_gilstate = 0;
}
350
351
352 //----------------------------------------------
353 // the thread state that currently holds the GIL
354 //----------------------------------------------
355
356 /* This is not exported, as it is not reliable! It can only
357 ever be compared to the state for the *current* thread.
358 * If not equal, then it doesn't matter that the actual
359 value may change immediately after comparison, as it can't
360 possibly change to the current thread's state.
361 * If equal, then the current thread holds the lock, so the value can't
362 change until we yield the lock.
363 */
/* Return non-zero if `tstate` is the thread state currently running on
   this OS thread (and therefore holds the GIL).  Per the comment above,
   the result is only meaningful for the calling thread's own tstate. */
static int
holds_gil(PyThreadState *tstate)
{
    // XXX Fall back to tstate->interp->runtime->ceval.gil.last_holder
    // (and tstate->interp->runtime->ceval.gil.locked).
    assert(tstate != NULL);
    /* Must be the tstate for this thread */
    assert(tstate == gilstate_tss_get(tstate->interp->runtime));
    return tstate == current_fast_get();
}
374
375
376 /****************************/
377 /* the global runtime state */
378 /****************************/
379
380 //----------
381 // lifecycle
382 //----------
383
384 /* Suppress deprecation warning for PyBytesObject.ob_shash */
385 _Py_COMP_DIAG_PUSH
386 _Py_COMP_DIAG_IGNORE_DEPR_DECLS
/* We use "initial" if the runtime gets re-used
   (e.g. Py_Finalize() followed by Py_Initialize()).
   Note that we initialize "initial" relative to _PyRuntime,
   to ensure pre-initialized pointers point to the active
   runtime state (and not "initial"). */
392 static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime, "");
393 _Py_COMP_DIAG_POP
394
/* Initializer listing every runtime-global PyMutex that must be
   re-initialized in the child process after fork()
   (see _PyRuntimeState_ReInitThreads()). */
#define LOCKS_INIT(runtime) \
    { \
        &(runtime)->interpreters.mutex, \
        &(runtime)->xi.registry.mutex, \
        &(runtime)->unicode_state.ids.mutex, \
        &(runtime)->imports.extensions.mutex, \
        &(runtime)->ceval.pending_mainthread.mutex, \
        &(runtime)->ceval.sys_trace_profile_mutex, \
        &(runtime)->atexit.mutex, \
        &(runtime)->audit_hooks.mutex, \
        &(runtime)->allocators.mutex, \
        &(runtime)->_main_interpreter.types.mutex, \
        &(runtime)->_main_interpreter.code_state.mutex, \
    }
409
/* Bring `runtime` to its minimally-initialized state.
   The hook/audit/unicode-index arguments are values preserved across a
   Py_Finalize()/Py_Initialize() cycle by _PyRuntimeState_Init() and are
   re-installed here.  Sets runtime->_initialized on success. */
static void
init_runtime(_PyRuntimeState *runtime,
             void *open_code_hook, void *open_code_userdata,
             _Py_AuditHookEntry *audit_hook_head,
             Py_ssize_t unicode_next_index)
{
    assert(!runtime->preinitializing);
    assert(!runtime->preinitialized);
    assert(!runtime->core_initialized);
    assert(!runtime->initialized);
    assert(!runtime->_initialized);

    // Re-install the values preserved from the previous runtime (if any).
    runtime->open_code_hook = open_code_hook;
    runtime->open_code_userdata = open_code_userdata;
    runtime->audit_hooks.head = audit_hook_head;

    PyPreConfig_InitPythonConfig(&runtime->preconfig);

    // Set it to the ID of the main thread of the main interpreter.
    runtime->main_thread = PyThread_get_thread_ident();

    runtime->unicode_state.ids.next_index = unicode_next_index;

#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
    _Py_EmscriptenTrampoline_Init(runtime);
#endif

    runtime->_initialized = 1;
}
439
/* Initialize (or re-initialize) the global runtime state.
   On re-initialization the whole struct is reset from the static
   `initial` template, preserving only the open-code hook, audit hooks,
   unicode ID index, and the debug-offsets cookie.  Also creates the
   gilstate and trash TSS keys.  Returns OK, or NO_MEMORY if a TSS key
   cannot be created (the runtime is torn down again in that case). */
PyStatus
_PyRuntimeState_Init(_PyRuntimeState *runtime)
{
    /* We preserve the hook across init, because there is
       currently no public API to set it between runtime
       initialization and interpreter initialization. */
    void *open_code_hook = runtime->open_code_hook;
    void *open_code_userdata = runtime->open_code_userdata;
    _Py_AuditHookEntry *audit_hook_head = runtime->audit_hooks.head;
    // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize()
    // is called multiple times.
    Py_ssize_t unicode_next_index = runtime->unicode_state.ids.next_index;

    if (runtime->_initialized) {
        // Py_Initialize() must be running again.
        // Reset to _PyRuntimeState_INIT.
        memcpy(runtime, &initial, sizeof(*runtime));
        // Preserve the cookie from the original runtime.
        memcpy(runtime->debug_offsets.cookie, _Py_Debug_Cookie, 8);
        assert(!runtime->_initialized);
    }

    if (gilstate_tss_init(runtime) != 0) {
        _PyRuntimeState_Fini(runtime);
        return _PyStatus_NO_MEMORY();
    }

    if (PyThread_tss_create(&runtime->trashTSSkey) != 0) {
        _PyRuntimeState_Fini(runtime);
        return _PyStatus_NO_MEMORY();
    }

    init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head,
                 unicode_next_index);

    return _PyStatus_OK();
}
477
/* Tear down the global runtime state: destroy the gilstate and trash
   TSS keys if they exist.  Safe to call on a partially-initialized
   runtime (used on _PyRuntimeState_Init() failure paths). */
void
_PyRuntimeState_Fini(_PyRuntimeState *runtime)
{
#ifdef Py_REF_DEBUG
    /* The count is cleared by _Py_FinalizeRefTotal(). */
    assert(runtime->object_state.interpreter_leaks == 0);
#endif

    if (gilstate_tss_initialized(runtime)) {
        gilstate_tss_fini(runtime);
    }

    if (PyThread_tss_is_created(&runtime->trashTSSkey)) {
        PyThread_tss_delete(&runtime->trashTSSkey);
    }
}
494
#ifdef HAVE_FORK
/* This function is called from PyOS_AfterFork_Child to ensure that
   newly created child processes do not share locks with the parent.

   Ordering matters: the parking lot must be reset before any lock that
   uses it is re-initialized, and the child becomes the new "main"
   thread.  Returns OK, or an error/NO_MEMORY status if a lock or TSS
   key cannot be re-created. */
PyStatus
_PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
{
    // This was initially set in _PyRuntimeState_Init().
    runtime->main_thread = PyThread_get_thread_ident();

    // Clears the parking lot. Any waiting threads are dead. This must be
    // called before releasing any locks that use the parking lot.
    _PyParkingLot_AfterFork();

    // Re-initialize global locks
    PyMutex *locks[] = LOCKS_INIT(runtime);
    for (size_t i = 0; i < Py_ARRAY_LENGTH(locks); i++) {
        _PyMutex_at_fork_reinit(locks[i]);
    }
#ifdef Py_GIL_DISABLED
    // Free-threaded build: also reset each interpreter's weakref locks.
    for (PyInterpreterState *interp = runtime->interpreters.head;
         interp != NULL; interp = interp->next)
    {
        for (int i = 0; i < NUM_WEAKREF_LIST_LOCKS; i++) {
            _PyMutex_at_fork_reinit(&interp->weakref_locks[i]);
        }
    }
#endif

    _PyTypes_AfterFork();

    /* bpo-42540: id_mutex is freed by _PyInterpreterState_Delete, which does
     * not force the default allocator. */
    if (_PyThread_at_fork_reinit(&runtime->interpreters.main->id_mutex) < 0) {
        return _PyStatus_ERR("Failed to reinitialize runtime locks");
    }

    PyStatus status = gilstate_tss_reinit(runtime);
    if (_PyStatus_EXCEPTION(status)) {
        return status;
    }

    // Recreate the trash TSS key from scratch (values from the parent
    // are meaningless in the child).
    if (PyThread_tss_is_created(&runtime->trashTSSkey)) {
        PyThread_tss_delete(&runtime->trashTSSkey);
    }
    if (PyThread_tss_create(&runtime->trashTSSkey) != 0) {
        return _PyStatus_NO_MEMORY();
    }

    _PyThread_AfterFork(&runtime->threads);

    return _PyStatus_OK();
}
#endif
548
549
550 /*************************************/
551 /* the per-interpreter runtime state */
552 /*************************************/
553
554 //----------
555 // lifecycle
556 //----------
557
558 /* Calling this indicates that the runtime is ready to create interpreters. */
559
560 PyStatus
_PyInterpreterState_Enable(_PyRuntimeState * runtime)561 _PyInterpreterState_Enable(_PyRuntimeState *runtime)
562 {
563 struct pyinterpreters *interpreters = &runtime->interpreters;
564 interpreters->next_id = 0;
565 return _PyStatus_OK();
566 }
567
568
569 static PyInterpreterState *
alloc_interpreter(void)570 alloc_interpreter(void)
571 {
572 return PyMem_RawCalloc(1, sizeof(PyInterpreterState));
573 }
574
/* Release an interpreter allocated by alloc_interpreter(), including a
   heap-allocated obmalloc state if it has one.  The statically allocated
   main interpreter is deliberately left alone. */
static void
free_interpreter(PyInterpreterState *interp)
{
    // The main interpreter is statically allocated so
    // should not be freed.
    if (interp != &_PyRuntime._main_interpreter) {
        if (_PyMem_obmalloc_state_on_heap(interp)) {
            // interpreter has its own obmalloc state, free it
            PyMem_RawFree(interp->obmalloc);
            interp->obmalloc = NULL;
        }
        PyMem_RawFree(interp);
    }
}
589 #ifndef NDEBUG
590 static inline int check_interpreter_whence(long);
591 #endif
592 /* Get the interpreter state to a minimal consistent state.
593 Further init happens in pylifecycle.c before it can be used.
594 All fields not initialized here are expected to be zeroed out,
595 e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
596 The runtime state is not manipulated. Instead it is assumed that
597 the interpreter is getting added to the runtime.
598
599 Note that the main interpreter was statically initialized as part
600 of the runtime and most state is already set properly. That leaves
601 a small number of fields to initialize dynamically, as well as some
602 that are initialized lazily.
603
604 For subinterpreters we memcpy() the main interpreter in
605 PyInterpreterState_New(), leaving it in the same mostly-initialized
606 state. The only difference is that the interpreter has some
   self-referential state that is statically initialized to the
608 main interpreter. We fix those fields here, in addition
609 to the other dynamically initialized fields.
610 */
/* Bring `interp` to a minimally consistent state (see the comment above).
   `id` is the interpreter's runtime-unique ID, `next` the previous head of
   the runtime's interpreter list (so interp becomes the new head), and
   `whence` records how the interpreter was created.  All other fields are
   assumed already zeroed or statically pre-initialized.  Returns ERR if
   the interpreter was already initialized. */
static PyStatus
init_interpreter(PyInterpreterState *interp,
                 _PyRuntimeState *runtime, int64_t id,
                 PyInterpreterState *next,
                 long whence)
{
    if (interp->_initialized) {
        return _PyStatus_ERR("interpreter already initialized");
    }

    assert(interp->_whence == _PyInterpreterState_WHENCE_NOTSET);
    assert(check_interpreter_whence(whence) == 0);
    interp->_whence = whence;

    assert(runtime != NULL);
    interp->runtime = runtime;

    assert(id > 0 || (id == 0 && interp == runtime->interpreters.main));
    interp->id = id;

    assert(runtime->interpreters.head == interp);
    assert(next != NULL || (interp == runtime->interpreters.main));
    interp->next = next;

    interp->threads_preallocated = &interp->_initial_thread;

    // We would call _PyObject_InitState() at this point
    // if interp->feature_flags were already set.

    _PyEval_InitState(interp);
    _PyGC_InitState(&interp->gc);
    PyConfig_InitPythonConfig(&interp->config);
    _PyType_InitCache(interp);
#ifdef Py_GIL_DISABLED
    _Py_brc_init_state(interp);
#endif
    llist_init(&interp->mem_free_queue.head);
    // Reset all monitoring state (tools and per-tool/per-event callables).
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
        interp->monitors.tools[i] = 0;
    }
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
            interp->monitoring_callables[t][e] = NULL;

        }
    }
    interp->sys_profile_initialized = false;
    interp->sys_trace_initialized = false;
#ifdef _Py_TIER2
    (void)_Py_SetOptimizer(interp, NULL);
    interp->executor_list_head = NULL;
#endif
    if (interp != &runtime->_main_interpreter) {
        /* Fix the self-referential, statically initialized fields. */
        interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
    }

    interp->_initialized = 1;
    return _PyStatus_OK();
}
671
672
/* Create a new interpreter state and link it at the head of the runtime's
   interpreter list, storing it in *pinterp on success.
   `tstate` may be NULL (main-interpreter creation during
   pycore_create_interpreter()); when non-NULL it is used to fire the
   "cpython.PyInterpreterState_New" audit event.  The first interpreter
   created becomes the (statically allocated) main interpreter; later ones
   are heap-allocated copies of the static template.  All list mutation
   happens under HEAD_LOCK. */
PyStatus
_PyInterpreterState_New(PyThreadState *tstate, PyInterpreterState **pinterp)
{
    *pinterp = NULL;

    // Don't get runtime from tstate since tstate can be NULL
    _PyRuntimeState *runtime = &_PyRuntime;

    // tstate is NULL when pycore_create_interpreter() calls
    // _PyInterpreterState_New() to create the main interpreter.
    if (tstate != NULL) {
        if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) {
            return _PyStatus_ERR("sys.audit failed");
        }
    }

    /* We completely serialize creation of multiple interpreters, since
       it simplifies things here and blocking concurrent calls isn't a problem.
       Regardless, we must fully block subinterpreter creation until
       after the main interpreter is created. */
    HEAD_LOCK(runtime);

    struct pyinterpreters *interpreters = &runtime->interpreters;
    int64_t id = interpreters->next_id;
    interpreters->next_id += 1;

    // Allocate the interpreter and add it to the runtime state.
    PyInterpreterState *interp;
    PyStatus status;
    PyInterpreterState *old_head = interpreters->head;
    if (old_head == NULL) {
        // We are creating the main interpreter.
        assert(interpreters->main == NULL);
        assert(id == 0);

        interp = &runtime->_main_interpreter;
        assert(interp->id == 0);
        assert(interp->next == NULL);

        interpreters->main = interp;
    }
    else {
        assert(interpreters->main != NULL);
        assert(id != 0);

        interp = alloc_interpreter();
        if (interp == NULL) {
            status = _PyStatus_NO_MEMORY();
            goto error;
        }
        // Set to _PyInterpreterState_INIT.
        memcpy(interp, &initial._main_interpreter, sizeof(*interp));

        if (id < 0) {
            /* overflow or Py_Initialize() not called yet! */
            status = _PyStatus_ERR("failed to get an interpreter ID");
            goto error;
        }
    }
    interpreters->head = interp;

    long whence = _PyInterpreterState_WHENCE_UNKNOWN;
    status = init_interpreter(interp, runtime,
                              id, old_head, whence);
    if (_PyStatus_EXCEPTION(status)) {
        goto error;
    }

    HEAD_UNLOCK(runtime);

    assert(interp != NULL);
    *pinterp = interp;
    return _PyStatus_OK();

error:
    HEAD_UNLOCK(runtime);

    if (interp != NULL) {
        free_interpreter(interp);
    }
    return status;
}
755
756
757 PyInterpreterState *
PyInterpreterState_New(void)758 PyInterpreterState_New(void)
759 {
760 // tstate can be NULL
761 PyThreadState *tstate = current_fast_get();
762
763 PyInterpreterState *interp;
764 PyStatus status = _PyInterpreterState_New(tstate, &interp);
765 if (_PyStatus_EXCEPTION(status)) {
766 Py_ExitStatusException(status);
767 }
768 assert(interp != NULL);
769 return interp;
770 }
771
/* Clear out (but do not free) the interpreter's state: clear all of its
   thread states, run the last GC pass, and drop every object reference
   it holds.  `tstate` is the thread state used for audit hooks and GC;
   it need not belong to `interp`.  Teardown order below is significant:
   sysdict/builtins are cleared last because earlier finalizers may run
   arbitrary Python code that needs them. */
static void
interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
{
    assert(interp != NULL);
    assert(tstate != NULL);
    _PyRuntimeState *runtime = interp->runtime;

    /* XXX Conditions we need to enforce:

       * the GIL must be held by the current thread
       * tstate must be the "current" thread state (current_fast_get())
       * tstate->interp must be interp
       * for the main interpreter, tstate must be the main thread
     */
    // XXX Ideally, we would not rely on any thread state in this function
    // (and we would drop the "tstate" argument).

    // Best-effort audit event; failure must not abort finalization.
    if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) {
        _PyErr_Clear(tstate);
    }

    // Clear the current/main thread state last.
    HEAD_LOCK(runtime);
    PyThreadState *p = interp->threads.head;
    HEAD_UNLOCK(runtime);
    while (p != NULL) {
        // See https://github.com/python/cpython/issues/102126
        // Must be called without HEAD_LOCK held as it can deadlock
        // if any finalizer tries to acquire that lock.
        PyThreadState_Clear(p);
        HEAD_LOCK(runtime);
        p = p->next;
        HEAD_UNLOCK(runtime);
    }
    if (tstate->interp == interp) {
        /* We fix tstate->_status below when we for sure aren't using it
           (e.g. no longer need the GIL). */
        // XXX Eliminate the need to do this.
        tstate->_status.cleared = 0;
    }

#ifdef _Py_TIER2
    _PyOptimizerObject *old = _Py_SetOptimizer(interp, NULL);
    assert(old != NULL);
    Py_DECREF(old);
#endif

    /* It is possible that any of the objects below have a finalizer
       that runs Python code or otherwise relies on a thread state
       or even the interpreter state.  For now we trust that isn't
       a problem.
     */
    // XXX Make sure we properly deal with problematic finalizers.

    Py_CLEAR(interp->audit_hooks);

    // At this time, all the threads should be cleared so we don't need atomic
    // operations for instrumentation_version or eval_breaker.
    interp->ceval.instrumentation_version = 0;
    tstate->eval_breaker = 0;

    // Reset monitoring: per-event tool masks, per-tool/per-event callables,
    // and tool names.
    for (int i = 0; i < _PY_MONITORING_UNGROUPED_EVENTS; i++) {
        interp->monitors.tools[i] = 0;
    }
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
        for (int e = 0; e < _PY_MONITORING_EVENTS; e++) {
            Py_CLEAR(interp->monitoring_callables[t][e]);
        }
    }
    interp->sys_profile_initialized = false;
    interp->sys_trace_initialized = false;
    for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
        Py_CLEAR(interp->monitoring_tool_names[t]);
    }

    PyConfig_Clear(&interp->config);
    _PyCodec_Fini(interp);

    // The import state should already have been torn down by the caller
    // (via _PyImport_ClearCore() and friends).
    assert(interp->imports.modules == NULL);
    assert(interp->imports.modules_by_index == NULL);
    assert(interp->imports.importlib == NULL);
    assert(interp->imports.import_func == NULL);

    Py_CLEAR(interp->sysdict_copy);
    Py_CLEAR(interp->builtins_copy);
    Py_CLEAR(interp->dict);
#ifdef HAVE_FORK
    Py_CLEAR(interp->before_forkers);
    Py_CLEAR(interp->after_forkers_parent);
    Py_CLEAR(interp->after_forkers_child);
#endif

    _PyAST_Fini(interp);
    _PyWarnings_Fini(interp);
    _PyAtExit_Fini(interp);

    // All Python types must be destroyed before the last GC collection. Python
    // types create a reference cycle to themselves in their
    // PyTypeObject.tp_mro member (the tuple contains the type).

    /* Last garbage collection on this interpreter */
    _PyGC_CollectNoFail(tstate);
    _PyGC_Fini(interp);

    /* We don't clear sysdict and builtins until the end of this function.
       Because clearing other attributes can execute arbitrary Python code
       which requires sysdict and builtins. */
    PyDict_Clear(interp->sysdict);
    PyDict_Clear(interp->builtins);
    Py_CLEAR(interp->sysdict);
    Py_CLEAR(interp->builtins);

    if (tstate->interp == interp) {
        /* We are now safe to fix tstate->_status.cleared. */
        // XXX Do this (much) earlier?
        tstate->_status.cleared = 1;
    }

    // Drop all registered watchers (dict/type/func/code).
    for (int i=0; i < DICT_MAX_WATCHERS; i++) {
        interp->dict_state.watchers[i] = NULL;
    }

    for (int i=0; i < TYPE_MAX_WATCHERS; i++) {
        interp->type_watchers[i] = NULL;
    }

    for (int i=0; i < FUNC_MAX_WATCHERS; i++) {
        interp->func_watchers[i] = NULL;
    }
    interp->active_func_watchers = 0;

    for (int i=0; i < CODE_MAX_WATCHERS; i++) {
        interp->code_watchers[i] = NULL;
    }
    interp->active_code_watchers = 0;
    // XXX Once we have one allocator per interpreter (i.e.
    // per-interpreter GC) we must ensure that all of the interpreter's
    // objects have been cleaned up at that point.

    // If we had a freelist of thread states, we would clear it here.
}
913
914
915 void
PyInterpreterState_Clear(PyInterpreterState * interp)916 PyInterpreterState_Clear(PyInterpreterState *interp)
917 {
918 // Use the current Python thread state to call audit hooks and to collect
919 // garbage. It can be different than the current Python thread state
920 // of 'interp'.
921 PyThreadState *current_tstate = current_fast_get();
922 _PyImport_ClearCore(interp);
923 interpreter_clear(interp, current_tstate);
924 }
925
926
927 void
_PyInterpreterState_Clear(PyThreadState * tstate)928 _PyInterpreterState_Clear(PyThreadState *tstate)
929 {
930 _PyImport_ClearCore(tstate->interp);
931 interpreter_clear(tstate->interp, tstate);
932 }
933
934
935 static inline void tstate_deactivate(PyThreadState *tstate);
936 static void tstate_set_detached(PyThreadState *tstate, int detached_state);
937 static void zapthreads(PyInterpreterState *interp);
938
/* Destroy `interp`: detach the current thread state if it belongs to this
   interpreter, destroy all remaining (already-dead) thread states, unlink
   the interpreter from the runtime's list under HEAD_LOCK, and free it.
   Fatal-errors if the interpreter is not in the list, still has live
   threads, or (for the main interpreter) subinterpreters remain. */
void
PyInterpreterState_Delete(PyInterpreterState *interp)
{
    _PyRuntimeState *runtime = interp->runtime;
    struct pyinterpreters *interpreters = &runtime->interpreters;

    // XXX Clearing the "current" thread state should happen before
    // we start finalizing the interpreter (or the current thread state).
    PyThreadState *tcur = current_fast_get();
    if (tcur != NULL && interp == tcur->interp) {
        /* Unset current thread.  After this, many C API calls become crashy. */
        _PyThreadState_Detach(tcur);
    }

    zapthreads(interp);

    // XXX These two calls should be done at the end of clear_interpreter(),
    // but currently some objects get decref'ed after that.
#ifdef Py_REF_DEBUG
    _PyInterpreterState_FinalizeRefTotal(interp);
#endif
    _PyInterpreterState_FinalizeAllocatedBlocks(interp);

    HEAD_LOCK(runtime);
    // Find the list link pointing at interp so it can be spliced out.
    PyInterpreterState **p;
    for (p = &interpreters->head; ; p = &(*p)->next) {
        if (*p == NULL) {
            Py_FatalError("NULL interpreter");
        }
        if (*p == interp) {
            break;
        }
    }
    if (interp->threads.head != NULL) {
        Py_FatalError("remaining threads");
    }
    *p = interp->next;

    if (interpreters->main == interp) {
        interpreters->main = NULL;
        if (interpreters->head != NULL) {
            Py_FatalError("remaining subinterpreters");
        }
    }
    HEAD_UNLOCK(runtime);

    if (interp->id_mutex != NULL) {
        PyThread_free_lock(interp->id_mutex);
    }

    _Py_qsbr_fini(interp);

    _PyObject_FiniState(interp);

    free_interpreter(interp);
}
995
996
#ifdef HAVE_FORK
/*
 * Delete all interpreter states except the main interpreter.  If there
 * is a current interpreter state, it *must* be the main interpreter.
 * Used after fork(): only the forking thread survives in the child.
 */
PyStatus
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
{
    struct pyinterpreters *interpreters = &runtime->interpreters;

    PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
    if (tstate != NULL && tstate->interp != interpreters->main) {
        return _PyStatus_ERR("not main interpreter");
    }

    HEAD_LOCK(runtime);
    PyInterpreterState *interp = interpreters->head;
    // Rebuild the list so it ends up containing only the main interpreter.
    interpreters->head = NULL;
    while (interp != NULL) {
        if (interp == interpreters->main) {
            // Keep the main interpreter; sever its link to the rest.
            interpreters->main->next = NULL;
            interpreters->head = interp;
            interp = interp->next;
            continue;
        }

        // XXX Won't this fail since PyInterpreterState_Clear() requires
        // the "current" tstate to be set?
        PyInterpreterState_Clear(interp);  // XXX must activate?
        zapthreads(interp);
        if (interp->id_mutex != NULL) {
            PyThread_free_lock(interp->id_mutex);
        }
        PyInterpreterState *prev_interp = interp;
        interp = interp->next;
        free_interpreter(prev_interp);
    }
    HEAD_UNLOCK(runtime);

    if (interpreters->head == NULL) {
        return _PyStatus_ERR("missing main interpreter");
    }
    // Restore the thread state we swapped out at the top.
    _PyThreadState_Swap(runtime, tstate);
    return _PyStatus_OK();
}
#endif
1043
/* Record 'tstate' as the thread running the interpreter's main program
   (relaxed atomic store; readers use get_main_thread()). */
static inline void
set_main_thread(PyInterpreterState *interp, PyThreadState *tstate)
{
    _Py_atomic_store_ptr_relaxed(&interp->threads.main, tstate);
}
1049
/* Return the thread state running the interpreter's main program,
   or NULL if none has been registered (relaxed atomic load). */
static inline PyThreadState *
get_main_thread(PyInterpreterState *interp)
{
    return _Py_atomic_load_ptr_relaxed(&interp->threads.main);
}
1055
/* Mark the current thread as running the interpreter's main program.
   Returns 0 on success, or -1 with an exception set if the interpreter
   is already running or the current tstate belongs to a different
   interpreter.  NOTE: the error checks happen in this order on purpose. */
int
_PyInterpreterState_SetRunningMain(PyInterpreterState *interp)
{
    if (get_main_thread(interp) != NULL) {
        // In 3.14+ we use _PyErr_SetInterpreterAlreadyRunning().
        PyErr_SetString(PyExc_InterpreterError, "interpreter already running");
        return -1;
    }
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    if (tstate->interp != interp) {
        PyErr_SetString(PyExc_RuntimeError,
                        "current tstate has wrong interpreter");
        return -1;
    }
    set_main_thread(interp, tstate);

    return 0;
}
1075
/* Clear the "running main" marker.  Only the thread that set it
   (i.e. the current thread) may clear it; asserted in debug builds. */
void
_PyInterpreterState_SetNotRunningMain(PyInterpreterState *interp)
{
    assert(get_main_thread(interp) == current_fast_get());
    set_main_thread(interp, NULL);
}
1082
1083 int
_PyInterpreterState_IsRunningMain(PyInterpreterState * interp)1084 _PyInterpreterState_IsRunningMain(PyInterpreterState *interp)
1085 {
1086 if (get_main_thread(interp) != NULL) {
1087 return 1;
1088 }
1089 // Embedders might not know to call _PyInterpreterState_SetRunningMain(),
1090 // so their main thread wouldn't show it is running the main interpreter's
1091 // program. (Py_Main() doesn't have this problem.) For now this isn't
1092 // critical. If it were, we would need to infer "running main" from other
1093 // information, like if it's the main interpreter. We used to do that
1094 // but the naive approach led to some inconsistencies that caused problems.
1095 return 0;
1096 }
1097
/* Return 1 if 'tstate' is the thread running its interpreter's main
   program. */
int
_PyThreadState_IsRunningMain(PyThreadState *tstate)
{
    PyInterpreterState *interp = tstate->interp;
    // See the note in _PyInterpreterState_IsRunningMain() about
    // possible false negatives here for embedders.
    return get_main_thread(interp) == tstate;
}
1106
1107 // This has been removed in 3.14.
1108 int
_PyInterpreterState_FailIfRunningMain(PyInterpreterState * interp)1109 _PyInterpreterState_FailIfRunningMain(PyInterpreterState *interp)
1110 {
1111 if (get_main_thread(interp) != NULL) {
1112 PyErr_SetString(PyExc_InterpreterError,
1113 "interpreter already running");
1114 return -1;
1115 }
1116 return 0;
1117 }
1118
/* Re-initialize the "running main" marker after fork: if the marker
   points at some other (now-defunct) thread, clear it; if it already
   points at the surviving thread, leave it alone. */
void
_PyInterpreterState_ReinitRunningMain(PyThreadState *tstate)
{
    PyInterpreterState *interp = tstate->interp;
    if (get_main_thread(interp) != tstate) {
        set_main_thread(interp, NULL);
    }
}
1127
1128
1129 //----------
1130 // accessors
1131 //----------
1132
/* Return non-zero once the interpreter has been fully initialized. */
int
_PyInterpreterState_IsReady(PyInterpreterState *interp)
{
    return interp->_ready;
}
1138
1139 #ifndef NDEBUG
1140 static inline int
check_interpreter_whence(long whence)1141 check_interpreter_whence(long whence)
1142 {
1143 if(whence < 0) {
1144 return -1;
1145 }
1146 if (whence > _PyInterpreterState_WHENCE_MAX) {
1147 return -1;
1148 }
1149 return 0;
1150 }
1151 #endif
1152
/* Return how the interpreter was created (a _PyInterpreterState_WHENCE_*
   value). */
long
_PyInterpreterState_GetWhence(PyInterpreterState *interp)
{
    assert(check_interpreter_whence(interp->_whence) == 0);
    return interp->_whence;
}
1159
/* Record how the interpreter was created.  May only be called after the
   initial value has been set (i.e. never from the NOTSET state). */
void
_PyInterpreterState_SetWhence(PyInterpreterState *interp, long whence)
{
    assert(interp->_whence != _PyInterpreterState_WHENCE_NOTSET);
    assert(check_interpreter_whence(whence) == 0);
    interp->_whence = whence;
}
1167
1168
/* Return a new reference to the interpreter's __main__ module, or NULL
   with an exception set (RuntimeError if the interpreter has no modules
   dict yet, or KeyError-equivalent if __main__ is missing). */
PyObject *
PyUnstable_InterpreterState_GetMainModule(PyInterpreterState *interp)
{
    PyObject *modules = _PyImport_GetModules(interp);
    if (modules == NULL) {
        PyErr_SetString(PyExc_RuntimeError, "interpreter not initialized");
        return NULL;
    }
    return PyMapping_GetItemString(modules, "__main__");
}
1179
1180
/* Return the interpreter's per-interpreter dict, creating it lazily.
   Returns a borrowed reference.  On allocation failure the error is
   swallowed and NULL is returned without an exception set. */
PyObject *
PyInterpreterState_GetDict(PyInterpreterState *interp)
{
    if (interp->dict == NULL) {
        interp->dict = PyDict_New();
        if (interp->dict == NULL) {
            // Deliberate: callers treat NULL as "no dict available".
            PyErr_Clear();
        }
    }
    /* Returning NULL means no per-interpreter dict is available. */
    return interp->dict;
}
1193
1194
1195 //----------
1196 // interp ID
1197 //----------
1198
/* Convert a Python object to an interpreter ID.

   Returns the non-negative ID on success, or -1 with an exception set
   (TypeError, ValueError, or OverflowError).  Since valid IDs are
   non-negative, -1 is unambiguous as an error value. */
int64_t
_PyInterpreterState_ObjectToID(PyObject *idobj)
{
    if (!_PyIndex_Check(idobj)) {
        PyErr_Format(PyExc_TypeError,
                     "interpreter ID must be an int, got %.100s",
                     Py_TYPE(idobj)->tp_name);
        return -1;
    }

    // This may raise OverflowError.
    // For now, we don't worry about if LLONG_MAX < INT64_MAX.
    long long id = PyLong_AsLongLong(idobj);
    if (id == -1 && PyErr_Occurred()) {
        return -1;
    }

    if (id < 0) {
        PyErr_Format(PyExc_ValueError,
                     "interpreter ID must be a non-negative int, got %R",
                     idobj);
        return -1;
    }
#if LLONG_MAX > INT64_MAX
    // Only reachable when long long is wider than 64 bits.
    else if (id > INT64_MAX) {
        PyErr_SetString(PyExc_OverflowError, "int too big to convert");
        return -1;
    }
#endif
    else {
        return (int64_t)id;
    }
}
1232
/* Return the interpreter's ID, or -1 with RuntimeError set if 'interp'
   is NULL. */
int64_t
PyInterpreterState_GetID(PyInterpreterState *interp)
{
    if (interp == NULL) {
        PyErr_SetString(PyExc_RuntimeError, "no interpreter provided");
        return -1;
    }
    return interp->id;
}
1242
1243 PyObject *
_PyInterpreterState_GetIDObject(PyInterpreterState * interp)1244 _PyInterpreterState_GetIDObject(PyInterpreterState *interp)
1245 {
1246 if (_PyInterpreterState_IDInitref(interp) != 0) {
1247 return NULL;
1248 };
1249 int64_t interpid = interp->id;
1250 if (interpid < 0) {
1251 return NULL;
1252 }
1253 assert(interpid < LLONG_MAX);
1254 return PyLong_FromLongLong(interpid);
1255 }
1256
1257
/* Lazily create the mutex protecting the interpreter's ID refcount.
   Idempotent: returns 0 immediately if the mutex already exists.
   Returns -1 with RuntimeError set if the lock cannot be allocated. */
int
_PyInterpreterState_IDInitref(PyInterpreterState *interp)
{
    if (interp->id_mutex != NULL) {
        return 0;
    }
    interp->id_mutex = PyThread_allocate_lock();
    if (interp->id_mutex == NULL) {
        PyErr_SetString(PyExc_RuntimeError,
                        "failed to create init interpreter ID mutex");
        return -1;
    }
    interp->id_refcount = 0;
    return 0;
}
1273
1274
/* Increment the interpreter's ID refcount (creating the mutex first if
   needed).  Returns 0 on success, -1 with an exception set on failure. */
int
_PyInterpreterState_IDIncref(PyInterpreterState *interp)
{
    if (_PyInterpreterState_IDInitref(interp) < 0) {
        return -1;
    }

    PyThread_acquire_lock(interp->id_mutex, WAIT_LOCK);
    interp->id_refcount += 1;
    PyThread_release_lock(interp->id_mutex);
    return 0;
}
1287
1288
/* Decrement the interpreter's ID refcount.  When the count reaches
   zero and the interpreter was marked with requires_idref, the
   interpreter is torn down via Py_EndInterpreter() using a temporary
   thread state.  The mutex must already exist (asserted). */
void
_PyInterpreterState_IDDecref(PyInterpreterState *interp)
{
    assert(interp->id_mutex != NULL);
    _PyRuntimeState *runtime = interp->runtime;

    PyThread_acquire_lock(interp->id_mutex, WAIT_LOCK);
    assert(interp->id_refcount != 0);
    interp->id_refcount -= 1;
    // Snapshot the count while still holding the lock.
    int64_t refcount = interp->id_refcount;
    PyThread_release_lock(interp->id_mutex);

    if (refcount == 0 && interp->requires_idref) {
        // Create a fresh tstate just to run Py_EndInterpreter() in.
        PyThreadState *tstate =
            _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_FINI);

        // XXX Possible GILState issues?
        PyThreadState *save_tstate = _PyThreadState_Swap(runtime, tstate);
        Py_EndInterpreter(tstate);
        _PyThreadState_Swap(runtime, save_tstate);
    }
}
1311
/* Return non-zero if the interpreter should be ended when its ID
   refcount drops to zero (see _PyInterpreterState_IDDecref()). */
int
_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
{
    return interp->requires_idref;
}
1317
1318 void
_PyInterpreterState_RequireIDRef(PyInterpreterState * interp,int required)1319 _PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
1320 {
1321 interp->requires_idref = required ? 1 : 0;
1322 }
1323
1324
1325 //-----------------------------
1326 // look up an interpreter state
1327 //-----------------------------
1328
/* Return the interpreter associated with the current OS thread.

   The GIL must be held.  Fatal-errors (never returns NULL) if there is
   no current thread state or no interpreter attached to it.
*/

PyInterpreterState*
PyInterpreterState_Get(void)
{
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    PyInterpreterState *interp = tstate->interp;
    if (interp == NULL) {
        Py_FatalError("no current interpreter");
    }
    return interp;
}
1345
1346
1347 static PyInterpreterState *
interp_look_up_id(_PyRuntimeState * runtime,int64_t requested_id)1348 interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id)
1349 {
1350 PyInterpreterState *interp = runtime->interpreters.head;
1351 while (interp != NULL) {
1352 int64_t id = PyInterpreterState_GetID(interp);
1353 if (id < 0) {
1354 return NULL;
1355 }
1356 if (requested_id == id) {
1357 return interp;
1358 }
1359 interp = PyInterpreterState_Next(interp);
1360 }
1361 return NULL;
1362 }
1363
/* Return the interpreter state with the given ID.

   Fail with InterpreterNotFoundError if the interpreter is not found. */

PyInterpreterState *
_PyInterpreterState_LookUpID(int64_t requested_id)
{
    PyInterpreterState *interp = NULL;
    if (requested_id >= 0) {
        _PyRuntimeState *runtime = &_PyRuntime;
        HEAD_LOCK(runtime);
        interp = interp_look_up_id(runtime, requested_id);
        HEAD_UNLOCK(runtime);
    }
    // interp_look_up_id() may have set its own exception; keep it.
    if (interp == NULL && !PyErr_Occurred()) {
        PyErr_Format(PyExc_InterpreterNotFoundError,
                     "unrecognized interpreter ID %lld", requested_id);
    }
    return interp;
}
1384
1385 PyInterpreterState *
_PyInterpreterState_LookUpIDObject(PyObject * requested_id)1386 _PyInterpreterState_LookUpIDObject(PyObject *requested_id)
1387 {
1388 int64_t id = _PyInterpreterState_ObjectToID(requested_id);
1389 if (id < 0) {
1390 return NULL;
1391 }
1392 return _PyInterpreterState_LookUpID(id);
1393 }
1394
1395
1396 /********************************/
1397 /* the per-thread runtime state */
1398 /********************************/
1399
#ifndef NDEBUG
/* Debug-only: a thread state is "alive" once initialized and before any
   of the teardown phases (finalizing/cleared/finalized) has begun. */
static inline int
tstate_is_alive(PyThreadState *tstate)
{
    return (tstate->_status.initialized &&
            !tstate->_status.finalized &&
            !tstate->_status.cleared &&
            !tstate->_status.finalizing);
}
#endif
1410
1411
1412 //----------
1413 // lifecycle
1414 //----------
1415
1416 /* Minimum size of data stack chunk */
1417 #define DATA_STACK_CHUNK_SIZE (16*1024)
1418
/* Allocate a new data-stack chunk of 'size_in_bytes' (must be a
   multiple of the stack slot size) and link it after 'previous'.
   Returns NULL on allocation failure (no exception set). */
static _PyStackChunk*
allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
{
    assert(size_in_bytes % sizeof(PyObject **) == 0);
    _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes);
    if (res == NULL) {
        return NULL;
    }
    res->previous = previous;
    res->size = size_in_bytes;
    res->top = 0;
    return res;
}
1432
/* Reset a thread state struct to its pristine initial value by copying
   the statically-initialized template thread state over it. */
static void
reset_threadstate(_PyThreadStateImpl *tstate)
{
    // Set to _PyThreadState_INIT directly?
    memcpy(tstate,
           &initial._main_interpreter._initial_thread,
           sizeof(*tstate));
}
1441
/* Obtain storage for a new thread state: reuse the interpreter's
   preallocated slot if available (atomically claimed), otherwise
   heap-allocate and reset one.  Returns NULL on allocation failure. */
static _PyThreadStateImpl *
alloc_threadstate(PyInterpreterState *interp)
{
    _PyThreadStateImpl *tstate;

    // Try the preallocated tstate first.
    tstate = _Py_atomic_exchange_ptr(&interp->threads_preallocated, NULL);

    // Fall back to the allocator.
    if (tstate == NULL) {
        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
        if (tstate == NULL) {
            return NULL;
        }
        reset_threadstate(tstate);
    }
    return tstate;
}
1460
/* Release a thread state's storage.  The interpreter's embedded initial
   thread state is recycled into the preallocated slot instead of freed. */
static void
free_threadstate(_PyThreadStateImpl *tstate)
{
    PyInterpreterState *interp = tstate->base.interp;
    // The initial thread state of the interpreter is allocated
    // as part of the interpreter state so should not be freed.
    if (tstate == &interp->_initial_thread) {
        // Make it available again.
        reset_threadstate(tstate);
        assert(interp->threads_preallocated == NULL);
        _Py_atomic_store_ptr(&interp->threads_preallocated, tstate);
    }
    else {
        PyMem_RawFree(tstate);
    }
}
1477
1478 /* Get the thread state to a minimal consistent state.
1479 Further init happens in pylifecycle.c before it can be used.
1480 All fields not initialized here are expected to be zeroed out,
1481 e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
1482 The interpreter state is not manipulated. Instead it is assumed that
1483 the thread is getting added to the interpreter.
1484 */
1485
1486 static void
init_threadstate(_PyThreadStateImpl * _tstate,PyInterpreterState * interp,uint64_t id,int whence)1487 init_threadstate(_PyThreadStateImpl *_tstate,
1488 PyInterpreterState *interp, uint64_t id, int whence)
1489 {
1490 PyThreadState *tstate = (PyThreadState *)_tstate;
1491 if (tstate->_status.initialized) {
1492 Py_FatalError("thread state already initialized");
1493 }
1494
1495 assert(interp != NULL);
1496 tstate->interp = interp;
1497 tstate->eval_breaker =
1498 _Py_atomic_load_uintptr_relaxed(&interp->ceval.instrumentation_version);
1499
1500 // next/prev are set in add_threadstate().
1501 assert(tstate->next == NULL);
1502 assert(tstate->prev == NULL);
1503
1504 assert(tstate->_whence == _PyThreadState_WHENCE_NOTSET);
1505 assert(whence >= 0 && whence <= _PyThreadState_WHENCE_EXEC);
1506 tstate->_whence = whence;
1507
1508 assert(id > 0);
1509 tstate->id = id;
1510
1511 // thread_id and native_thread_id are set in bind_tstate().
1512
1513 tstate->py_recursion_limit = interp->ceval.recursion_limit,
1514 tstate->py_recursion_remaining = interp->ceval.recursion_limit,
1515 tstate->c_recursion_remaining = Py_C_RECURSION_LIMIT;
1516
1517 tstate->exc_info = &tstate->exc_state;
1518
1519 // PyGILState_Release must not try to delete this thread state.
1520 // This is cleared when PyGILState_Ensure() creates the thread state.
1521 tstate->gilstate_counter = 1;
1522
1523 tstate->current_frame = NULL;
1524 tstate->datastack_chunk = NULL;
1525 tstate->datastack_top = NULL;
1526 tstate->datastack_limit = NULL;
1527 tstate->what_event = -1;
1528 tstate->previous_executor = NULL;
1529 tstate->dict_global_version = 0;
1530
1531 _tstate->asyncio_running_loop = NULL;
1532
1533 tstate->delete_later = NULL;
1534
1535 llist_init(&_tstate->mem_free_queue);
1536
1537 if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
1538 // Start in the suspended state if there is an ongoing stop-the-world.
1539 tstate->state = _Py_THREAD_SUSPENDED;
1540 }
1541
1542 tstate->_status.initialized = 1;
1543 }
1544
/* Push 'tstate' onto the front of the interpreter's thread-state list.
   'next' is the current list head (may be NULL).  Caller must hold
   HEAD_LOCK. */
static void
add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
                PyThreadState *next)
{
    assert(interp->threads.head != tstate);
    if (next != NULL) {
        assert(next->prev == NULL || next->prev == tstate);
        next->prev = tstate;
    }
    tstate->next = next;
    assert(tstate->prev == NULL);
    interp->threads.head = tstate;
}
1558
/* Allocate, initialize, and register a new thread state for 'interp'.
   Returns NULL on allocation failure (no exception set).  The returned
   thread state is not yet bound to an OS thread. */
static PyThreadState *
new_threadstate(PyInterpreterState *interp, int whence)
{
    // Allocate the thread state.
    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
    if (tstate == NULL) {
        return NULL;
    }

#ifdef Py_GIL_DISABLED
    // Reserve a QSBR slot before taking HEAD_LOCK (see below).
    Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
    if (qsbr_idx < 0) {
        free_threadstate(tstate);
        return NULL;
    }
#endif

    /* We serialize concurrent creation to protect global state. */
    HEAD_LOCK(interp->runtime);

    // Initialize the new thread state.
    interp->threads.next_unique_id += 1;
    uint64_t id = interp->threads.next_unique_id;
    init_threadstate(tstate, interp, id, whence);

    // Add the new thread state to the interpreter.
    PyThreadState *old_head = interp->threads.head;
    add_threadstate(interp, (PyThreadState *)tstate, old_head);

    HEAD_UNLOCK(interp->runtime);
#ifdef Py_GIL_DISABLED
    if (id == 1) {
        if (_Py_atomic_load_int(&interp->gc.immortalize) == 0) {
            // Immortalize objects marked as using deferred reference counting
            // the first time a non-main thread is created.
            _PyGC_ImmortalizeDeferredObjects(interp);
        }
    }
#endif

#ifdef Py_GIL_DISABLED
    // Must be called with lock unlocked to avoid lock ordering deadlocks.
    _Py_qsbr_register(tstate, interp, qsbr_idx);
#endif

    return (PyThreadState *)tstate;
}
1606
/* Public API: create a new thread state bound to the calling OS thread. */
PyThreadState *
PyThreadState_New(PyInterpreterState *interp)
{
    return _PyThreadState_NewBound(interp, _PyThreadState_WHENCE_UNKNOWN);
}
1612
/* Create a new thread state and immediately bind it to the calling OS
   thread.  Returns NULL on allocation failure. */
PyThreadState *
_PyThreadState_NewBound(PyInterpreterState *interp, int whence)
{
    PyThreadState *tstate = new_threadstate(interp, whence);
    if (tstate) {
        bind_tstate(tstate);
        // This makes sure there's a gilstate tstate bound
        // as soon as possible.
        if (gilstate_tss_get(tstate->interp->runtime) == NULL) {
            bind_gilstate_tstate(tstate);
        }
    }
    return tstate;
}
1627
// Create a new, unbound thread state.
// This must be followed by a call to _PyThreadState_Bind();
PyThreadState *
_PyThreadState_New(PyInterpreterState *interp, int whence)
{
    return new_threadstate(interp, whence);
}
1634
// We keep this for stable ABI compatibility.
PyAPI_FUNC(PyThreadState*)
_PyThreadState_Prealloc(PyInterpreterState *interp)
{
    return _PyThreadState_New(interp, _PyThreadState_WHENCE_UNKNOWN);
}
1641
// We keep this around for (accidental) stable ABI compatibility.
// Realistically, no extensions are using it.
// Always aborts: the symbol exists only so old binaries link.
PyAPI_FUNC(void)
_PyThreadState_Init(PyThreadState *tstate)
{
    Py_FatalError("_PyThreadState_Init() is for internal use only");
}
1649
1650
1651 static void
clear_datastack(PyThreadState * tstate)1652 clear_datastack(PyThreadState *tstate)
1653 {
1654 _PyStackChunk *chunk = tstate->datastack_chunk;
1655 tstate->datastack_chunk = NULL;
1656 while (chunk != NULL) {
1657 _PyStackChunk *prev = chunk->previous;
1658 _PyObject_VirtualFree(chunk, chunk->size);
1659 chunk = prev;
1660 }
1661 }
1662
/* Public API: clear a thread state's Python-level contents (frames,
   exception state, per-thread dict, tracing hooks, ...).  The struct
   itself is not freed; see tstate_delete_common() for that.
   The GIL must be held and the current thread state must share
   tstate's interpreter. */
void
PyThreadState_Clear(PyThreadState *tstate)
{
    assert(tstate->_status.initialized && !tstate->_status.cleared);
    assert(current_fast_get()->interp == tstate->interp);
    assert(!_PyThreadState_IsRunningMain(tstate));
    // XXX assert(!tstate->_status.bound || tstate->_status.unbound);
    tstate->_status.finalizing = 1;  // just in case

    /* XXX Conditions we need to enforce:

       * the GIL must be held by the current thread
       * current_fast_get()->interp must match tstate->interp
       * for the main interpreter, current_fast_get() must be the main thread
     */

    int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose;

    if (verbose && tstate->current_frame != NULL) {
        /* bpo-20526: After the main thread calls
           _PyInterpreterState_SetFinalizing() in Py_FinalizeEx()
           (or in Py_EndInterpreter() for subinterpreters),
           threads must exit when trying to take the GIL.
           If a thread exit in the middle of _PyEval_EvalFrameDefault(),
           tstate->frame is not reset to its previous value.
           It is more likely with daemon threads, but it can happen
           with regular threads if threading._shutdown() fails
           (ex: interrupted by CTRL+C). */
        fprintf(stderr,
                "PyThreadState_Clear: warning: thread still has a frame\n");
    }

    /* At this point tstate shouldn't be used any more,
       neither to run Python code nor for other uses.

       This is tricky when current_fast_get() == tstate, in the same way
       as noted in interpreter_clear() above.  The below finalizers
       can possibly run Python code or otherwise use the partially
       cleared thread state.  For now we trust that isn't a problem
       in practice.
    */
    // XXX Deal with the possibility of problematic finalizers.

    /* Don't clear tstate->pyframe: it is a borrowed reference */

    Py_CLEAR(tstate->threading_local_key);
    Py_CLEAR(tstate->threading_local_sentinel);

    Py_CLEAR(((_PyThreadStateImpl *)tstate)->asyncio_running_loop);

    Py_CLEAR(tstate->dict);
    Py_CLEAR(tstate->async_exc);

    Py_CLEAR(tstate->current_exception);

    Py_CLEAR(tstate->exc_state.exc_value);

    /* The stack of exception states should contain just this thread. */
    if (verbose && tstate->exc_info != &tstate->exc_state) {
        fprintf(stderr,
                "PyThreadState_Clear: warning: thread still has a generator\n");
    }

    // Keep the interpreter's per-hook thread counters consistent.
    if (tstate->c_profilefunc != NULL) {
        tstate->interp->sys_profiling_threads--;
        tstate->c_profilefunc = NULL;
    }
    if (tstate->c_tracefunc != NULL) {
        tstate->interp->sys_tracing_threads--;
        tstate->c_tracefunc = NULL;
    }
    Py_CLEAR(tstate->c_profileobj);
    Py_CLEAR(tstate->c_traceobj);

    Py_CLEAR(tstate->async_gen_firstiter);
    Py_CLEAR(tstate->async_gen_finalizer);

    Py_CLEAR(tstate->context);

#ifdef Py_GIL_DISABLED
    // Each thread should clear own freelists in free-threading builds.
    struct _Py_object_freelists *freelists = _Py_object_freelists_GET();
    _PyObject_ClearFreeLists(freelists, 1);

    // Remove ourself from the biased reference counting table of threads.
    _Py_brc_remove_thread(tstate);
#endif

    // Merge our queue of pointers to be freed into the interpreter queue.
    _PyMem_AbandonDelayed(tstate);

    _PyThreadState_ClearMimallocHeaps(tstate);

    tstate->_status.cleared = 1;

    // XXX Call _PyThreadStateSwap(runtime, NULL) here if "current".
    // XXX Do it as early in the function as possible.
}
1761
1762 static void
1763 decrement_stoptheworld_countdown(struct _stoptheworld_state *stw);
1764
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent().

   Unlinks the (already-cleared) thread state from its interpreter,
   unbinds it from the OS thread, frees its data stack, and optionally
   releases the GIL on the way out.  Does NOT free the struct itself. */
static void
tstate_delete_common(PyThreadState *tstate, int release_gil)
{
    assert(tstate->_status.cleared && !tstate->_status.finalized);
    tstate_verify_not_active(tstate);
    assert(!_PyThreadState_IsRunningMain(tstate));

    PyInterpreterState *interp = tstate->interp;
    if (interp == NULL) {
        Py_FatalError("NULL interpreter");
    }
    _PyRuntimeState *runtime = interp->runtime;

    HEAD_LOCK(runtime);
    // Unlink from the interpreter's doubly-linked thread list.
    if (tstate->prev) {
        tstate->prev->next = tstate->next;
    }
    else {
        interp->threads.head = tstate->next;
    }
    if (tstate->next) {
        tstate->next->prev = tstate->prev;
    }
    if (tstate->state != _Py_THREAD_SUSPENDED) {
        // Any ongoing stop-the-world request should not wait for us because
        // our thread is getting deleted.
        if (interp->stoptheworld.requested) {
            decrement_stoptheworld_countdown(&interp->stoptheworld);
        }
        if (runtime->stoptheworld.requested) {
            decrement_stoptheworld_countdown(&runtime->stoptheworld);
        }
    }

#if defined(Py_REF_DEBUG) && defined(Py_GIL_DISABLED)
    // Add our portion of the total refcount to the interpreter's total.
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
    tstate->interp->object_state.reftotal += tstate_impl->reftotal;
    tstate_impl->reftotal = 0;
#endif

    HEAD_UNLOCK(runtime);

    // XXX Unbind in PyThreadState_Clear(), or earlier
    // (and assert not-equal here)?
    if (tstate->_status.bound_gilstate) {
        unbind_gilstate_tstate(tstate);
    }
    if (tstate->_status.bound) {
        unbind_tstate(tstate);
    }

    // XXX Move to PyThreadState_Clear()?
    clear_datastack(tstate);

    if (release_gil) {
        _PyEval_ReleaseLock(tstate->interp, tstate, 1);
    }

#ifdef Py_GIL_DISABLED
    _Py_qsbr_unregister(tstate);
#endif

    tstate->_status.finalized = 1;
}
1831
/* Delete and free every remaining thread state of 'interp'. */
static void
zapthreads(PyInterpreterState *interp)
{
    PyThreadState *tstate;
    /* No need to lock the mutex here because this should only happen
       when the threads are all really dead (XXX famous last words). */
    while ((tstate = interp->threads.head) != NULL) {
        tstate_verify_not_active(tstate);
        tstate_delete_common(tstate, 0);
        free_threadstate((_PyThreadStateImpl *)tstate);
    }
}
1844
1845
/* Public API: delete a thread state that is not the current one.
   The tstate must already be cleared (PyThreadState_Clear()). */
void
PyThreadState_Delete(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
    tstate_verify_not_active(tstate);
    tstate_delete_common(tstate, 0);
    free_threadstate((_PyThreadStateImpl *)tstate);
}
1854
1855
/* Delete the given thread state, which must be the current one, and
   release the GIL as part of the teardown. */
void
_PyThreadState_DeleteCurrent(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
#ifdef Py_GIL_DISABLED
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
    current_fast_clear(tstate->interp->runtime);
    tstate_delete_common(tstate, 1);  // release GIL as part of call
    free_threadstate((_PyThreadStateImpl *)tstate);
}
1867
/* Public API: delete the calling thread's thread state and release
   the GIL. */
void
PyThreadState_DeleteCurrent(void)
{
    PyThreadState *tstate = current_fast_get();
    _PyThreadState_DeleteCurrent(tstate);
}
1874
1875
// Unlinks and removes all thread states from `tstate->interp`, with the
// exception of the one passed as an argument.  However, it does not delete
// these thread states.  Instead, it returns the removed thread states as a
// linked list.
//
// Note that if there is a current thread state, it *must* be the one
// passed as argument.  Also, this won't touch any interpreters other
// than the current one, since we don't know which thread state should
// be kept in those other interpreters.
PyThreadState *
_PyThreadState_RemoveExcept(PyThreadState *tstate)
{
    assert(tstate != NULL);
    PyInterpreterState *interp = tstate->interp;
    _PyRuntimeState *runtime = interp->runtime;

#ifdef Py_GIL_DISABLED
    assert(runtime->stoptheworld.world_stopped);
#endif

    HEAD_LOCK(runtime);
    /* Remove all thread states, except tstate, from the linked list of
       thread states. */
    PyThreadState *list = interp->threads.head;
    if (list == tstate) {
        // Don't return tstate as part of the removed list.
        list = tstate->next;
    }
    // Splice tstate out of the old list and make it the sole entry.
    if (tstate->prev) {
        tstate->prev->next = tstate->next;
    }
    if (tstate->next) {
        tstate->next->prev = tstate->prev;
    }
    tstate->prev = tstate->next = NULL;
    interp->threads.head = tstate;
    HEAD_UNLOCK(runtime);

    return list;
}
1915
// Deletes the thread states in the linked list `list`.
//
// This is intended to be used in conjunction with _PyThreadState_RemoveExcept.
void
_PyThreadState_DeleteList(PyThreadState *list)
{
    // The world can't be stopped because PyThreadState_Clear() can
    // call destructors.
    assert(!_PyRuntime.stoptheworld.world_stopped);

    PyThreadState *p, *next;
    for (p = list; p; p = next) {
        // Save the link before Clear()/free invalidate 'p'.
        next = p->next;
        PyThreadState_Clear(p);
        free_threadstate((_PyThreadStateImpl *)p);
    }
}
1933
1934
1935 //----------
1936 // accessors
1937 //----------
1938
/* An extension mechanism to store arbitrary additional per-thread state.
   PyThreadState_GetDict() returns a dictionary that can be used to hold such
   state; the caller should pick a unique key and store its state there.  If
   PyThreadState_GetDict() returns NULL, an exception has *not* been raised
   and the caller should assume no per-thread state is available. */

PyObject *
_PyThreadState_GetDict(PyThreadState *tstate)
{
    assert(tstate != NULL);
    if (tstate->dict == NULL) {
        tstate->dict = PyDict_New();
        if (tstate->dict == NULL) {
            // Deliberate: NULL return means "no dict", not an error.
            _PyErr_Clear(tstate);
        }
    }
    // Borrowed reference (owned by the thread state).
    return tstate->dict;
}
1957
1958
1959 PyObject *
PyThreadState_GetDict(void)1960 PyThreadState_GetDict(void)
1961 {
1962 PyThreadState *tstate = current_fast_get();
1963 if (tstate == NULL) {
1964 return NULL;
1965 }
1966 return _PyThreadState_GetDict(tstate);
1967 }
1968
1969
/* Return the interpreter that owns the given (non-NULL) thread state. */
PyInterpreterState *
PyThreadState_GetInterpreter(PyThreadState *tstate)
{
    assert(tstate != NULL);
    return tstate->interp;
}
1976
1977
/* Return a new reference to the frame object for tstate's current frame,
   or NULL if there is no frame.  If materializing the frame object fails,
   the error is swallowed and NULL is returned. */
PyFrameObject*
PyThreadState_GetFrame(PyThreadState *tstate)
{
    assert(tstate != NULL);
    _PyInterpreterFrame *f = _PyThreadState_GetFrame(tstate);
    if (f == NULL) {
        return NULL;
    }
    PyFrameObject *frame = _PyFrame_GetFrameObject(f);
    if (frame == NULL) {
        // Allocation of the frame object failed; this API reports that
        // as NULL without an exception set.
        PyErr_Clear();
    }
    return (PyFrameObject*)Py_XNewRef(frame);
}
1992
1993
/* Return the unique, monotonically assigned id of the thread state. */
uint64_t
PyThreadState_GetID(PyThreadState *tstate)
{
    assert(tstate != NULL);
    return tstate->id;
}
2000
2001
/* Mark tstate as the active thread state for the current OS thread,
   binding it into the gilstate TSS slot if it isn't bound yet. */
static inline void
tstate_activate(PyThreadState *tstate)
{
    assert(tstate != NULL);
    // XXX assert(tstate_is_alive(tstate));
    assert(tstate_is_bound(tstate));
    assert(!tstate->_status.active);

    // If already gilstate-bound, it must be bound to *this* thread.
    assert(!tstate->_status.bound_gilstate ||
           tstate == gilstate_tss_get((tstate->interp->runtime)));
    if (!tstate->_status.bound_gilstate) {
        bind_gilstate_tstate(tstate);
    }

    tstate->_status.active = 1;
}
2018
/* Clear the "active" flag set by tstate_activate().  The gilstate TSS
   binding is deliberately left in place. */
static inline void
tstate_deactivate(PyThreadState *tstate)
{
    assert(tstate != NULL);
    // XXX assert(tstate_is_alive(tstate));
    assert(tstate_is_bound(tstate));
    assert(tstate->_status.active);

    tstate->_status.active = 0;

    // We do not unbind the gilstate tstate here.
    // It will still be used in PyGILState_Ensure().
}
2032
/* Try to move tstate from DETACHED to ATTACHED.  Returns non-zero on
   success.  In the free-threaded build this can fail because a
   stop-the-world request may have moved the state to SUSPENDED. */
static int
tstate_try_attach(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    int expected = _Py_THREAD_DETACHED;
    return _Py_atomic_compare_exchange_int(&tstate->state,
                                           &expected,
                                           _Py_THREAD_ATTACHED);
#else
    // With the GIL, no other thread can change our state concurrently.
    assert(tstate->state == _Py_THREAD_DETACHED);
    tstate->state = _Py_THREAD_ATTACHED;
    return 1;
#endif
}
2047
/* Move tstate out of ATTACHED into `detached_state` (DETACHED or
   SUSPENDED).  Only the owning thread may do this, and it must currently
   be attached. */
static void
tstate_set_detached(PyThreadState *tstate, int detached_state)
{
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
#ifdef Py_GIL_DISABLED
    // Other threads (stop-the-world requesters) read this concurrently.
    _Py_atomic_store_int(&tstate->state, detached_state);
#else
    tstate->state = detached_state;
#endif
}
2058
/* Park until a SUSPENDED tstate is switched back to DETACHED by
   start_the_world(), then attach it.  Loops because another stop-the-world
   request can suspend us again before we win the attach. */
static void
tstate_wait_attach(PyThreadState *tstate)
{
    do {
        int expected = _Py_THREAD_SUSPENDED;

        // Wait until we're switched out of SUSPENDED to DETACHED.
        _PyParkingLot_Park(&tstate->state, &expected, sizeof(tstate->state),
                           /*timeout=*/-1, NULL, /*detach=*/0);

        // Once we're back in DETACHED we can re-attach
    } while (!tstate_try_attach(tstate));
}
2072
/* Attach tstate to the current OS thread: acquire the GIL (or eval lock),
   publish it as the current thread state, and resume any critical
   sections that were suspended when the thread last detached. */
void
_PyThreadState_Attach(PyThreadState *tstate)
{
#if defined(Py_DEBUG)
    // This is called from PyEval_RestoreThread(). Similar
    // to it, we need to ensure errno doesn't change.
    int err = errno;
#endif

    _Py_EnsureTstateNotNULL(tstate);
    if (current_fast_get() != NULL) {
        Py_FatalError("non-NULL old thread state");
    }


    while (1) {
        _PyEval_AcquireLock(tstate);

        // XXX assert(tstate_is_alive(tstate));
        current_fast_set(&_PyRuntime, tstate);
        tstate_activate(tstate);

        if (!tstate_try_attach(tstate)) {
            // A stop-the-world request suspended us; park until resumed.
            tstate_wait_attach(tstate);
        }

#ifdef Py_GIL_DISABLED
        if (_PyEval_IsGILEnabled(tstate) && !tstate->_status.holds_gil) {
            // The GIL was enabled between our call to _PyEval_AcquireLock()
            // and when we attached (the GIL can't go from enabled to disabled
            // here because only a thread holding the GIL can disable
            // it). Detach and try again.
            tstate_set_detached(tstate, _Py_THREAD_DETACHED);
            tstate_deactivate(tstate);
            current_fast_clear(&_PyRuntime);
            continue;
        }
        _Py_qsbr_attach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
        break;
    }

    // Resume previous critical section. This acquires the lock(s) from the
    // top-most critical section.
    if (tstate->critical_section != 0) {
        _PyCriticalSection_Resume(tstate);
    }

#if defined(Py_DEBUG)
    errno = err;
#endif
}
2125
/* Common detach path: suspend any held critical sections, deactivate the
   thread state, publish `detached_state`, and release the eval lock/GIL. */
static void
detach_thread(PyThreadState *tstate, int detached_state)
{
    // XXX assert(tstate_is_alive(tstate) && tstate_is_bound(tstate));
    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);
    assert(tstate == current_fast_get());
    if (tstate->critical_section != 0) {
        // Locks are re-acquired by _PyCriticalSection_Resume() on attach.
        _PyCriticalSection_SuspendAll(tstate);
    }
#ifdef Py_GIL_DISABLED
    _Py_qsbr_detach(((_PyThreadStateImpl *)tstate)->qsbr);
#endif
    tstate_deactivate(tstate);
    tstate_set_detached(tstate, detached_state);
    current_fast_clear(&_PyRuntime);
    _PyEval_ReleaseLock(tstate->interp, tstate, 0);
}
2143
/* Detach the current thread state, leaving it in the DETACHED state. */
void
_PyThreadState_Detach(PyThreadState *tstate)
{
    detach_thread(tstate, _Py_THREAD_DETACHED);
}
2149
/* Detach the current thread in response to a stop-the-world request.
   If a request is pending, the state becomes SUSPENDED and the requester's
   countdown is decremented; otherwise this is a plain detach. */
void
_PyThreadState_Suspend(PyThreadState *tstate)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    assert(_Py_atomic_load_int_relaxed(&tstate->state) == _Py_THREAD_ATTACHED);

    // Determine which (if any) stop-the-world request is active; the
    // global one takes precedence over the per-interpreter one.
    struct _stoptheworld_state *stw = NULL;
    HEAD_LOCK(runtime);
    if (runtime->stoptheworld.requested) {
        stw = &runtime->stoptheworld;
    }
    else if (tstate->interp->stoptheworld.requested) {
        stw = &tstate->interp->stoptheworld;
    }
    HEAD_UNLOCK(runtime);

    if (stw == NULL) {
        // Switch directly to "detached" if there is no active stop-the-world
        // request.
        detach_thread(tstate, _Py_THREAD_DETACHED);
        return;
    }

    // Switch to "suspended" state.
    detach_thread(tstate, _Py_THREAD_SUSPENDED);

    // Decrease the count of remaining threads needing to park.
    HEAD_LOCK(runtime);
    decrement_stoptheworld_countdown(stw);
    HEAD_UNLOCK(runtime);
}
2182
2183 // Decrease stop-the-world counter of remaining number of threads that need to
2184 // pause. If we are the final thread to pause, notify the requesting thread.
2185 static void
decrement_stoptheworld_countdown(struct _stoptheworld_state * stw)2186 decrement_stoptheworld_countdown(struct _stoptheworld_state *stw)
2187 {
2188 assert(stw->thread_countdown > 0);
2189 if (--stw->thread_countdown == 0) {
2190 _PyEvent_Notify(&stw->stop_event);
2191 }
2192 }
2193
2194 #ifdef Py_GIL_DISABLED
2195 // Interpreter for _Py_FOR_EACH_THREAD(). For global stop-the-world events,
2196 // we start with the first interpreter and then iterate over all interpreters.
2197 // For per-interpreter stop-the-world events, we only operate on the one
2198 // interpreter.
/* Starting interpreter for _Py_FOR_EACH_THREAD(): the head of the
   interpreter list for a global event, or the single owning interpreter
   (recovered from the embedded stw struct) for a per-interpreter event. */
static PyInterpreterState *
interp_for_stop_the_world(struct _stoptheworld_state *stw)
{
    return (stw->is_global
        ? PyInterpreterState_Head()
        : _Py_CONTAINER_OF(stw, PyInterpreterState, stoptheworld));
}
2206
// Loops over threads for a stop-the-world event.
// For global: all threads in all interpreters.
// For per-interpreter: all threads in the one interpreter (the interpreter
// loop runs exactly once because the "next" expression yields NULL).
#define _Py_FOR_EACH_THREAD(stw, i, t)                                  \
    for (i = interp_for_stop_the_world((stw));                          \
         i != NULL; i = ((stw->is_global) ? i->next : NULL))            \
        for (t = i->threads.head; t; t = t->next)
2214
2215
// Try to transition threads atomically from the "detached" state to the
// "suspended" state.  Returns true if this call parked the last remaining
// thread (i.e. all threads are now suspended).  Caller holds HEAD_LOCK.
static bool
park_detached_threads(struct _stoptheworld_state *stw)
{
    int num_parked = 0;
    PyInterpreterState *i;
    PyThreadState *t;
    _Py_FOR_EACH_THREAD(stw, i, t) {
        int state = _Py_atomic_load_int_relaxed(&t->state);
        if (state == _Py_THREAD_DETACHED) {
            // Atomically transition to "suspended" if in "detached" state.
            if (_Py_atomic_compare_exchange_int(&t->state,
                                                &state, _Py_THREAD_SUSPENDED)) {
                num_parked++;
            }
        }
        else if (state == _Py_THREAD_ATTACHED && t != stw->requester) {
            // Ask attached threads to suspend themselves at the next
            // eval-breaker check.
            _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
        }
    }
    stw->thread_countdown -= num_parked;
    assert(stw->thread_countdown >= 0);
    return num_parked > 0 && stw->thread_countdown == 0;
}
2241
/* Pause all other threads covered by `stw` (global or per-interpreter).
   On return the world is stopped and stw->mutex is still held; it is
   released by the matching start_the_world() call. */
static void
stop_the_world(struct _stoptheworld_state *stw)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    PyMutex_Lock(&stw->mutex);
    // Global requests take the runtime rwlock exclusively so they exclude
    // all per-interpreter requests; per-interpreter requests share it.
    if (stw->is_global) {
        _PyRWMutex_Lock(&runtime->stoptheworld_mutex);
    }
    else {
        _PyRWMutex_RLock(&runtime->stoptheworld_mutex);
    }

    HEAD_LOCK(runtime);
    stw->requested = 1;
    stw->thread_countdown = 0;
    stw->stop_event = (PyEvent){0}; // zero-initialize (unset)
    stw->requester = _PyThreadState_GET(); // may be NULL

    PyInterpreterState *i;
    PyThreadState *t;
    _Py_FOR_EACH_THREAD(stw, i, t) {
        if (t != stw->requester) {
            // Count all the other threads (we don't wait on ourself).
            stw->thread_countdown++;
        }
    }

    if (stw->thread_countdown == 0) {
        // No other threads exist: we are done immediately.
        HEAD_UNLOCK(runtime);
        stw->world_stopped = 1;
        return;
    }

    for (;;) {
        // Switch threads that are detached to the suspended state.
        bool stopped_all_threads = park_detached_threads(stw);
        HEAD_UNLOCK(runtime);

        if (stopped_all_threads) {
            break;
        }

        PyTime_t wait_ns = 1000*1000; // 1ms (arbitrary, may need tuning)
        int detach = 0;
        if (PyEvent_WaitTimed(&stw->stop_event, wait_ns, detach)) {
            // The last thread suspended itself and signalled us.
            assert(stw->thread_countdown == 0);
            break;
        }

        HEAD_LOCK(runtime);
    }
    stw->world_stopped = 1;
}
2296
/* Undo stop_the_world(): move every suspended thread back to DETACHED,
   wake the parked threads, and release the locks taken by
   stop_the_world() (including stw->mutex, still held since then). */
static void
start_the_world(struct _stoptheworld_state *stw)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    assert(PyMutex_IsLocked(&stw->mutex));

    HEAD_LOCK(runtime);
    stw->requested = 0;
    stw->world_stopped = 0;
    // Switch threads back to the detached state.
    PyInterpreterState *i;
    PyThreadState *t;
    _Py_FOR_EACH_THREAD(stw, i, t) {
        if (t != stw->requester) {
            assert(_Py_atomic_load_int_relaxed(&t->state) ==
                   _Py_THREAD_SUSPENDED);
            _Py_atomic_store_int(&t->state, _Py_THREAD_DETACHED);
            // Wake any thread parked in tstate_wait_attach().
            _PyParkingLot_UnparkAll(&t->state);
        }
    }
    stw->requester = NULL;
    HEAD_UNLOCK(runtime);
    if (stw->is_global) {
        _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);
    }
    else {
        _PyRWMutex_RUnlock(&runtime->stoptheworld_mutex);
    }
    PyMutex_Unlock(&stw->mutex);
}
2327 #endif // Py_GIL_DISABLED
2328
/* Stop all threads in all interpreters (no-op in the default build,
   where the GIL already provides mutual exclusion). */
void
_PyEval_StopTheWorldAll(_PyRuntimeState *runtime)
{
#ifdef Py_GIL_DISABLED
    stop_the_world(&runtime->stoptheworld);
#endif
}
2336
/* Resume all threads paused by _PyEval_StopTheWorldAll() (no-op in the
   default build). */
void
_PyEval_StartTheWorldAll(_PyRuntimeState *runtime)
{
#ifdef Py_GIL_DISABLED
    start_the_world(&runtime->stoptheworld);
#endif
}
2344
/* Stop all threads of a single interpreter (no-op in the default build). */
void
_PyEval_StopTheWorld(PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    stop_the_world(&interp->stoptheworld);
#endif
}
2352
/* Resume the threads paused by _PyEval_StopTheWorld() (no-op in the
   default build). */
void
_PyEval_StartTheWorld(PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    start_the_world(&interp->stoptheworld);
#endif
}
2360
2361 //----------
2362 // other API
2363 //----------
2364
/* Asynchronously raise an exception in a thread.
   Requested by Just van Rossum and Alex Martelli.
   To prevent naive misuse, you must write your own extension
   to call this, or use ctypes.  Must be called with the GIL held.
   Returns the number of tstates modified (normally 1, but 0 if `id` didn't
   match any known thread id).  Can be called with exc=NULL to clear an
   existing async exception.  This raises no exceptions. */

// XXX Move this to Python/ceval_gil.c?
// XXX Deprecate this.
int
PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyInterpreterState *interp = _PyInterpreterState_GET();

    /* Although the GIL is held, a few C API functions can be called
     * without the GIL held, and in particular some that create and
     * destroy thread and interpreter states.  Those can mutate the
     * list of thread states we're traversing, so to prevent that we lock
     * head_mutex for the duration.
     */
    HEAD_LOCK(runtime);
    for (PyThreadState *tstate = interp->threads.head; tstate != NULL; tstate = tstate->next) {
        if (tstate->thread_id != id) {
            continue;
        }

        /* Tricky: we need to decref the current value
         * (if any) in tstate->async_exc, but that can in turn
         * allow arbitrary Python code to run, including
         * perhaps calls to this function.  To prevent
         * deadlock, we need to release head_mutex before
         * the decref.
         */
        Py_XINCREF(exc);
        PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);
        HEAD_UNLOCK(runtime);

        Py_XDECREF(old_exc);
        // Make the target thread notice the pending async exception.
        _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
        return 1;
    }
    HEAD_UNLOCK(runtime);
    return 0;
}
2411
2412 //---------------------------------
2413 // API for the current thread state
2414 //---------------------------------
2415
/* Return the current thread state, or NULL if there is none.
   Never fails or asserts. */
PyThreadState *
PyThreadState_GetUnchecked(void)
{
    return current_fast_get();
}
2421
2422
/* Return the current thread state; fatal error if there is none. */
PyThreadState *
PyThreadState_Get(void)
{
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    return tstate;
}
2430
2431 PyThreadState *
_PyThreadState_Swap(_PyRuntimeState * runtime,PyThreadState * newts)2432 _PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts)
2433 {
2434 PyThreadState *oldts = current_fast_get();
2435 if (oldts != NULL) {
2436 _PyThreadState_Detach(oldts);
2437 }
2438 if (newts != NULL) {
2439 _PyThreadState_Attach(newts);
2440 }
2441 return oldts;
2442 }
2443
/* Public wrapper around _PyThreadState_Swap() using the global runtime. */
PyThreadState *
PyThreadState_Swap(PyThreadState *newts)
{
    return _PyThreadState_Swap(&_PyRuntime, newts);
}
2449
2450
/* Bind tstate to the current OS thread, and make it this thread's
   gilstate tstate if none is bound yet. */
void
_PyThreadState_Bind(PyThreadState *tstate)
{
    // gh-104690: If Python is being finalized and PyInterpreterState_Delete()
    // was called, tstate becomes a dangling pointer.
    assert(_PyThreadState_CheckConsistency(tstate));

    bind_tstate(tstate);
    // This makes sure there's a gilstate tstate bound
    // as soon as possible.
    if (gilstate_tss_get(tstate->interp->runtime) == NULL) {
        bind_gilstate_tstate(tstate);
    }
}
2465
#if defined(Py_GIL_DISABLED) && !defined(Py_LIMITED_API)
/* Return a per-OS-thread unique address, used as a thread identifier in
   the free-threaded build. */
uintptr_t
_Py_GetThreadLocal_Addr(void)
{
#ifdef HAVE_THREAD_LOCAL
    // gh-112535: Use the address of the thread-local PyThreadState variable as
    // a unique identifier for the current thread. Each thread has a unique
    // _Py_tss_tstate variable with a unique address.
    return (uintptr_t)&_Py_tss_tstate;
#else
#  error "no supported thread-local variable storage classifier"
#endif
}
#endif
2480
2481 /***********************************/
2482 /* routines for advanced debuggers */
2483 /***********************************/
2484
2485 // (requested by David Beazley)
2486 // Don't use unless you know what you are doing!
2487
/* Debugger API: return the first interpreter in the runtime's list. */
PyInterpreterState *
PyInterpreterState_Head(void)
{
    return _PyRuntime.interpreters.head;
}
2493
/* Debugger API: return the main interpreter. */
PyInterpreterState *
PyInterpreterState_Main(void)
{
    return _PyInterpreterState_Main();
}
2499
/* Debugger API: return the next interpreter in the list, or NULL. */
PyInterpreterState *
PyInterpreterState_Next(PyInterpreterState *interp) {
    return interp->next;
}
2504
/* Debugger API: return the first thread state of the interpreter. */
PyThreadState *
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
    return interp->threads.head;
}
2509
/* Debugger API: return the next thread state in the list, or NULL. */
PyThreadState *
PyThreadState_Next(PyThreadState *tstate) {
    return tstate->next;
}
2514
2515
2516 /********************************************/
2517 /* reporting execution state of all threads */
2518 /********************************************/
2519
/* The implementation of sys._current_frames().  This is intended to be
   called with the GIL held, as it will be when called via
   sys._current_frames().  It's possible it would work fine even without
   the GIL held, but haven't thought enough about that.

   Returns a new dict mapping thread_id -> frame object, or NULL with an
   exception set on failure.
*/
PyObject *
_PyThread_CurrentFrames(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = current_fast_get();
    if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
        return NULL;
    }

    PyObject *result = PyDict_New();
    if (result == NULL) {
        return NULL;
    }

    /* for i in all interpreters:
     *     for t in all of i's thread states:
     *          if t's frame isn't NULL, map t's id to its frame
     * Because these lists can mutate even when the GIL is held, we
     * need to grab head_mutex for the duration.
     */
    _PyEval_StopTheWorldAll(runtime);
    HEAD_LOCK(runtime);
    PyInterpreterState *i;
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
        PyThreadState *t;
        for (t = i->threads.head; t != NULL; t = t->next) {
            _PyInterpreterFrame *frame = t->current_frame;
            // Skip incomplete (partially initialized) frames.
            frame = _PyFrame_GetFirstComplete(frame);
            if (frame == NULL) {
                continue;
            }
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
            if (id == NULL) {
                goto fail;
            }
            PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
            if (frameobj == NULL) {
                Py_DECREF(id);
                goto fail;
            }
            int stat = PyDict_SetItem(result, id, frameobj);
            Py_DECREF(id);
            if (stat < 0) {
                goto fail;
            }
        }
    }
    goto done;

fail:
    Py_CLEAR(result);

done:
    // Always restart the world and drop the lock, success or failure.
    HEAD_UNLOCK(runtime);
    _PyEval_StartTheWorldAll(runtime);
    return result;
}
2582
/* The implementation of sys._current_exceptions().  This is intended to be
   called with the GIL held, as it will be when called via
   sys._current_exceptions().  It's possible it would work fine even without
   the GIL held, but haven't thought enough about that.

   Returns a new dict mapping thread_id -> exception (or None), or NULL
   with an exception set on failure.
*/
PyObject *
_PyThread_CurrentExceptions(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = current_fast_get();

    _Py_EnsureTstateNotNULL(tstate);

    if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
        return NULL;
    }

    PyObject *result = PyDict_New();
    if (result == NULL) {
        return NULL;
    }

    /* for i in all interpreters:
     *     for t in all of i's thread states:
     *          if t is handling an exception, map t's id to the topmost
     *          exception being handled
     * Because these lists can mutate even when the GIL is held, we
     * need to grab head_mutex for the duration.
     */
    _PyEval_StopTheWorldAll(runtime);
    HEAD_LOCK(runtime);
    PyInterpreterState *i;
    for (i = runtime->interpreters.head; i != NULL; i = i->next) {
        PyThreadState *t;
        for (t = i->threads.head; t != NULL; t = t->next) {
            _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
            if (err_info == NULL) {
                continue;
            }
            PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
            if (id == NULL) {
                goto fail;
            }
            PyObject *exc = err_info->exc_value;
            assert(exc == NULL ||
                   exc == Py_None ||
                   PyExceptionInstance_Check(exc));

            int stat = PyDict_SetItem(result, id, exc == NULL ? Py_None : exc);
            Py_DECREF(id);
            if (stat < 0) {
                goto fail;
            }
        }
    }
    goto done;

fail:
    Py_CLEAR(result);

done:
    // Always restart the world and drop the lock, success or failure.
    HEAD_UNLOCK(runtime);
    _PyEval_StartTheWorldAll(runtime);
    return result;
}
2647
2648
2649 /***********************************/
2650 /* Python "auto thread state" API. */
2651 /***********************************/
2652
/* Internal initialization/finalization functions called by
   Py_Initialize/Py_FinalizeEx
*/

/* Record the main interpreter as the auto-interpreter used by the
   PyGILState_* API.  No-op for subinterpreters. */
PyStatus
_PyGILState_Init(PyInterpreterState *interp)
{
    if (!_Py_IsMainInterpreter(interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible to initialize it. */
        return _PyStatus_OK();
    }
    _PyRuntimeState *runtime = interp->runtime;
    assert(gilstate_tss_get(runtime) == NULL);
    assert(runtime->gilstate.autoInterpreterState == NULL);
    runtime->gilstate.autoInterpreterState = interp;
    return _PyStatus_OK();
}
2670
/* Clear the auto-interpreter recorded by _PyGILState_Init().
   No-op for subinterpreters. */
void
_PyGILState_Fini(PyInterpreterState *interp)
{
    if (!_Py_IsMainInterpreter(interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible for it. */
        return;
    }
    interp->runtime->gilstate.autoInterpreterState = NULL;
}
2681
2682
// XXX Drop this.
/* Sanity-check the gilstate setup for the main interpreter's initial
   thread state; all real work already happened in bind_gilstate_tstate(). */
void
_PyGILState_SetTstate(PyThreadState *tstate)
{
    /* must init with valid states */
    assert(tstate != NULL);
    assert(tstate->interp != NULL);

    if (!_Py_IsMainInterpreter(tstate->interp)) {
        /* Currently, PyGILState is shared by all interpreters. The main
         * interpreter is responsible to initialize it. */
        return;
    }

#ifndef NDEBUG
    _PyRuntimeState *runtime = tstate->interp->runtime;

    assert(runtime->gilstate.autoInterpreterState == tstate->interp);
    assert(gilstate_tss_get(runtime) == tstate);
    assert(tstate->gilstate_counter == 1);
#endif
}
2705
/* Return the auto-interpreter used by PyGILState_Ensure(); "unsafe"
   because no locking or liveness check is performed. */
PyInterpreterState *
_PyGILState_GetInterpreterStateUnsafe(void)
{
    return _PyRuntime.gilstate.autoInterpreterState;
}
2711
2712 /* The public functions */
2713
/* Return the thread state bound to the current OS thread via the
   gilstate TSS, or NULL if none (or if the TSS isn't initialized yet). */
PyThreadState *
PyGILState_GetThisThreadState(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    if (!gilstate_tss_initialized(runtime)) {
        return NULL;
    }
    return gilstate_tss_get(runtime);
}
2723
/* Return 1 if the current thread holds the GIL (i.e. its gilstate tstate
   is the attached tstate).  Returns 1 unconditionally when checking is
   disabled or the gilstate machinery isn't initialized yet. */
int
PyGILState_Check(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    if (!runtime->gilstate.check_enabled) {
        return 1;
    }

    if (!gilstate_tss_initialized(runtime)) {
        return 1;
    }

    PyThreadState *tstate = current_fast_get();
    if (tstate == NULL) {
        return 0;
    }

    PyThreadState *tcur = gilstate_tss_get(runtime);
    return (tstate == tcur);
}
2744
/* Ensure the current OS thread holds the GIL and has a thread state,
   creating a thread state on first use.  Returns LOCKED if the GIL was
   already held, UNLOCKED otherwise; pass the value to
   PyGILState_Release(). */
PyGILState_STATE
PyGILState_Ensure(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    /* Note that we do not auto-init Python here - apart from
       potential races with 2 threads auto-initializing, pep-311
       spells out other issues.  Embedders are expected to have
       called Py_Initialize(). */

    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
       called by Py_Initialize() */
    assert(_PyEval_ThreadsInitialized());
    assert(gilstate_tss_initialized(runtime));
    assert(runtime->gilstate.autoInterpreterState != NULL);

    PyThreadState *tcur = gilstate_tss_get(runtime);
    int has_gil;
    if (tcur == NULL) {
        /* Create a new Python thread state for this thread */
        // XXX Use PyInterpreterState_EnsureThreadState()?
        tcur = new_threadstate(runtime->gilstate.autoInterpreterState,
                               _PyThreadState_WHENCE_GILSTATE);
        if (tcur == NULL) {
            Py_FatalError("Couldn't create thread-state for new thread");
        }
        bind_tstate(tcur);
        bind_gilstate_tstate(tcur);

        /* This is our thread state!  We'll need to delete it in the
           matching call to PyGILState_Release(). */
        assert(tcur->gilstate_counter == 1);
        tcur->gilstate_counter = 0;
        has_gil = 0; /* new thread state is never current */
    }
    else {
        has_gil = holds_gil(tcur);
    }

    if (!has_gil) {
        PyEval_RestoreThread(tcur);
    }

    /* Update our counter in the thread-state - no need for locks:
       - tcur will remain valid as we hold the GIL.
       - the counter is safe as we are the only thread "allowed"
         to modify this value
    */
    ++tcur->gilstate_counter;

    return has_gil ? PyGILState_LOCKED : PyGILState_UNLOCKED;
}
2797
/* Undo a matching PyGILState_Ensure(): decrement the nesting counter,
   destroy the thread state if this thread created it, and release the
   GIL if Ensure() acquired it. */
void
PyGILState_Release(PyGILState_STATE oldstate)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = gilstate_tss_get(runtime);
    if (tstate == NULL) {
        Py_FatalError("auto-releasing thread-state, "
                      "but no thread-state for this thread");
    }

    /* We must hold the GIL and have our thread state current */
    /* XXX - remove the check - the assert should be fine,
       but while this is very new (April 2003), the extra check
       by release-only users can't hurt.
    */
    if (!holds_gil(tstate)) {
        _Py_FatalErrorFormat(__func__,
                             "thread state %p must be current when releasing",
                             tstate);
    }
    assert(holds_gil(tstate));
    --tstate->gilstate_counter;
    assert(tstate->gilstate_counter >= 0); /* illegal counter value */

    /* If we're going to destroy this thread-state, we must
     * clear it while the GIL is held, as destructors may run.
     */
    if (tstate->gilstate_counter == 0) {
        /* can't have been locked when we created it */
        assert(oldstate == PyGILState_UNLOCKED);
        // XXX Unbind tstate here.
        // gh-119585: `PyThreadState_Clear()` may call destructors that
        // themselves use PyGILState_Ensure and PyGILState_Release, so make
        // sure that gilstate_counter is not zero when calling it.
        ++tstate->gilstate_counter;
        PyThreadState_Clear(tstate);
        --tstate->gilstate_counter;
        /* Delete the thread-state.  Note this releases the GIL too!
         * It's vital that the GIL be held here, to avoid shutdown
         * races; see bugs 225673 and 1061968 (that nasty bug has a
         * habit of coming back).
         */
        assert(tstate->gilstate_counter == 0);
        assert(current_fast_get() == tstate);
        _PyThreadState_DeleteCurrent(tstate);
    }
    /* Release the lock if necessary */
    else if (oldstate == PyGILState_UNLOCKED) {
        PyEval_SaveThread();
    }
}
2849
2850
2851 /*************/
2852 /* Other API */
2853 /*************/
2854
2855 _PyFrameEvalFunction
_PyInterpreterState_GetEvalFrameFunc(PyInterpreterState * interp)2856 _PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp)
2857 {
2858 if (interp->eval_frame == NULL) {
2859 return _PyEval_EvalFrameDefault;
2860 }
2861 return interp->eval_frame;
2862 }
2863
2864
/* Install a custom frame-evaluation function (PEP 523).  Passing the
   default evaluator is stored as NULL; changing the function invalidates
   tier-2 executors. */
void
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
                                     _PyFrameEvalFunction eval_frame)
{
    if (eval_frame == _PyEval_EvalFrameDefault) {
        // Normalize: NULL means "default" (see GetEvalFrameFunc).
        eval_frame = NULL;
    }
    if (eval_frame == interp->eval_frame) {
        return;
    }
#ifdef _Py_TIER2
    if (eval_frame != NULL) {
        // Optimized executors assume the default evaluator.
        _Py_Executors_InvalidateAll(interp, 1);
    }
#endif
    RARE_EVENT_INC(set_eval_frame_func);
    interp->eval_frame = eval_frame;
}
2883
2884
/* Return a borrowed pointer to the interpreter's configuration. */
const PyConfig*
_PyInterpreterState_GetConfig(PyInterpreterState *interp)
{
    return &interp->config;
}
2890
2891
/* Copy the current interpreter's configuration into *config.
   Returns 0 on success, -1 with an exception set on failure. */
int
_PyInterpreterState_GetConfigCopy(PyConfig *config)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();

    PyStatus status = _PyConfig_Copy(config, &interp->config);
    if (PyStatus_Exception(status)) {
        _PyErr_SetFromPyStatus(status);
        return -1;
    }
    return 0;
}
2904
2905
/* Return the current interpreter's configuration.  The GIL must be held
   and a current thread state must exist. */
const PyConfig*
_Py_GetConfig(void)
{
    assert(PyGILState_Check());
    PyThreadState *tstate = current_fast_get();
    _Py_EnsureTstateNotNULL(tstate);
    return _PyInterpreterState_GetConfig(tstate->interp);
}
2914
2915
2916 int
_PyInterpreterState_HasFeature(PyInterpreterState * interp,unsigned long feature)2917 _PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature)
2918 {
2919 return ((interp->feature_flags & feature) != 0);
2920 }
2921
2922
// Extra slack (in PyObject* slots) allocated beyond the requested size so
// subsequent small pushes don't immediately need another chunk.
#define MINIMUM_OVERHEAD 1000

/* Allocate a new data-stack chunk big enough for `size` slots (plus
   overhead), link it after the current chunk, and return a pointer to the
   reserved `size` slots.  Returns NULL on allocation failure. */
static PyObject **
push_chunk(PyThreadState *tstate, int size)
{
    int allocate_size = DATA_STACK_CHUNK_SIZE;
    // Double until the chunk fits the request plus the minimum overhead.
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
        allocate_size *= 2;
    }
    _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk);
    if (new == NULL) {
        return NULL;
    }
    if (tstate->datastack_chunk) {
        // Remember where the old chunk's top was so PopFrame can restore it.
        tstate->datastack_chunk->top = tstate->datastack_top -
                                       &tstate->datastack_chunk->data[0];
    }
    tstate->datastack_chunk = new;
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
    // first element:
    PyObject **res = &new->data[new->previous == NULL];
    tstate->datastack_top = res + size;
    return res;
}
2949
/* Reserve `size` PyObject* slots on the thread's data stack for a new
   interpreter frame, spilling to a fresh chunk when the current one is
   full.  Returns NULL on allocation failure. */
_PyInterpreterFrame *
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size)
{
    assert(size < INT_MAX/sizeof(PyObject *));
    if (_PyThreadState_HasStackSpace(tstate, (int)size)) {
        // Fast path: bump-allocate within the current chunk.
        _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top;
        tstate->datastack_top += size;
        return res;
    }
    return (_PyInterpreterFrame *)push_chunk(tstate, (int)size);
}
2961
/* Release the data-stack space of `frame`.  If the frame was the first
   entry of a (non-root) chunk, free the whole chunk and return to the
   previous one; otherwise just lower the stack top. */
void
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame * frame)
{
    assert(tstate->datastack_chunk);
    PyObject **base = (PyObject **)frame;
    if (base == &tstate->datastack_chunk->data[0]) {
        _PyStackChunk *chunk = tstate->datastack_chunk;
        _PyStackChunk *previous = chunk->previous;
        // push_chunk ensures that the root chunk is never popped:
        assert(previous);
        tstate->datastack_top = &previous->data[previous->top];
        tstate->datastack_chunk = previous;
        _PyObject_VirtualFree(chunk, chunk->size);
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
    }
    else {
        assert(tstate->datastack_top);
        assert(tstate->datastack_top >= base);
        tstate->datastack_top = base;
    }
}
2983
2984
2985 #ifndef NDEBUG
// Check that a Python thread state is valid. In practice, this function is
// used on a Python debug build to check if 'tstate' is a dangling pointer,
// i.e. if the PyThreadState memory has been freed.
2989 //
2990 // Usage:
2991 //
2992 // assert(_PyThreadState_CheckConsistency(tstate));
// Always returns 1; failures surface as assertion aborts (debug builds).
int
_PyThreadState_CheckConsistency(PyThreadState *tstate)
{
    // _PyMem_IsPtrFreed() presumably recognizes the debug allocator's
    // freed-memory fill pattern -- TODO confirm against its definition.
    assert(!_PyMem_IsPtrFreed(tstate));
    assert(!_PyMem_IsPtrFreed(tstate->interp));
    return 1;
}
3000 #endif
3001
3002
// Check if a Python thread must exit immediately, rather than taking the GIL
// if Py_Finalize() has been called.
//
// When this function is called by a daemon thread after Py_Finalize() has been
// called, the GIL no longer exists.
//
// tstate can be a dangling pointer (pointing to freed memory): only the
// tstate value is used, the pointer is not dereferenced.
3011 //
3012 // tstate must be non-NULL.
int
_PyThreadState_MustExit(PyThreadState *tstate)
{
    /* bpo-39877: Access _PyRuntime directly rather than using
       tstate->interp->runtime to support calls from Python daemon threads.
       After Py_Finalize() has been called, tstate can be a dangling pointer:
       point to PyThreadState freed memory. */
    unsigned long finalizing_id = _PyRuntimeState_GetFinalizingID(&_PyRuntime);
    PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime);
    if (finalizing == NULL) {
        // The runtime isn't finalizing; fall back to the per-interpreter
        // finalizing state.
        // XXX This isn't completely safe from daemon threads,
        // since tstate might be a dangling pointer.
        finalizing = _PyInterpreterState_GetFinalizing(tstate->interp);
        finalizing_id = _PyInterpreterState_GetFinalizingID(tstate->interp);
    }
    // XXX else check &_PyRuntime._main_interpreter._initial_thread
    if (finalizing == NULL) {
        // No finalization in progress: the thread may continue.
        return 0;
    }
    else if (finalizing == tstate) {
        // This thread is the one running the finalization itself.
        return 0;
    }
    else if (finalizing_id == PyThread_get_thread_ident()) {
        /* gh-109793: we must have switched interpreters. */
        return 0;
    }
    // Some other thread is finalizing: this thread must exit immediately.
    return 1;
}
3041
3042 /********************/
3043 /* mimalloc support */
3044 /********************/
3045
// Initialize the per-thread mimalloc state for `tstate` and bind it to the
// calling OS thread.  No-op on builds without Py_GIL_DISABLED.
static void
tstate_mimalloc_bind(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    struct _mimalloc_thread_state *mts = &((_PyThreadStateImpl*)tstate)->mimalloc;

    // Initialize the mimalloc thread state. This must be called from the
    // same thread that will use the thread state. The "mem" heap doubles as
    // the "backing" heap.
    mi_tld_t *tld = &mts->tld;
    _mi_tld_init(tld, &mts->heaps[_Py_MIMALLOC_HEAP_MEM]);
    llist_init(&mts->page_list);

    // Exiting threads push any remaining in-use segments to the abandoned
    // pool to be re-claimed later by other threads. We use per-interpreter
    // pools to keep Python objects from different interpreters separate.
    tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;

    // Don't fill in the first N bytes up to ob_type in debug builds. We may
    // access ob_tid and the refcount fields in the dict and list lock-less
    // accesses, so they must remain valid for a while after deallocation.
    size_t base_offset = offsetof(PyObject, ob_type);
    if (_PyMem_DebugEnabled()) {
        // The debug allocator adds two words at the beginning of each block.
        base_offset += 2 * sizeof(size_t);
    }
    // Per-heap offset below which freed memory is NOT overwritten with the
    // debug fill pattern.
    size_t debug_offsets[_Py_MIMALLOC_HEAP_COUNT] = {
        [_Py_MIMALLOC_HEAP_OBJECT] = base_offset,
        [_Py_MIMALLOC_HEAP_GC] = base_offset,
        [_Py_MIMALLOC_HEAP_GC_PRE] = base_offset + 2 * sizeof(PyObject *),
    };

    // Initialize each heap
    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
        mts->heaps[i].debug_offset = (uint8_t)debug_offsets[i];
    }

    // Heaps that store Python objects should use QSBR to delay freeing
    // mimalloc pages while there may be concurrent lock-free readers.
    mts->heaps[_Py_MIMALLOC_HEAP_OBJECT].page_use_qsbr = true;
    mts->heaps[_Py_MIMALLOC_HEAP_GC].page_use_qsbr = true;
    mts->heaps[_Py_MIMALLOC_HEAP_GC_PRE].page_use_qsbr = true;

    // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
    // _PyObject_GC_New() and similar functions temporarily override this to
    // use one of the GC heaps.
    mts->current_object_heap = &mts->heaps[_Py_MIMALLOC_HEAP_OBJECT];

    // Publish "initialized" last so other readers observe a fully set-up
    // mimalloc thread state.
    _Py_atomic_store_int(&mts->initialized, 1);
#endif
}
3098
3099 void
_PyThreadState_ClearMimallocHeaps(PyThreadState * tstate)3100 _PyThreadState_ClearMimallocHeaps(PyThreadState *tstate)
3101 {
3102 #ifdef Py_GIL_DISABLED
3103 if (!tstate->_status.bound) {
3104 // The mimalloc heaps are only initialized when the thread is bound.
3105 return;
3106 }
3107
3108 _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
3109 for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
3110 // Abandon all segments in use by this thread. This pushes them to
3111 // a shared pool to later be reclaimed by other threads. It's important
3112 // to do this before the thread state is destroyed so that objects
3113 // remain visible to the GC.
3114 _mi_heap_collect_abandon(&tstate_impl->mimalloc.heaps[i]);
3115 }
3116 #endif
3117 }
3118