#ifndef Py_INTERNAL_OBJECT_H
#define Py_INTERNAL_OBJECT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include <stdbool.h>
#include "pycore_gc.h"                      // _PyObject_GC_IS_TRACKED()
#include "pycore_emscripten_trampoline.h"   // _PyCFunction_TrampolineCall()
#include "pycore_interp.h"                  // PyInterpreterState.gc
#include "pycore_pyatomic_ft_wrappers.h"    // FT_ATOMIC_STORE_PTR_RELAXED
#include "pycore_pystate.h"                 // _PyInterpreterState_GET()


#define _Py_IMMORTAL_REFCNT_LOOSE ((_Py_IMMORTAL_REFCNT >> 1) + 1)

// gh-121528, gh-118997: Similar to _Py_IsImmortal(), but more lenient when
// comparing the reference count, to stay compatible with C extensions built
// with the stable ABI 3.11 or older. Such extensions implement INCREF/DECREF
// as refcnt++ and refcnt-- without taking immortal objects into account. For
// example, the reference count of an immortal object can change from
// _Py_IMMORTAL_REFCNT to _Py_IMMORTAL_REFCNT+1 (INCREF) or
// _Py_IMMORTAL_REFCNT-1 (DECREF).
//
// This function should only be used in assertions. Otherwise, _Py_IsImmortal()
// must be used instead.
static inline int _Py_IsImmortalLoose(PyObject *op)
{
#if defined(Py_GIL_DISABLED)
    return _Py_IsImmortal(op);
#else
    return (op->ob_refcnt >= _Py_IMMORTAL_REFCNT_LOOSE);
#endif
}
#define _Py_IsImmortalLoose(op) _Py_IsImmortalLoose(_PyObject_CAST(op))
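// A minimal usage sketch (illustrative, not part of the original header): the
// loose check is only meant for assertions on objects that stable-ABI
// extensions may have incref'd/decref'd past _Py_IMMORTAL_REFCNT, e.g.:
//
//     assert(_Py_IsImmortalLoose(Py_None));
//
// Code that makes runtime decisions must keep using _Py_IsImmortal().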


/* Check if an object is consistent. For example, ensure that the reference
   counter is greater than or equal to 1, and ensure that ob_type is not NULL.

   Call _PyObject_AssertFailed() if the object is inconsistent.

   If check_content is zero, only check the header fields to reduce the overhead.

   The function always returns 1. The return value only exists so that it is
   possible to write:

   assert(_PyObject_CheckConsistency(obj, 1)); */
extern int _PyObject_CheckConsistency(PyObject *op, int check_content);

extern void _PyDebugAllocatorStats(FILE *out, const char *block_name,
                                   int num_blocks, size_t sizeof_block);

extern void _PyObject_DebugTypeStats(FILE *out);

#ifdef Py_TRACE_REFS
// Forget a reference registered by _Py_NewReference(). The function is called
// by _Py_Dealloc().
//
// On a free list, the function can be used before modifying an object to
// remove it from the traced objects. Then _Py_NewReference() or
// _Py_NewReferenceNoTotal() should be called again on the object to trace
// it again.
extern void _Py_ForgetReference(PyObject *);
#endif

// Export for the shared _testinternalcapi extension
PyAPI_FUNC(int) _PyObject_IsFreed(PyObject *);

/* We need to maintain an internal copy of Py{Var}Object_HEAD_INIT to avoid
   designated initializer conflicts in C++20. If we use the definition in
   object.h, we will be mixing designated and non-designated initializers in
   pycore objects, which is forbidden in C++20. However, if we then use
   designated initializers in object.h, extensions built without designated
   initializers break. Furthermore, we can't use designated initializers in
   extensions since they are not supported pre-C++20. Thus, keeping an
   internal copy here is the most backwards-compatible solution. */
#if defined(Py_GIL_DISABLED)
#define _PyObject_HEAD_INIT(type)                   \
    {                                               \
        .ob_ref_local = _Py_IMMORTAL_REFCNT_LOCAL,  \
        .ob_type = (type)                           \
    }
#else
#define _PyObject_HEAD_INIT(type)         \
    {                                     \
        .ob_refcnt = _Py_IMMORTAL_REFCNT, \
        .ob_type = (type)                 \
    }
#endif
#define _PyVarObject_HEAD_INIT(type, size)    \
    {                                         \
        .ob_base = _PyObject_HEAD_INIT(type), \
        .ob_size = size                       \
    }
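// A minimal usage sketch (illustrative, not part of the original header): a
// statically allocated, immortal object header can be initialized like this
// (the variable name is hypothetical):
//
//     static PyVarObject _PyExample_Header = _PyVarObject_HEAD_INIT(&PyTuple_Type, 0);
//
// Core code uses these internal copies instead of the object.h macros so the
// initializers stay fully designated and never mix with non-designated ones,
// which C++20 forbids.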

PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalRefcountErrorFunc(
    const char *func,
    const char *message);

#define _Py_FatalRefcountError(message) \
    _Py_FatalRefcountErrorFunc(__func__, (message))


#ifdef Py_REF_DEBUG
/* The symbol is only exposed in the API for the sake of extensions
   built against the pre-3.12 stable ABI. */
PyAPI_DATA(Py_ssize_t) _Py_RefTotal;

extern void _Py_AddRefTotal(PyThreadState *, Py_ssize_t);
extern void _Py_IncRefTotal(PyThreadState *);
extern void _Py_DecRefTotal(PyThreadState *);

#  define _Py_DEC_REFTOTAL(interp) \
    interp->object_state.reftotal--
#endif

// Increment reference count by n
static inline void _Py_RefcntAdd(PyObject* op, Py_ssize_t n)
{
    if (_Py_IsImmortal(op)) {
        return;
    }
#ifdef Py_REF_DEBUG
    _Py_AddRefTotal(_PyThreadState_GET(), n);
#endif
#if !defined(Py_GIL_DISABLED)
    op->ob_refcnt += n;
#else
    if (_Py_IsOwnedByCurrentThread(op)) {
        uint32_t local = op->ob_ref_local;
        Py_ssize_t refcnt = (Py_ssize_t)local + n;
#  if PY_SSIZE_T_MAX > UINT32_MAX
        if (refcnt > (Py_ssize_t)UINT32_MAX) {
            // Make the object immortal if the 32-bit local reference count
            // would overflow.
            refcnt = _Py_IMMORTAL_REFCNT_LOCAL;
        }
#  endif
        _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, (uint32_t)refcnt);
    }
    else {
        _Py_atomic_add_ssize(&op->ob_ref_shared, (n << _Py_REF_SHARED_SHIFT));
    }
#endif
}
#define _Py_RefcntAdd(op, n) _Py_RefcntAdd(_PyObject_CAST(op), n)
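// A minimal usage sketch (illustrative, not part of the original header): bulk
// increfs are handy when one object is about to be stored in n slots at once,
// e.g. when filling a freshly allocated container; the names are hypothetical:
//
//     for (Py_ssize_t i = 0; i < n; i++) {
//         items[i] = value;
//     }
//     _Py_RefcntAdd(value, n);   // one refcount update instead of n Py_INCREF calls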

extern void _Py_SetImmortal(PyObject *op);
extern void _Py_SetImmortalUntracked(PyObject *op);

// Makes an immortal object mortal again with the specified refcnt. Should only
// be used during runtime finalization.
static inline void _Py_SetMortal(PyObject *op, Py_ssize_t refcnt)
{
    if (op) {
        assert(_Py_IsImmortalLoose(op));
#ifdef Py_GIL_DISABLED
        op->ob_tid = _Py_UNOWNED_TID;
        op->ob_ref_local = 0;
        op->ob_ref_shared = _Py_REF_SHARED(refcnt, _Py_REF_MERGED);
#else
        op->ob_refcnt = refcnt;
#endif
    }
}

/* _Py_ClearImmortal() should only be used during runtime finalization. */
static inline void _Py_ClearImmortal(PyObject *op)
{
    if (op) {
        _Py_SetMortal(op, 1);
        Py_DECREF(op);
    }
}
#define _Py_ClearImmortal(op) \
    do { \
        _Py_ClearImmortal(_PyObject_CAST(op)); \
        op = NULL; \
    } while (0)
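// A minimal usage sketch (illustrative, not part of the original header):
// during runtime finalization an immortal singleton cached in a hypothetical
// "state" struct could be released like this:
//
//     _Py_ClearImmortal(state->cached_constant);
//
// The macro makes the object mortal with a refcount of 1, decrefs it (which
// may deallocate it), and sets the variable to NULL.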

// Mark an object as supporting deferred reference counting. This is a no-op
// in the default (with GIL) build. Objects that use deferred reference
// counting should be tracked by the GC so that they are eventually collected.
extern void _PyObject_SetDeferredRefcount(PyObject *op);

static inline int
_PyObject_HasDeferredRefcount(PyObject *op)
{
#ifdef Py_GIL_DISABLED
    return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_DEFERRED);
#else
    return 0;
#endif
}

#if !defined(Py_GIL_DISABLED)
static inline void
_Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct)
{
    if (_Py_IsImmortal(op)) {
        return;
    }
    _Py_DECREF_STAT_INC();
#ifdef Py_REF_DEBUG
    _Py_DEC_REFTOTAL(PyInterpreterState_Get());
#endif
    if (--op->ob_refcnt != 0) {
        assert(op->ob_refcnt > 0);
    }
    else {
#ifdef Py_TRACE_REFS
        _Py_ForgetReference(op);
#endif
        struct _reftracer_runtime_state *tracer = &_PyRuntime.ref_tracer;
        if (tracer->tracer_func != NULL) {
            void* data = tracer->tracer_data;
            tracer->tracer_func(op, PyRefTracer_DESTROY, data);
        }
        destruct(op);
    }
}

static inline void
_Py_DECREF_NO_DEALLOC(PyObject *op)
{
    if (_Py_IsImmortal(op)) {
        return;
    }
    _Py_DECREF_STAT_INC();
#ifdef Py_REF_DEBUG
    _Py_DEC_REFTOTAL(PyInterpreterState_Get());
#endif
    op->ob_refcnt--;
#ifdef Py_DEBUG
    if (op->ob_refcnt <= 0) {
        _Py_FatalRefcountError("Expected a positive remaining refcount");
    }
#endif
}
#else
// TODO: implement Py_DECREF specializations for Py_GIL_DISABLED build
static inline void
_Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct)
{
    Py_DECREF(op);
}

static inline void
_Py_DECREF_NO_DEALLOC(PyObject *op)
{
    Py_DECREF(op);
}

static inline int
_Py_REF_IS_MERGED(Py_ssize_t ob_ref_shared)
{
    return (ob_ref_shared & _Py_REF_SHARED_FLAG_MASK) == _Py_REF_MERGED;
}

static inline int
_Py_REF_IS_QUEUED(Py_ssize_t ob_ref_shared)
{
    return (ob_ref_shared & _Py_REF_SHARED_FLAG_MASK) == _Py_REF_QUEUED;
}

// Merge the local and shared reference count fields and add `extra` to the
// refcount when merging.
Py_ssize_t _Py_ExplicitMergeRefcount(PyObject *op, Py_ssize_t extra);
#endif // !defined(Py_GIL_DISABLED)

#ifdef Py_REF_DEBUG
#  undef _Py_DEC_REFTOTAL
#endif


extern int _PyType_CheckConsistency(PyTypeObject *type);
extern int _PyDict_CheckConsistency(PyObject *mp, int check_content);

/* Update the Python traceback of an object. This function must be called
   when a memory block is reused from a free list.

   Internal function called by _Py_NewReference(). */
extern int _PyTraceMalloc_TraceRef(PyObject *op, PyRefTracerEvent event, void*);

// Fast inlined version of PyType_HasFeature()
static inline int
_PyType_HasFeature(PyTypeObject *type, unsigned long feature) {
    return ((FT_ATOMIC_LOAD_ULONG_RELAXED(type->tp_flags) & feature) != 0);
}

extern void _PyType_InitCache(PyInterpreterState *interp);

extern PyStatus _PyObject_InitState(PyInterpreterState *interp);
extern void _PyObject_FiniState(PyInterpreterState *interp);
extern bool _PyRefchain_IsTraced(PyInterpreterState *interp, PyObject *obj);

/* Inline functions trading binary compatibility for speed:
   _PyObject_Init() is the fast version of PyObject_Init(), and
   _PyObject_InitVar() is the fast version of PyObject_InitVar().

   These inline functions must not be called with op=NULL. */
static inline void
_PyObject_Init(PyObject *op, PyTypeObject *typeobj)
{
    assert(op != NULL);
    Py_SET_TYPE(op, typeobj);
    assert(_PyType_HasFeature(typeobj, Py_TPFLAGS_HEAPTYPE) || _Py_IsImmortalLoose(typeobj));
    Py_INCREF(typeobj);
    _Py_NewReference(op);
}

static inline void
_PyObject_InitVar(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size)
{
    assert(op != NULL);
    assert(typeobj != &PyLong_Type);
    _PyObject_Init((PyObject *)op, typeobj);
    Py_SET_SIZE(op, size);
}
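// A minimal usage sketch (illustrative, not part of the original header): a
// type-specific allocator for a hypothetical fixed-size object would
// typically pair a raw allocation with _PyObject_Init():
//
//     MyObject *obj = PyObject_Malloc(sizeof(MyObject));   // "MyObject" is hypothetical
//     if (obj == NULL) {
//         return PyErr_NoMemory();
//     }
//     _PyObject_Init((PyObject *)obj, &MyObject_Type);
//     return (PyObject *)obj;
//
// Variable-size objects would use _PyObject_InitVar() to also set ob_size.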


/* Tell the GC to track this object.
 *
 * The object must not already be tracked by the GC.
 *
 * NB: While the object is tracked by the collector, it must be safe to call
 * its tp_traverse method.
 *
 * Internal note: interp->gc.generation0->_gc_prev doesn't have any bit flags
 * because it is not an object header. So we don't use _PyGCHead_PREV() and
 * _PyGCHead_SET_PREV() for it, to avoid unnecessary bitwise operations.
 *
 * See also the public PyObject_GC_Track() function.
 */
static inline void _PyObject_GC_TRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, !_PyObject_GC_IS_TRACKED(op),
                          "object already tracked by the garbage collector",
                          filename, lineno, __func__);
#ifdef Py_GIL_DISABLED
    _PyObject_SET_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    PyGC_Head *gc = _Py_AS_GC(op);
    _PyObject_ASSERT_FROM(op,
                          (gc->_gc_prev & _PyGC_PREV_MASK_COLLECTING) == 0,
                          "object is in generation which is garbage collected",
                          filename, lineno, __func__);

    PyInterpreterState *interp = _PyInterpreterState_GET();
    PyGC_Head *generation0 = interp->gc.generation0;
    PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev);
    _PyGCHead_SET_NEXT(last, gc);
    _PyGCHead_SET_PREV(gc, last);
    _PyGCHead_SET_NEXT(gc, generation0);
    generation0->_gc_prev = (uintptr_t)gc;
#endif
}

/* Tell the GC to stop tracking this object.
 *
 * Internal note: this may be called while the GC is running, so the
 * _PyGC_PREV_MASK_COLLECTING bit must be cleared, but the
 * _PyGC_PREV_MASK_FINALIZED bit is kept.
 *
 * The object must be tracked by the GC.
 *
 * See also the public PyObject_GC_UnTrack() function, which accepts an
 * object that is not tracked.
 */
static inline void _PyObject_GC_UNTRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, _PyObject_GC_IS_TRACKED(op),
                          "object not tracked by the garbage collector",
                          filename, lineno, __func__);

#ifdef Py_GIL_DISABLED
    _PyObject_CLEAR_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    PyGC_Head *gc = _Py_AS_GC(op);
    PyGC_Head *prev = _PyGCHead_PREV(gc);
    PyGC_Head *next = _PyGCHead_NEXT(gc);
    _PyGCHead_SET_NEXT(prev, next);
    _PyGCHead_SET_PREV(next, prev);
    gc->_gc_next = 0;
    gc->_gc_prev &= _PyGC_PREV_MASK_FINALIZED;
#endif
}
// Macros that accept any type for the parameter and automatically pass the
// filename and the line number (if NDEBUG is not defined) where the macro
// is called.
#ifdef NDEBUG
#  define _PyObject_GC_TRACK(op) \
        _PyObject_GC_TRACK(_PyObject_CAST(op))
#  define _PyObject_GC_UNTRACK(op) \
        _PyObject_GC_UNTRACK(_PyObject_CAST(op))
#else
#  define _PyObject_GC_TRACK(op) \
        _PyObject_GC_TRACK(__FILE__, __LINE__, _PyObject_CAST(op))
#  define _PyObject_GC_UNTRACK(op) \
        _PyObject_GC_UNTRACK(__FILE__, __LINE__, _PyObject_CAST(op))
#endif

#ifdef Py_GIL_DISABLED

/* Tries to increment an object's reference count.
 *
 * This is a specialized version of _Py_TryIncref that only succeeds if the
 * object is immortal or local to this thread. It does not handle the case
 * where the reference count modification requires an atomic operation. This
 * allows call sites to specialize for the immortal/local case.
 */
static inline int
_Py_TryIncrefFast(PyObject *op) {
    uint32_t local = _Py_atomic_load_uint32_relaxed(&op->ob_ref_local);
    local += 1;
    if (local == 0) {
        // immortal
        return 1;
    }
    if (_Py_IsOwnedByCurrentThread(op)) {
        _Py_INCREF_STAT_INC();
        _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, local);
#ifdef Py_REF_DEBUG
        _Py_IncRefTotal(_PyThreadState_GET());
#endif
        return 1;
    }
    return 0;
}

static inline int
_Py_TryIncRefShared(PyObject *op)
{
    Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
    for (;;) {
        // If the shared refcount is zero and the object is either merged or
        // not marked as possibly having weak references, then we cannot
        // safely incref it.
        if (shared == 0 || shared == _Py_REF_MERGED) {
            return 0;
        }

        if (_Py_atomic_compare_exchange_ssize(
                &op->ob_ref_shared,
                &shared,
                shared + (1 << _Py_REF_SHARED_SHIFT))) {
#ifdef Py_REF_DEBUG
            _Py_IncRefTotal(_PyThreadState_GET());
#endif
            _Py_INCREF_STAT_INC();
            return 1;
        }
    }
}

/* Tries to incref the object op and ensures that *src still points to it. */
static inline int
_Py_TryIncrefCompare(PyObject **src, PyObject *op)
{
    if (_Py_TryIncrefFast(op)) {
        return 1;
    }
    if (!_Py_TryIncRefShared(op)) {
        return 0;
    }
    if (op != _Py_atomic_load_ptr(src)) {
        Py_DECREF(op);
        return 0;
    }
    return 1;
}
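// A minimal usage sketch (illustrative, not part of the original header): in
// the free-threaded build, reading a strong reference out of a slot that
// another thread may overwrite concurrently typically retries until the
// incref and the re-read of the slot agree; "slot" is a hypothetical
// PyObject** field:
//
//     PyObject *value;
//     do {
//         value = _Py_atomic_load_ptr(slot);
//     } while (value != NULL && !_Py_TryIncrefCompare(slot, value));
//
// _Py_XGetRef() below implements essentially this loop.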

/* Loads and increfs an object from ptr, which may contain a NULL value.
   Safe with concurrent (atomic) updates to ptr.
   NOTE: The writer must set maybe-weakref on the stored object! */
static inline PyObject *
_Py_XGetRef(PyObject **ptr)
{
    for (;;) {
        PyObject *value = _Py_atomic_load_ptr(ptr);
        if (value == NULL) {
            return value;
        }
        if (_Py_TryIncrefCompare(ptr, value)) {
            return value;
        }
    }
}

/* Attempts to load and incref an object from ptr. Returns NULL
   on failure, which may be due to a NULL value or a concurrent update. */
static inline PyObject *
_Py_TryXGetRef(PyObject **ptr)
{
    PyObject *value = _Py_atomic_load_ptr(ptr);
    if (value == NULL) {
        return value;
    }
    if (_Py_TryIncrefCompare(ptr, value)) {
        return value;
    }
    return NULL;
}

/* Like Py_NewRef but also optimistically sets _Py_REF_MAYBE_WEAKREF
   on objects owned by a different thread. */
static inline PyObject *
_Py_NewRefWithLock(PyObject *op)
{
    if (_Py_TryIncrefFast(op)) {
        return op;
    }
#ifdef Py_REF_DEBUG
    _Py_IncRefTotal(_PyThreadState_GET());
#endif
    _Py_INCREF_STAT_INC();
    for (;;) {
        Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
        Py_ssize_t new_shared = shared + (1 << _Py_REF_SHARED_SHIFT);
        if ((shared & _Py_REF_SHARED_FLAG_MASK) == 0) {
            new_shared |= _Py_REF_MAYBE_WEAKREF;
        }
        if (_Py_atomic_compare_exchange_ssize(
                &op->ob_ref_shared,
                &shared,
                new_shared)) {
            return op;
        }
    }
}

static inline PyObject *
_Py_XNewRefWithLock(PyObject *obj)
{
    if (obj == NULL) {
        return NULL;
    }
    return _Py_NewRefWithLock(obj);
}

static inline void
_PyObject_SetMaybeWeakref(PyObject *op)
{
    if (_Py_IsImmortal(op)) {
        return;
    }
    for (;;) {
        Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
        if ((shared & _Py_REF_SHARED_FLAG_MASK) != 0) {
            // Nothing to do if it's in WEAKREFS, QUEUED, or MERGED states.
            return;
        }
        if (_Py_atomic_compare_exchange_ssize(
                &op->ob_ref_shared, &shared, shared | _Py_REF_MAYBE_WEAKREF)) {
            return;
        }
    }
}

#endif

/* Tries to incref op and returns 1 if successful or 0 otherwise. */
static inline int
_Py_TryIncref(PyObject *op)
{
#ifdef Py_GIL_DISABLED
    return _Py_TryIncrefFast(op) || _Py_TryIncRefShared(op);
#else
    if (Py_REFCNT(op) > 0) {
        Py_INCREF(op);
        return 1;
    }
    return 0;
#endif
}
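// A minimal usage sketch (illustrative, not part of the original header):
// unlike Py_INCREF, _Py_TryIncref() can refuse to create a new reference to
// an object whose refcount already reached zero, so callers must handle
// failure; "candidate" is a hypothetical borrowed pointer read from a cache:
//
//     if (_Py_TryIncref(candidate)) {
//         return candidate;      // now a strong reference
//     }
//     return NULL;               // object is being (or has been) destroyed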

#ifdef Py_REF_DEBUG
extern void _PyInterpreterState_FinalizeRefTotal(PyInterpreterState *);
extern void _Py_FinalizeRefTotal(_PyRuntimeState *);
extern void _PyDebug_PrintTotalRefs(void);
#endif

#ifdef Py_TRACE_REFS
extern void _Py_AddToAllObjects(PyObject *op);
extern void _Py_PrintReferences(PyInterpreterState *, FILE *);
extern void _Py_PrintReferenceAddresses(PyInterpreterState *, FILE *);
#endif


/* Return the *address* of the object's weaklist. The address may be
 * dereferenced to get the current head of the weaklist. This is useful
 * for iterating over the linked list of weakrefs, especially when the
 * list is being modified externally (e.g. refs getting removed).
 *
 * The returned pointer should not be used to change the head of the list
 * nor should it be used to add, remove, or swap any refs in the list.
 * That is the sole responsibility of the code in weakrefobject.c.
 */
static inline PyObject **
_PyObject_GET_WEAKREFS_LISTPTR(PyObject *op)
{
    if (PyType_Check(op) &&
        ((PyTypeObject *)op)->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) {
        PyInterpreterState *interp = _PyInterpreterState_GET();
        managed_static_type_state *state = _PyStaticType_GetState(
            interp, (PyTypeObject *)op);
        return _PyStaticType_GET_WEAKREFS_LISTPTR(state);
    }
    // Essentially _PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET():
    Py_ssize_t offset = Py_TYPE(op)->tp_weaklistoffset;
    return (PyObject **)((char *)op + offset);
}
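// A minimal usage sketch (illustrative, not part of the original header):
// iterating the weaklist through the returned address without modifying it;
// the variable names are hypothetical:
//
//     PyWeakReference *ref = *(PyWeakReference **)_PyObject_GET_WEAKREFS_LISTPTR(obj);
//     while (ref != NULL) {
//         /* inspect ref (read-only) */
//         ref = ref->wr_next;
//     }
//
// All structural changes to the list are left to weakrefobject.c.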

/* This is a special case of _PyObject_GET_WEAKREFS_LISTPTR().
 * Only the most fundamental lookup path is used.
 * Consequently, static types should not be used.
 *
 * For static builtin types the returned pointer will always point
 * to a NULL tp_weaklist. This is fine for any deallocation cases,
 * since static types are never deallocated and static builtin types
 * are only finalized at the end of runtime finalization.
 *
 * If the weaklist for static types is actually needed then use
 * _PyObject_GET_WEAKREFS_LISTPTR().
 */
static inline PyWeakReference **
_PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET(PyObject *op)
{
    assert(!PyType_Check(op) ||
           ((PyTypeObject *)op)->tp_flags & Py_TPFLAGS_HEAPTYPE);
    Py_ssize_t offset = Py_TYPE(op)->tp_weaklistoffset;
    return (PyWeakReference **)((char *)op + offset);
}

// Fast inlined version of PyObject_IS_GC()
static inline int
_PyObject_IS_GC(PyObject *obj)
{
    PyTypeObject *type = Py_TYPE(obj);
    return (PyType_IS_GC(type)
            && (type->tp_is_gc == NULL || type->tp_is_gc(obj)));
}

// Fast inlined version of PyObject_Hash()
static inline Py_hash_t
_PyObject_HashFast(PyObject *op)
{
    if (PyUnicode_CheckExact(op)) {
        Py_hash_t hash = FT_ATOMIC_LOAD_SSIZE_RELAXED(
            _PyASCIIObject_CAST(op)->hash);
        if (hash != -1) {
            return hash;
        }
    }
    return PyObject_Hash(op);
}
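// A minimal usage sketch (illustrative, not part of the original header):
// lookup code on hot paths hashes a key that is very often an exact str, so
// the cached Unicode hash is reused when available; "key" is a hypothetical
// PyObject* and the error handling mirrors PyObject_Hash():
//
//     Py_hash_t hash = _PyObject_HashFast(key);
//     if (hash == -1) {
//         return NULL;   // an exception is set (e.g. unhashable key)
//     }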

// Fast inlined version of PyType_IS_GC()
#define _PyType_IS_GC(t) _PyType_HasFeature((t), Py_TPFLAGS_HAVE_GC)

static inline size_t
_PyType_PreHeaderSize(PyTypeObject *tp)
{
    return (
#ifndef Py_GIL_DISABLED
        _PyType_IS_GC(tp) * sizeof(PyGC_Head) +
#endif
        _PyType_HasFeature(tp, Py_TPFLAGS_PREHEADER) * 2 * sizeof(PyObject *)
    );
}

void _PyObject_GC_Link(PyObject *op);

// Usage: assert(_Py_CheckSlotResult(obj, "__getitem__", result != NULL));
extern int _Py_CheckSlotResult(
    PyObject *obj,
    const char *slot_name,
    int success);

// Test if a type supports weak references
static inline int _PyType_SUPPORTS_WEAKREFS(PyTypeObject *type) {
    return (type->tp_weaklistoffset != 0);
}

extern PyObject* _PyType_AllocNoTrack(PyTypeObject *type, Py_ssize_t nitems);
extern PyObject *_PyType_NewManagedObject(PyTypeObject *type);

extern PyTypeObject* _PyType_CalculateMetaclass(PyTypeObject *, PyObject *);
extern PyObject* _PyType_GetDocFromInternalDoc(const char *, const char *);
extern PyObject* _PyType_GetTextSignatureFromInternalDoc(const char *, const char *, int);
extern int _PyObject_SetAttributeErrorContext(PyObject *v, PyObject* name);

void _PyObject_InitInlineValues(PyObject *obj, PyTypeObject *tp);
extern int _PyObject_StoreInstanceAttribute(PyObject *obj,
                                            PyObject *name, PyObject *value);
extern bool _PyObject_TryGetInstanceAttribute(PyObject *obj, PyObject *name,
                                              PyObject **attr);

#ifdef Py_GIL_DISABLED
#  define MANAGED_DICT_OFFSET    (((Py_ssize_t)sizeof(PyObject *))*-1)
#  define MANAGED_WEAKREF_OFFSET (((Py_ssize_t)sizeof(PyObject *))*-2)
#else
#  define MANAGED_DICT_OFFSET    (((Py_ssize_t)sizeof(PyObject *))*-3)
#  define MANAGED_WEAKREF_OFFSET (((Py_ssize_t)sizeof(PyObject *))*-4)
#endif

typedef union {
    PyDictObject *dict;
} PyManagedDictPointer;

static inline PyManagedDictPointer *
_PyObject_ManagedDictPointer(PyObject *obj)
{
    assert(Py_TYPE(obj)->tp_flags & Py_TPFLAGS_MANAGED_DICT);
    return (PyManagedDictPointer *)((char *)obj + MANAGED_DICT_OFFSET);
}

static inline PyDictObject *
_PyObject_GetManagedDict(PyObject *obj)
{
    PyManagedDictPointer *dorv = _PyObject_ManagedDictPointer(obj);
    return (PyDictObject *)FT_ATOMIC_LOAD_PTR_ACQUIRE(dorv->dict);
}

static inline PyDictValues *
_PyObject_InlineValues(PyObject *obj)
{
    assert(Py_TYPE(obj)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
    assert(Py_TYPE(obj)->tp_flags & Py_TPFLAGS_MANAGED_DICT);
    assert(Py_TYPE(obj)->tp_basicsize == sizeof(PyObject));
    return (PyDictValues *)((char *)obj + sizeof(PyObject));
}

extern PyObject ** _PyObject_ComputedDictPointer(PyObject *);
extern int _PyObject_IsInstanceDictEmpty(PyObject *);

// Export for 'math' shared extension
PyAPI_FUNC(PyObject*) _PyObject_LookupSpecial(PyObject *, PyObject *);

extern int _PyObject_IsAbstract(PyObject *);

PyAPI_FUNC(int) _PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
extern PyObject* _PyObject_NextNotImplemented(PyObject *);

// Pickle support.
// Export for '_datetime' shared extension
PyAPI_FUNC(PyObject*) _PyObject_GetState(PyObject *);

/* C function call trampolines to mitigate bad function pointer casts.
 *
 * Typical native ABIs ignore additional arguments or fill in missing
 * values with 0/NULL in a function pointer cast. Compilers do not warn
 * when a function pointer is explicitly cast to an incompatible type.
 *
 * Bad fpcasts are an issue in WebAssembly: WASM's indirect_call has strict
 * function signature checks. Argument count, types, and return type must
 * match.
 *
 * Third-party code unintentionally relies on problematic fpcasts. The call
 * trampoline mitigates common occurrences of bad fpcasts on Emscripten.
 */
#if !(defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE))
#define _PyCFunction_TrampolineCall(meth, self, args) \
    (meth)((self), (args))
#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \
    (meth)((self), (args), (kw))
#endif // __EMSCRIPTEN__ && PY_CALL_TRAMPOLINE
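// A minimal usage sketch (illustrative, not part of the original header):
// call sites invoke a METH_VARARGS C function through the trampoline instead
// of calling the (possibly mis-cast) pointer directly, so the Emscripten
// build can route it through a signature-checked helper; "meth", "self" and
// "args" are hypothetical locals:
//
//     PyObject *res = _PyCFunction_TrampolineCall(meth, self, args);
//
// On non-Emscripten builds the macro above expands to a plain direct call.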

// Export these 2 symbols for '_pickle' shared extension
PyAPI_DATA(PyTypeObject) _PyNone_Type;
PyAPI_DATA(PyTypeObject) _PyNotImplemented_Type;

// Maps Py_LT to Py_GT, ..., Py_GE to Py_LE.
// Export for the stable ABI.
PyAPI_DATA(int) _Py_SwappedOp[];

extern void _Py_GetConstant_Init(void);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_OBJECT_H */