• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2 
3   Reference Cycle Garbage Collection
4   ==================================
5 
6   Neil Schemenauer <nas@arctrix.com>
7 
8   Based on a post on the python-dev list.  Ideas from Guido van Rossum,
9   Eric Tiedemann, and various others.
10 
11   http://www.arctrix.com/nas/python/gc/
12 
13   The following mailing list threads provide a historical perspective on
14   the design of this module.  Note that a fair amount of refinement has
15   occurred since those discussions.
16 
17   http://mail.python.org/pipermail/python-dev/2000-March/002385.html
18   http://mail.python.org/pipermail/python-dev/2000-March/002434.html
19   http://mail.python.org/pipermail/python-dev/2000-March/002497.html
20 
21   For a highlevel view of the collection process, read the collect
22   function.
23 
24 */
25 
26 #include "Python.h"
27 #include "internal/context.h"
28 #include "internal/mem.h"
29 #include "internal/pystate.h"
30 #include "frameobject.h"        /* for PyFrame_ClearFreeList */
31 #include "pydtrace.h"
32 #include "pytime.h"             /* for _PyTime_GetMonotonicClock() */
33 
34 /*[clinic input]
35 module gc
36 [clinic start generated code]*/
37 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/
38 
39 /* Get an object's GC head */
40 #define AS_GC(o) ((PyGC_Head *)(o)-1)
41 
42 /* Get the object given the GC head */
43 #define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
44 
45 /* Python string to use if unhandled exception occurs */
46 static PyObject *gc_str = NULL;
47 
48 /* set for debugging information */
49 #define DEBUG_STATS             (1<<0) /* print collection statistics */
50 #define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
51 #define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
52 #define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
53 #define DEBUG_LEAK              DEBUG_COLLECTABLE | \
54                 DEBUG_UNCOLLECTABLE | \
55                 DEBUG_SAVEALL
56 
57 #define GEN_HEAD(n) (&_PyRuntime.gc.generations[n].head)
58 
59 void
_PyGC_Initialize(struct _gc_runtime_state * state)60 _PyGC_Initialize(struct _gc_runtime_state *state)
61 {
62     state->enabled = 1; /* automatic collection enabled? */
63 
64 #define _GEN_HEAD(n) (&state->generations[n].head)
65     struct gc_generation generations[NUM_GENERATIONS] = {
66         /* PyGC_Head,                                 threshold,      count */
67         {{{_GEN_HEAD(0), _GEN_HEAD(0), 0}},           700,            0},
68         {{{_GEN_HEAD(1), _GEN_HEAD(1), 0}},           10,             0},
69         {{{_GEN_HEAD(2), _GEN_HEAD(2), 0}},           10,             0},
70     };
71     for (int i = 0; i < NUM_GENERATIONS; i++) {
72         state->generations[i] = generations[i];
73     };
74     state->generation0 = GEN_HEAD(0);
75     struct gc_generation permanent_generation = {
76           {{&state->permanent_generation.head, &state->permanent_generation.head, 0}}, 0, 0
77     };
78     state->permanent_generation = permanent_generation;
79 }
80 
81 /*--------------------------------------------------------------------------
82 gc_refs values.
83 
84 Between collections, every gc'ed object has one of two gc_refs values:
85 
86 GC_UNTRACKED
87     The initial state; objects returned by PyObject_GC_Malloc are in this
88     state.  The object doesn't live in any generation list, and its
89     tp_traverse slot must not be called.
90 
91 GC_REACHABLE
92     The object lives in some generation list, and its tp_traverse is safe to
93     call.  An object transitions to GC_REACHABLE when PyObject_GC_Track
94     is called.
95 
96 During a collection, gc_refs can temporarily take on other states:
97 
98 >= 0
99     At the start of a collection, update_refs() copies the true refcount
100     to gc_refs, for each object in the generation being collected.
101     subtract_refs() then adjusts gc_refs so that it equals the number of
102     times an object is referenced directly from outside the generation
103     being collected.
104     gc_refs remains >= 0 throughout these steps.
105 
106 GC_TENTATIVELY_UNREACHABLE
107     move_unreachable() then moves objects not reachable (whether directly or
108     indirectly) from outside the generation into an "unreachable" set.
109     Objects that are found to be reachable have gc_refs set to GC_REACHABLE
110     again.  Objects that are found to be unreachable have gc_refs set to
111     GC_TENTATIVELY_UNREACHABLE.  It's "tentatively" because the pass doing
112     this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
113     transition back to GC_REACHABLE.
114 
115     Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
116     for collection.  If it's decided not to collect such an object (e.g.,
117     it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
118 ----------------------------------------------------------------------------
119 */
120 #define GC_UNTRACKED                    _PyGC_REFS_UNTRACKED
121 #define GC_REACHABLE                    _PyGC_REFS_REACHABLE
122 #define GC_TENTATIVELY_UNREACHABLE      _PyGC_REFS_TENTATIVELY_UNREACHABLE
123 
124 #define IS_TRACKED(o) (_PyGC_REFS(o) != GC_UNTRACKED)
125 #define IS_REACHABLE(o) (_PyGC_REFS(o) == GC_REACHABLE)
126 #define IS_TENTATIVELY_UNREACHABLE(o) ( \
127     _PyGC_REFS(o) == GC_TENTATIVELY_UNREACHABLE)
128 
129 /*** list functions ***/
130 
131 static void
gc_list_init(PyGC_Head * list)132 gc_list_init(PyGC_Head *list)
133 {
134     list->gc.gc_prev = list;
135     list->gc.gc_next = list;
136 }
137 
138 static int
gc_list_is_empty(PyGC_Head * list)139 gc_list_is_empty(PyGC_Head *list)
140 {
141     return (list->gc.gc_next == list);
142 }
143 
#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to `list`: link it in just before the header node,
 * i.e. at the tail of the circular list. */
static void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    node->gc.gc_next = list;
    node->gc.gc_prev = list->gc.gc_prev;
    node->gc.gc_prev->gc.gc_next = node;
    list->gc.gc_prev = node;
}
#endif
156 
157 /* Remove `node` from the gc list it's currently in. */
158 static void
gc_list_remove(PyGC_Head * node)159 gc_list_remove(PyGC_Head *node)
160 {
161     node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
162     node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
163     node->gc.gc_next = NULL; /* object is not currently tracked */
164 }
165 
166 /* Move `node` from the gc list it's currently in (which is not explicitly
167  * named here) to the end of `list`.  This is semantically the same as
168  * gc_list_remove(node) followed by gc_list_append(node, list).
169  */
170 static void
gc_list_move(PyGC_Head * node,PyGC_Head * list)171 gc_list_move(PyGC_Head *node, PyGC_Head *list)
172 {
173     PyGC_Head *new_prev;
174     PyGC_Head *current_prev = node->gc.gc_prev;
175     PyGC_Head *current_next = node->gc.gc_next;
176     /* Unlink from current list. */
177     current_prev->gc.gc_next = current_next;
178     current_next->gc.gc_prev = current_prev;
179     /* Relink at end of new list. */
180     new_prev = node->gc.gc_prev = list->gc.gc_prev;
181     new_prev->gc.gc_next = list->gc.gc_prev = node;
182     node->gc.gc_next = list;
183 }
184 
185 /* append list `from` onto list `to`; `from` becomes an empty list */
186 static void
gc_list_merge(PyGC_Head * from,PyGC_Head * to)187 gc_list_merge(PyGC_Head *from, PyGC_Head *to)
188 {
189     PyGC_Head *tail;
190     assert(from != to);
191     if (!gc_list_is_empty(from)) {
192         tail = to->gc.gc_prev;
193         tail->gc.gc_next = from->gc.gc_next;
194         tail->gc.gc_next->gc.gc_prev = tail;
195         to->gc.gc_prev = from->gc.gc_prev;
196         to->gc.gc_prev->gc.gc_next = to;
197     }
198     gc_list_init(from);
199 }
200 
201 static Py_ssize_t
gc_list_size(PyGC_Head * list)202 gc_list_size(PyGC_Head *list)
203 {
204     PyGC_Head *gc;
205     Py_ssize_t n = 0;
206     for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
207         n++;
208     }
209     return n;
210 }
211 
212 /* Append objects in a GC list to a Python list.
213  * Return 0 if all OK, < 0 if error (out of memory for list).
214  */
215 static int
append_objects(PyObject * py_list,PyGC_Head * gc_list)216 append_objects(PyObject *py_list, PyGC_Head *gc_list)
217 {
218     PyGC_Head *gc;
219     for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
220         PyObject *op = FROM_GC(gc);
221         if (op != py_list) {
222             if (PyList_Append(py_list, op)) {
223                 return -1; /* exception */
224             }
225         }
226     }
227     return 0;
228 }
229 
230 /*** end of list stuff ***/
231 
232 
/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 for all objects
 * in containers, and is GC_REACHABLE for all tracked gc objects not in
 * containers (those are simply not visited here).
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        /* Every tracked object in this generation should be in the
         * between-collections GC_REACHABLE state on entry. */
        assert(_PyGCHead_REFS(gc) == GC_REACHABLE);
        _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something-- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        assert(_PyGCHead_REFS(gc) != 0);
    }
}
265 
266 /* A traversal callback for subtract_refs. */
267 static int
visit_decref(PyObject * op,void * data)268 visit_decref(PyObject *op, void *data)
269 {
270     assert(op != NULL);
271     if (PyObject_IS_GC(op)) {
272         PyGC_Head *gc = AS_GC(op);
273         /* We're only interested in gc_refs for objects in the
274          * generation being collected, which can be recognized
275          * because only they have positive gc_refs.
276          */
277         assert(_PyGCHead_REFS(gc) != 0); /* else refcount was too small */
278         if (_PyGCHead_REFS(gc) > 0)
279             _PyGCHead_DECREF(gc);
280     }
281     return 0;
282 }
283 
284 /* Subtract internal references from gc_refs.  After this, gc_refs is >= 0
285  * for all objects in containers, and is GC_REACHABLE for all tracked gc
286  * objects not in containers.  The ones with gc_refs > 0 are directly
287  * reachable from outside containers, and so can't be collected.
288  */
289 static void
subtract_refs(PyGC_Head * containers)290 subtract_refs(PyGC_Head *containers)
291 {
292     traverseproc traverse;
293     PyGC_Head *gc = containers->gc.gc_next;
294     for (; gc != containers; gc=gc->gc.gc_next) {
295         traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
296         (void) traverse(FROM_GC(gc),
297                        (visitproc)visit_decref,
298                        NULL);
299     }
300 }
301 
302 /* A traversal callback for move_unreachable. */
303 static int
visit_reachable(PyObject * op,PyGC_Head * reachable)304 visit_reachable(PyObject *op, PyGC_Head *reachable)
305 {
306     if (PyObject_IS_GC(op)) {
307         PyGC_Head *gc = AS_GC(op);
308         const Py_ssize_t gc_refs = _PyGCHead_REFS(gc);
309 
310         if (gc_refs == 0) {
311             /* This is in move_unreachable's 'young' list, but
312              * the traversal hasn't yet gotten to it.  All
313              * we need to do is tell move_unreachable that it's
314              * reachable.
315              */
316             _PyGCHead_SET_REFS(gc, 1);
317         }
318         else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
319             /* This had gc_refs = 0 when move_unreachable got
320              * to it, but turns out it's reachable after all.
321              * Move it back to move_unreachable's 'young' list,
322              * and move_unreachable will eventually get to it
323              * again.
324              */
325             gc_list_move(gc, reachable);
326             _PyGCHead_SET_REFS(gc, 1);
327         }
328         /* Else there's nothing to do.
329          * If gc_refs > 0, it must be in move_unreachable's 'young'
330          * list, and move_unreachable will eventually get to it.
331          * If gc_refs == GC_REACHABLE, it's either in some other
332          * generation so we don't care about it, or move_unreachable
333          * already dealt with it.
334          * If gc_refs == GC_UNTRACKED, it must be ignored.
335          */
336          else {
337             assert(gc_refs > 0
338                    || gc_refs == GC_REACHABLE
339                    || gc_refs == GC_UNTRACKED);
340          }
341     }
342     return 0;
343 }
344 
/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young have gc_refs = GC_REACHABLE, and all objects in
 * unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All tracked
 * gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    PyGC_Head *gc = young->gc.gc_next;

    /* Invariants:  all objects "to the left" of us in young have gc_refs
     * = GC_REACHABLE, and are indeed reachable (directly or indirectly)
     * from outside the young list as it was at entry.  All other objects
     * from the original young "to the left" of us are in unreachable now,
     * and have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        PyGC_Head *next;

        if (_PyGCHead_REFS(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(_PyGCHead_REFS(gc) > 0);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            /* `next` must be read only after traverse returns (it may
             * have appended to young), but before the possible untrack
             * below removes gc from the list. */
            next = gc->gc.gc_next;
            if (PyTuple_CheckExact(op)) {
                _PyTuple_MaybeUntrack(op);
            }
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            next = gc->gc.gc_next;
            gc_list_move(gc, unreachable);
            _PyGCHead_SET_REFS(gc, GC_TENTATIVELY_UNREACHABLE);
        }
        gc = next;
    }
}
406 
407 /* Try to untrack all currently tracked dictionaries */
408 static void
untrack_dicts(PyGC_Head * head)409 untrack_dicts(PyGC_Head *head)
410 {
411     PyGC_Head *next, *gc = head->gc.gc_next;
412     while (gc != head) {
413         PyObject *op = FROM_GC(gc);
414         next = gc->gc.gc_next;
415         if (PyDict_CheckExact(op))
416             _PyDict_MaybeUntrack(op);
417         gc = next;
418     }
419 }
420 
421 /* Return true if object has a pre-PEP 442 finalization method. */
422 static int
has_legacy_finalizer(PyObject * op)423 has_legacy_finalizer(PyObject *op)
424 {
425     return op->ob_type->tp_del != NULL;
426 }
427 
428 /* Move the objects in unreachable with tp_del slots into `finalizers`.
429  * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
430  * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
431  */
432 static void
move_legacy_finalizers(PyGC_Head * unreachable,PyGC_Head * finalizers)433 move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
434 {
435     PyGC_Head *gc;
436     PyGC_Head *next;
437 
438     /* March over unreachable.  Move objects with finalizers into
439      * `finalizers`.
440      */
441     for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
442         PyObject *op = FROM_GC(gc);
443 
444         assert(IS_TENTATIVELY_UNREACHABLE(op));
445         next = gc->gc.gc_next;
446 
447         if (has_legacy_finalizer(op)) {
448             gc_list_move(gc, finalizers);
449             _PyGCHead_SET_REFS(gc, GC_REACHABLE);
450         }
451     }
452 }
453 
454 /* A traversal callback for move_legacy_finalizer_reachable. */
455 static int
visit_move(PyObject * op,PyGC_Head * tolist)456 visit_move(PyObject *op, PyGC_Head *tolist)
457 {
458     if (PyObject_IS_GC(op)) {
459         if (IS_TENTATIVELY_UNREACHABLE(op)) {
460             PyGC_Head *gc = AS_GC(op);
461             gc_list_move(gc, tolist);
462             _PyGCHead_SET_REFS(gc, GC_REACHABLE);
463         }
464     }
465     return 0;
466 }
467 
468 /* Move objects that are reachable from finalizers, from the unreachable set
469  * into finalizers set.
470  */
471 static void
move_legacy_finalizer_reachable(PyGC_Head * finalizers)472 move_legacy_finalizer_reachable(PyGC_Head *finalizers)
473 {
474     traverseproc traverse;
475     PyGC_Head *gc = finalizers->gc.gc_next;
476     for (; gc != finalizers; gc = gc->gc.gc_next) {
477         /* Note that the finalizers list may grow during this. */
478         traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
479         (void) traverse(FROM_GC(gc),
480                         (visitproc)visit_move,
481                         (void *)finalizers);
482     }
483 }
484 
/* Clear all weakrefs to unreachable objects, and if such a weakref has a
 * callback, invoke it if necessary.  Note that it's possible for such
 * weakrefs to be outside the unreachable set -- indeed, those are precisely
 * the weakrefs whose callbacks must be invoked.  See gc_weakref.txt for
 * overview & some details.  Some weakrefs with callbacks may be reclaimed
 * directly by this routine; the number reclaimed is the return value.  Other
 * weakrefs with callbacks may be moved into the `old` generation.  Objects
 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
 * unreachable are left at GC_TENTATIVELY_UNREACHABLE.  When this returns,
 * no object in `unreachable` is weakly referenced anymore.
 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                                PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;                  /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            assert(wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            assert(wr->wr_object == Py_None);
            if (wr->wr_callback == NULL)
                continue;                       /* no callback */

    /* Headache time.  `op` is going away, and is weakly referenced by
     * `wr`, which has a callback.  Should the callback be invoked?  If wr
     * is also trash, no:
     *
     * 1. There's no need to call it.  The object and the weakref are
     *    both going away, so it's legitimate to pretend the weakref is
     *    going away first.  The user has to ensure a weakref outlives its
     *    referent if they want a guarantee that the wr callback will get
     *    invoked.
     *
     * 2. It may be catastrophic to call it.  If the callback is also in
     *    cyclic trash (CT), then although the CT is unreachable from
     *    outside the current generation, CT may be reachable from the
     *    callback.  Then the callback could resurrect insane objects.
     *
     * Since the callback is never needed and may be unsafe in this case,
     * wr is simply left in the unreachable set.  Note that because we
     * already called _PyWeakref_ClearRef(wr), its callback will never
     * trigger.
     *
     * OTOH, if wr isn't part of CT, we should invoke the callback:  the
     * weakref outlived the trash.  Note that since wr isn't CT in this
     * case, its callback can't be CT either -- wr acted as an external
     * root to this generation, and therefore its callback did too.  So
     * nothing in CT is reachable from the callback either, so it's hard
     * to imagine how calling it later could create a problem for us.  wr
     * is moved to wrcb_to_call in this case.
     */
            if (IS_TENTATIVELY_UNREACHABLE(wr))
                continue;
            assert(IS_REACHABLE(wr));

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = wrcb_to_call.gc.gc_next;
        op = FROM_GC(gc);
        assert(IS_REACHABLE(op));
        assert(PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        assert(callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        /* If gc is still the first node of wrcb_to_call, the DECREF did
         * not free it: it survived, so move it to `old`.  Otherwise its
         * dealloc already unlinked it and it counts as freed. */
        if (wrcb_to_call.gc.gc_next == gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else
            ++num_freed;
    }

    return num_freed;
}
637 
638 static void
debug_cycle(const char * msg,PyObject * op)639 debug_cycle(const char *msg, PyObject *op)
640 {
641     PySys_FormatStderr("gc: %s <%s %p>\n",
642                        msg, Py_TYPE(op)->tp_name, op);
643 }
644 
645 /* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
646  * only from such cycles).
647  * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
648  * garbage list (a Python list), else only the objects in finalizers with
649  * __del__ methods are appended to garbage.  All objects in finalizers are
650  * merged into the old list regardless.
651  */
652 static void
handle_legacy_finalizers(PyGC_Head * finalizers,PyGC_Head * old)653 handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
654 {
655     PyGC_Head *gc = finalizers->gc.gc_next;
656 
657     if (_PyRuntime.gc.garbage == NULL) {
658         _PyRuntime.gc.garbage = PyList_New(0);
659         if (_PyRuntime.gc.garbage == NULL)
660             Py_FatalError("gc couldn't create gc.garbage list");
661     }
662     for (; gc != finalizers; gc = gc->gc.gc_next) {
663         PyObject *op = FROM_GC(gc);
664 
665         if ((_PyRuntime.gc.debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
666             if (PyList_Append(_PyRuntime.gc.garbage, op) < 0)
667                 break;
668         }
669     }
670 
671     gc_list_merge(finalizers, old);
672 }
673 
/* Run first-time finalizers (if any) on all the objects in collectable.
 * Note that this may remove some (or even all) of the objects from the
 * list, due to refcounts falling to 0.
 */
static void
finalize_garbage(PyGC_Head *collectable)
{
    destructor finalize;
    PyGC_Head seen;

    /* While we're going through the loop, `finalize(op)` may cause op, or
     * other objects, to be reclaimed via refcounts falling to zero.  So
     * there's little we can rely on about the structure of the input
     * `collectable` list across iterations.  For safety, we always take the
     * first object in that list and move it to a temporary `seen` list.
     * If objects vanish from the `collectable` and `seen` lists we don't
     * care.
     */
    gc_list_init(&seen);

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);
        gc_list_move(gc, &seen);
        /* Only run tp_finalize once per object (FINALIZED flag), and only
         * for types that declare the PEP 442 finalizer slot. */
        if (!_PyGCHead_FINALIZED(gc) &&
                PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
                (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
            _PyGCHead_SET_FINALIZED(gc, 1);
            /* Keep op alive across the finalizer call even if the
             * finalizer drops all other references to it. */
            Py_INCREF(op);
            finalize(op);
            Py_DECREF(op);
        }
    }
    /* Put the survivors back where the caller expects them. */
    gc_list_merge(&seen, collectable);
}
709 
710 /* Walk the collectable list and check that they are really unreachable
711    from the outside (some objects could have been resurrected by a
712    finalizer). */
713 static int
check_garbage(PyGC_Head * collectable)714 check_garbage(PyGC_Head *collectable)
715 {
716     PyGC_Head *gc;
717     for (gc = collectable->gc.gc_next; gc != collectable;
718          gc = gc->gc.gc_next) {
719         _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
720         assert(_PyGCHead_REFS(gc) != 0);
721     }
722     subtract_refs(collectable);
723     for (gc = collectable->gc.gc_next; gc != collectable;
724          gc = gc->gc.gc_next) {
725         assert(_PyGCHead_REFS(gc) >= 0);
726         if (_PyGCHead_REFS(gc) != 0)
727             return -1;
728     }
729     return 0;
730 }
731 
732 static void
revive_garbage(PyGC_Head * collectable)733 revive_garbage(PyGC_Head *collectable)
734 {
735     PyGC_Head *gc;
736     for (gc = collectable->gc.gc_next; gc != collectable;
737          gc = gc->gc.gc_next) {
738         _PyGCHead_SET_REFS(gc, GC_REACHABLE);
739     }
740 }
741 
/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
{
    inquiry clear;

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);

        if (_PyRuntime.gc.debug & DEBUG_SAVEALL) {
            /* Save everything in gc.garbage instead of breaking cycles.
             * NOTE(review): PyList_Append's error return is ignored here. */
            PyList_Append(_PyRuntime.gc.garbage, op);
        }
        else {
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                /* Hold a temporary reference: tp_clear may drop op's
                 * refcount to zero, and op must stay valid while we're
                 * still inside the call. */
                Py_INCREF(op);
                clear(op);
                Py_DECREF(op);
            }
        }
        /* If op is still the first node, the clear/append above didn't
         * deallocate it (which would have unlinked it). */
        if (collectable->gc.gc_next == gc) {
            /* object is still alive, move it, it may die later */
            gc_list_move(gc, old);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
}
772 
/* Clear all free lists
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 * Return values (number of freed items) are deliberately discarded.
 */
static void
clear_freelists(void)
{
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
    (void)PyContext_ClearFreeList();
}
793 
/* This is the main function.  Read this to understand how the
 * collection process works.
 *
 * Collects 'generation' and all younger generations.  Returns the total
 * number of unreachable objects found (collected + uncollectable); the
 * optional out-parameters n_collected / n_uncollectable receive the two
 * components.  If 'nofail' is true, any pending exception is cleared
 * instead of being reported. */
static Py_ssize_t
collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
        int nofail)
{
    int i;
    Py_ssize_t m = 0; /* # objects collected */
    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
    PyGC_Head *young; /* the generation we are examining */
    PyGC_Head *old; /* next older generation */
    PyGC_Head unreachable; /* non-problematic unreachable trash */
    PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
    PyGC_Head *gc;
    _PyTime_t t1 = 0;   /* initialize to prevent a compiler warning */

    struct gc_generation_stats *stats = &_PyRuntime.gc.generation_stats[generation];

    if (_PyRuntime.gc.debug & DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n",
                          generation);
        PySys_WriteStderr("gc: objects in each generation:");
        for (i = 0; i < NUM_GENERATIONS; i++)
            PySys_FormatStderr(" %zd",
                              gc_list_size(GEN_HEAD(i)));
        PySys_WriteStderr("\ngc: objects in permanent generation: %zd",
                         gc_list_size(&_PyRuntime.gc.permanent_generation.head));
        t1 = _PyTime_GetMonotonicClock();

        PySys_WriteStderr("\n");
    }

    if (PyDTrace_GC_START_ENABLED())
        PyDTrace_GC_START(generation);

    /* update collection and allocation counters */
    if (generation+1 < NUM_GENERATIONS)
        _PyRuntime.gc.generations[generation+1].count += 1;
    for (i = 0; i <= generation; i++)
        _PyRuntime.gc.generations[i].count = 0;

    /* merge younger generations with one we are currently collecting */
    for (i = 0; i < generation; i++) {
        gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
    }

    /* handy references */
    young = GEN_HEAD(generation);
    if (generation < NUM_GENERATIONS-1)
        old = GEN_HEAD(generation+1);
    else
        old = young;   /* oldest generation: survivors stay put */

    /* Using ob_refcnt and gc_refs, calculate which objects in the
     * container set are reachable from outside the set (i.e., have a
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(young);
    subtract_refs(young);

    /* Leave everything reachable from outside young in young, and move
     * everything else (in young) to unreachable.
     * NOTE:  This used to move the reachable objects into a reachable
     * set instead.  But most things usually turn out to be reachable,
     * so it's more efficient to move the unreachable things.
     */
    gc_list_init(&unreachable);
    move_unreachable(young, &unreachable);

    /* Move reachable objects to next generation. */
    if (young != old) {
        if (generation == NUM_GENERATIONS - 2) {
            _PyRuntime.gc.long_lived_pending += gc_list_size(young);
        }
        gc_list_merge(young, old);
    }
    else {
        /* We only untrack dicts in full collections, to avoid quadratic
           dict build-up. See issue #14775. */
        untrack_dicts(young);
        _PyRuntime.gc.long_lived_pending = 0;
        _PyRuntime.gc.long_lived_total = gc_list_size(young);
    }

    /* All objects in unreachable are trash, but objects reachable from
     * legacy finalizers (e.g. tp_del) can't safely be deleted.
     */
    gc_list_init(&finalizers);
    move_legacy_finalizers(&unreachable, &finalizers);
    /* finalizers contains the unreachable objects with a legacy finalizer;
     * unreachable objects reachable *from* those are also uncollectable,
     * and we move those into the finalizers list too.
     */
    move_legacy_finalizer_reachable(&finalizers);

    /* Collect statistics on collectable objects found and print
     * debugging information.
     */
    for (gc = unreachable.gc.gc_next; gc != &unreachable;
                    gc = gc->gc.gc_next) {
        m++;
        if (_PyRuntime.gc.debug & DEBUG_COLLECTABLE) {
            debug_cycle("collectable", FROM_GC(gc));
        }
    }

    /* Clear weakrefs and invoke callbacks as necessary. */
    m += handle_weakrefs(&unreachable, old);

    /* Call tp_finalize on objects which have one. */
    finalize_garbage(&unreachable);

    /* If any object was resurrected by its finalizer, the whole set is
     * revived and retried on a later collection; otherwise it is safe to
     * tear the cycles apart now. */
    if (check_garbage(&unreachable)) {
        revive_garbage(&unreachable);
        gc_list_merge(&unreachable, old);
    }
    else {
        /* Call tp_clear on objects in the unreachable set.  This will cause
         * the reference cycles to be broken.  It may also cause some objects
         * in finalizers to be freed.
         */
        delete_garbage(&unreachable, old);
    }

    /* Collect statistics on uncollectable objects found and print
     * debugging information. */
    for (gc = finalizers.gc.gc_next;
         gc != &finalizers;
         gc = gc->gc.gc_next) {
        n++;
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
            debug_cycle("uncollectable", FROM_GC(gc));
    }
    if (_PyRuntime.gc.debug & DEBUG_STATS) {
        _PyTime_t t2 = _PyTime_GetMonotonicClock();

        if (m == 0 && n == 0)
            PySys_WriteStderr("gc: done");
        else
            PySys_FormatStderr(
                "gc: done, %zd unreachable, %zd uncollectable",
                n+m, n);
        PySys_WriteStderr(", %.4fs elapsed\n",
                          _PyTime_AsSecondsDouble(t2 - t1));
    }

    /* Append instances in the uncollectable set to a Python
     * reachable list of garbage.  The programmer has to deal with
     * this if they insist on creating this type of structure.
     */
    handle_legacy_finalizers(&finalizers, old);

    /* Clear free list only during the collection of the highest
     * generation */
    if (generation == NUM_GENERATIONS-1) {
        clear_freelists();
    }

    /* An exception leaking out of a finalizer/callback is a bug in the
     * collector's callers; either swallow it (nofail) or abort loudly. */
    if (PyErr_Occurred()) {
        if (nofail) {
            PyErr_Clear();
        }
        else {
            if (gc_str == NULL)
                gc_str = PyUnicode_FromString("garbage collection");
            PyErr_WriteUnraisable(gc_str);
            Py_FatalError("unexpected exception during garbage collection");
        }
    }

    /* Update stats */
    if (n_collected)
        *n_collected = m;
    if (n_uncollectable)
        *n_uncollectable = n;
    stats->collections++;
    stats->collected += m;
    stats->uncollectable += n;

    if (PyDTrace_GC_DONE_ENABLED())
        PyDTrace_GC_DONE(n+m);

    return n+m;
}
979 
/* Invoke progress callbacks to notify clients that garbage collection
 * is starting or stopping.
 *
 * 'phase' is "start" or "stop"; the remaining arguments are packed into
 * an info dict passed to each callback.  Callback errors are reported as
 * unraisable and do not abort the loop. */
static void
invoke_gc_callback(const char *phase, int generation,
                   Py_ssize_t collected, Py_ssize_t uncollectable)
{
    Py_ssize_t i;
    PyObject *info = NULL;

    /* we may get called very early */
    if (_PyRuntime.gc.callbacks == NULL)
        return;
    /* The local variable cannot be rebound, check it for sanity */
    assert(_PyRuntime.gc.callbacks != NULL && PyList_CheckExact(_PyRuntime.gc.callbacks));
    if (PyList_GET_SIZE(_PyRuntime.gc.callbacks) != 0) {
        /* Only build the info dict if someone will actually receive it. */
        info = Py_BuildValue("{sisnsn}",
            "generation", generation,
            "collected", collected,
            "uncollectable", uncollectable);
        if (info == NULL) {
            PyErr_WriteUnraisable(NULL);
            return;
        }
    }
    for (i=0; i<PyList_GET_SIZE(_PyRuntime.gc.callbacks); i++) {
        PyObject *r, *cb = PyList_GET_ITEM(_PyRuntime.gc.callbacks, i);
        Py_INCREF(cb); /* make sure cb doesn't go away */
        r = PyObject_CallFunction(cb, "sO", phase, info);
        if (r == NULL) {
            PyErr_WriteUnraisable(cb);
        }
        else {
            Py_DECREF(r);
        }
        Py_DECREF(cb);
    }
    Py_XDECREF(info);
}
1019 
1020 /* Perform garbage collection of a generation and invoke
1021  * progress callbacks.
1022  */
1023 static Py_ssize_t
collect_with_callback(int generation)1024 collect_with_callback(int generation)
1025 {
1026     Py_ssize_t result, collected, uncollectable;
1027     invoke_gc_callback("start", generation, 0, 0);
1028     result = collect(generation, &collected, &uncollectable, 0);
1029     invoke_gc_callback("stop", generation, collected, uncollectable);
1030     return result;
1031 }
1032 
1033 static Py_ssize_t
collect_generations(void)1034 collect_generations(void)
1035 {
1036     int i;
1037     Py_ssize_t n = 0;
1038 
1039     /* Find the oldest generation (highest numbered) where the count
1040      * exceeds the threshold.  Objects in the that generation and
1041      * generations younger than it will be collected. */
1042     for (i = NUM_GENERATIONS-1; i >= 0; i--) {
1043         if (_PyRuntime.gc.generations[i].count > _PyRuntime.gc.generations[i].threshold) {
1044             /* Avoid quadratic performance degradation in number
1045                of tracked objects. See comments at the beginning
1046                of this file, and issue #4074.
1047             */
1048             if (i == NUM_GENERATIONS - 1
1049                 && _PyRuntime.gc.long_lived_pending < _PyRuntime.gc.long_lived_total / 4)
1050                 continue;
1051             n = collect_with_callback(i);
1052             break;
1053         }
1054     }
1055     return n;
1056 }
1057 
1058 #include "clinic/gcmodule.c.h"
1059 
/*[clinic input]
gc.enable

Enable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_enable_impl(PyObject *module)
/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
{
    /* Setting the flag re-enables threshold-triggered collections. */
    _PyRuntime.gc.enabled = 1;
    Py_RETURN_NONE;
}
1073 
/*[clinic input]
gc.disable

Disable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_disable_impl(PyObject *module)
/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
{
    /* Clearing the flag suppresses threshold-triggered collections;
     * explicit gc.collect() calls still work. */
    _PyRuntime.gc.enabled = 0;
    Py_RETURN_NONE;
}
1087 
/*[clinic input]
gc.isenabled -> bool

Returns true if automatic garbage collection is enabled.
[clinic start generated code]*/

static int
gc_isenabled_impl(PyObject *module)
/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
{
    /* Clinic converts the int to a Python bool. */
    return _PyRuntime.gc.enabled;
}
1100 
1101 /*[clinic input]
1102 gc.collect -> Py_ssize_t
1103 
1104     generation: int(c_default="NUM_GENERATIONS - 1") = 2
1105 
1106 Run the garbage collector.
1107 
1108 With no arguments, run a full collection.  The optional argument
1109 may be an integer specifying which generation to collect.  A ValueError
1110 is raised if the generation number is invalid.
1111 
1112 The number of unreachable objects is returned.
1113 [clinic start generated code]*/
1114 
1115 static Py_ssize_t
gc_collect_impl(PyObject * module,int generation)1116 gc_collect_impl(PyObject *module, int generation)
1117 /*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
1118 {
1119     Py_ssize_t n;
1120 
1121     if (generation < 0 || generation >= NUM_GENERATIONS) {
1122         PyErr_SetString(PyExc_ValueError, "invalid generation");
1123         return -1;
1124     }
1125 
1126     if (_PyRuntime.gc.collecting)
1127         n = 0; /* already collecting, don't do anything */
1128     else {
1129         _PyRuntime.gc.collecting = 1;
1130         n = collect_with_callback(generation);
1131         _PyRuntime.gc.collecting = 0;
1132     }
1133 
1134     return n;
1135 }
1136 
/*[clinic input]
gc.set_debug

    flags: int
        An integer that can have the following bits turned on:
          DEBUG_STATS - Print statistics during collection.
          DEBUG_COLLECTABLE - Print collectable objects found.
          DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
            found.
          DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
          DEBUG_LEAK - Debug leaking programs (everything but STATS).
    /

Set the garbage collection debugging flags.

Debugging information is written to sys.stderr.
[clinic start generated code]*/

static PyObject *
gc_set_debug_impl(PyObject *module, int flags)
/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
{
    /* Flags replace (not OR into) the previous debug settings. */
    _PyRuntime.gc.debug = flags;

    Py_RETURN_NONE;
}
1163 
/*[clinic input]
gc.get_debug -> int

Get the garbage collection debugging flags.
[clinic start generated code]*/

static int
gc_get_debug_impl(PyObject *module)
/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
{
    /* Returns the raw DEBUG_* bit mask last set via gc.set_debug(). */
    return _PyRuntime.gc.debug;
}
1176 
PyDoc_STRVAR(gc_set_thresh__doc__,
"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
"\n"
"Sets the collection thresholds.  Setting threshold0 to zero disables\n"
"collection.\n");

/* gc.set_threshold(): parse up to three ints directly into the per-
 * generation threshold slots.  Omitted arguments leave the corresponding
 * thresholds unchanged (PyArg_ParseTuple skips missing optionals). */
static PyObject *
gc_set_thresh(PyObject *self, PyObject *args)
{
    int i;
    if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
                          &_PyRuntime.gc.generations[0].threshold,
                          &_PyRuntime.gc.generations[1].threshold,
                          &_PyRuntime.gc.generations[2].threshold))
        return NULL;
    for (i = 2; i < NUM_GENERATIONS; i++) {
        /* generations higher than 2 get the same threshold */
        _PyRuntime.gc.generations[i].threshold = _PyRuntime.gc.generations[2].threshold;
    }

    Py_RETURN_NONE;
}
1199 
/*[clinic input]
gc.get_threshold

Return the current collection thresholds.
[clinic start generated code]*/

static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
    /* Returns a 3-tuple of ints, one threshold per generation. */
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].threshold,
                         _PyRuntime.gc.generations[1].threshold,
                         _PyRuntime.gc.generations[2].threshold);
}
1215 
/*[clinic input]
gc.get_count

Return a three-tuple of the current collection counts.
[clinic start generated code]*/

static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
    /* Returns a 3-tuple of ints, one running count per generation. */
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].count,
                         _PyRuntime.gc.generations[1].count,
                         _PyRuntime.gc.generations[2].count);
}
1231 
1232 static int
referrersvisit(PyObject * obj,PyObject * objs)1233 referrersvisit(PyObject* obj, PyObject *objs)
1234 {
1235     Py_ssize_t i;
1236     for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1237         if (PyTuple_GET_ITEM(objs, i) == obj)
1238             return 1;
1239     return 0;
1240 }
1241 
/* Append to 'resultlist' every object in 'list' that refers to any of the
 * objects in the tuple 'objs'.  Returns 1 on success, 0 on error.
 * NOTE(review): tp_traverse is called unconditionally -- assumes every
 * object on a GC list has a non-NULL tp_traverse. */
static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
    PyGC_Head *gc;
    PyObject *obj;
    traverseproc traverse;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        obj = FROM_GC(gc);
        traverse = Py_TYPE(obj)->tp_traverse;
        /* Skip the argument tuple and the result list themselves. */
        if (obj == objs || obj == resultlist)
            continue;
        /* referrersvisit returns nonzero iff obj references one of objs. */
        if (traverse(obj, (visitproc)referrersvisit, objs)) {
            if (PyList_Append(resultlist, obj) < 0)
                return 0; /* error */
        }
    }
    return 1; /* no error */
}
1260 
PyDoc_STRVAR(gc_get_referrers__doc__,
"get_referrers(*objs) -> list\n\
Return the list of objects that directly refer to any of objs.");

/* gc.get_referrers(): scan every generation for tracked objects that
 * reference any of the arguments. */
static PyObject *
gc_get_referrers(PyObject *self, PyObject *args)
{
    int i;
    PyObject *result = PyList_New(0);
    if (!result) return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}
1280 
1281 /* Append obj to list; return true if error (out of memory), false if OK. */
1282 static int
referentsvisit(PyObject * obj,PyObject * list)1283 referentsvisit(PyObject *obj, PyObject *list)
1284 {
1285     return PyList_Append(list, obj) < 0;
1286 }
1287 
PyDoc_STRVAR(gc_get_referents__doc__,
"get_referents(*objs) -> list\n\
Return the list of objects that are directly referred to by objs.");

/* gc.get_referents(): collect, via tp_traverse, every object directly
 * referenced by any of the arguments.  Non-GC objects and objects whose
 * type has no tp_traverse are silently skipped. */
static PyObject *
gc_get_referents(PyObject *self, PyObject *args)
{
    Py_ssize_t i;
    PyObject *result = PyList_New(0);

    if (result == NULL)
        return NULL;

    for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
        traverseproc traverse;
        PyObject *obj = PyTuple_GET_ITEM(args, i);

        if (! PyObject_IS_GC(obj))
            continue;
        traverse = Py_TYPE(obj)->tp_traverse;
        if (! traverse)
            continue;
        /* referentsvisit appends each referent; nonzero means append failed. */
        if (traverse(obj, (visitproc)referentsvisit, result)) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}
1317 
1318 /*[clinic input]
1319 gc.get_objects
1320 
1321 Return a list of objects tracked by the collector (excluding the list returned).
1322 [clinic start generated code]*/
1323 
1324 static PyObject *
gc_get_objects_impl(PyObject * module)1325 gc_get_objects_impl(PyObject *module)
1326 /*[clinic end generated code: output=fcb95d2e23e1f750 input=9439fe8170bf35d8]*/
1327 {
1328     int i;
1329     PyObject* result;
1330 
1331     result = PyList_New(0);
1332     if (result == NULL)
1333         return NULL;
1334     for (i = 0; i < NUM_GENERATIONS; i++) {
1335         if (append_objects(result, GEN_HEAD(i))) {
1336             Py_DECREF(result);
1337             return NULL;
1338         }
1339     }
1340     return result;
1341 }
1342 
/*[clinic input]
gc.get_stats

Return a list of dictionaries containing per-generation statistics.
[clinic start generated code]*/

static PyObject *
gc_get_stats_impl(PyObject *module)
/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
{
    int i;
    PyObject *result;
    struct gc_generation_stats stats[NUM_GENERATIONS], *st;

    /* To get consistent values despite allocations while constructing
       the result list, we use a snapshot of the running stats. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        stats[i] = _PyRuntime.gc.generation_stats[i];
    }

    result = PyList_New(0);
    if (result == NULL)
        return NULL;

    /* One dict per generation: collections / collected / uncollectable. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        PyObject *dict;
        st = &stats[i];
        dict = Py_BuildValue("{snsnsn}",
                             "collections", st->collections,
                             "collected", st->collected,
                             "uncollectable", st->uncollectable
                            );
        if (dict == NULL)
            goto error;
        if (PyList_Append(result, dict)) {
            Py_DECREF(dict);
            goto error;
        }
        Py_DECREF(dict);
    }
    return result;

error:
    Py_XDECREF(result);
    return NULL;
}
1389 
1390 
1391 /*[clinic input]
1392 gc.is_tracked
1393 
1394     obj: object
1395     /
1396 
1397 Returns true if the object is tracked by the garbage collector.
1398 
1399 Simple atomic objects will return false.
1400 [clinic start generated code]*/
1401 
1402 static PyObject *
gc_is_tracked(PyObject * module,PyObject * obj)1403 gc_is_tracked(PyObject *module, PyObject *obj)
1404 /*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
1405 {
1406     PyObject *result;
1407 
1408     if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
1409         result = Py_True;
1410     else
1411         result = Py_False;
1412     Py_INCREF(result);
1413     return result;
1414 }
1415 
/*[clinic input]
gc.freeze

Freeze all current tracked objects and ignore them for future collections.

This can be used before a POSIX fork() call to make the gc copy-on-write friendly.
Note: collection before a POSIX fork() call may free pages for future allocation
which can cause copy-on-write.
[clinic start generated code]*/

static PyObject *
gc_freeze_impl(PyObject *module)
/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
{
    /* Move every generation's objects into the permanent generation,
     * which ordinary collections never scan, and reset the counters. */
    for (int i = 0; i < NUM_GENERATIONS; ++i) {
        gc_list_merge(GEN_HEAD(i), &_PyRuntime.gc.permanent_generation.head);
        _PyRuntime.gc.generations[i].count = 0;
    }
    Py_RETURN_NONE;
}
1436 
/*[clinic input]
gc.unfreeze

Unfreeze all objects in the permanent generation.

Put all objects in the permanent generation back into oldest generation.
[clinic start generated code]*/

static PyObject *
gc_unfreeze_impl(PyObject *module)
/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
{
    /* Inverse of gc.freeze(): frozen objects rejoin the oldest generation. */
    gc_list_merge(&_PyRuntime.gc.permanent_generation.head, GEN_HEAD(NUM_GENERATIONS-1));
    Py_RETURN_NONE;
}
1452 
/*[clinic input]
gc.get_freeze_count -> Py_ssize_t

Return the number of objects in the permanent generation.
[clinic start generated code]*/

static Py_ssize_t
gc_get_freeze_count_impl(PyObject *module)
/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
{
    /* Counts objects currently frozen via gc.freeze(). */
    return gc_list_size(&_PyRuntime.gc.permanent_generation.head);
}
1465 
1466 
/* Module docstring: one summary line per public function of the gc module. */
PyDoc_STRVAR(gc__doc__,
"This module provides access to the garbage collector for reference cycles.\n"
"\n"
"enable() -- Enable automatic garbage collection.\n"
"disable() -- Disable automatic garbage collection.\n"
"isenabled() -- Returns true if automatic collection is enabled.\n"
"collect() -- Do a full collection right now.\n"
"get_count() -- Return the current collection counts.\n"
"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
"set_debug() -- Set debugging flags.\n"
"get_debug() -- Get debugging flags.\n"
"set_threshold() -- Set the collection thresholds.\n"
"get_threshold() -- Return the current the collection thresholds.\n"
"get_objects() -- Return a list of all objects tracked by the collector.\n"
"is_tracked() -- Returns true if a given object is tracked.\n"
"get_referrers() -- Return the list of objects that refer to an object.\n"
"get_referents() -- Return the list of objects that an object refers to.\n"
"freeze() -- Freeze all tracked objects and ignore them for future collections.\n"
"unfreeze() -- Unfreeze all objects in the permanent generation.\n"
"get_freeze_count() -- Return the number of objects in the permanent generation.\n");
1487 
/* Method table for the gc module.  Clinic-generated entries are pulled in
 * via the *_METHODDEF macros; the remaining functions use hand-written
 * METH_VARARGS wrappers. */
static PyMethodDef GcMethods[] = {
    GC_ENABLE_METHODDEF
    GC_DISABLE_METHODDEF
    GC_ISENABLED_METHODDEF
    GC_SET_DEBUG_METHODDEF
    GC_GET_DEBUG_METHODDEF
    GC_GET_COUNT_METHODDEF
    {"set_threshold",  gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    GC_GET_THRESHOLD_METHODDEF
    GC_COLLECT_METHODDEF
    GC_GET_OBJECTS_METHODDEF
    GC_GET_STATS_METHODDEF
    GC_IS_TRACKED_METHODDEF
    {"get_referrers",  gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents",  gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    GC_FREEZE_METHODDEF
    GC_UNFREEZE_METHODDEF
    GC_GET_FREEZE_COUNT_METHODDEF
    {NULL,      NULL}           /* Sentinel */
};
1510 
/* Module definition.  m_size is -1: the module keeps global state (in
 * _PyRuntime.gc) and does not support multiple instances. */
static struct PyModuleDef gcmodule = {
    PyModuleDef_HEAD_INIT,
    "gc",              /* m_name */
    gc__doc__,         /* m_doc */
    -1,                /* m_size */
    GcMethods,         /* m_methods */
    NULL,              /* m_reload */
    NULL,              /* m_traverse */
    NULL,              /* m_clear */
    NULL               /* m_free */
};
1522 
1523 PyMODINIT_FUNC
PyInit_gc(void)1524 PyInit_gc(void)
1525 {
1526     PyObject *m;
1527 
1528     m = PyModule_Create(&gcmodule);
1529 
1530     if (m == NULL)
1531         return NULL;
1532 
1533     if (_PyRuntime.gc.garbage == NULL) {
1534         _PyRuntime.gc.garbage = PyList_New(0);
1535         if (_PyRuntime.gc.garbage == NULL)
1536             return NULL;
1537     }
1538     Py_INCREF(_PyRuntime.gc.garbage);
1539     if (PyModule_AddObject(m, "garbage", _PyRuntime.gc.garbage) < 0)
1540         return NULL;
1541 
1542     if (_PyRuntime.gc.callbacks == NULL) {
1543         _PyRuntime.gc.callbacks = PyList_New(0);
1544         if (_PyRuntime.gc.callbacks == NULL)
1545             return NULL;
1546     }
1547     Py_INCREF(_PyRuntime.gc.callbacks);
1548     if (PyModule_AddObject(m, "callbacks", _PyRuntime.gc.callbacks) < 0)
1549         return NULL;
1550 
1551 #define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
1552     ADD_INT(DEBUG_STATS);
1553     ADD_INT(DEBUG_COLLECTABLE);
1554     ADD_INT(DEBUG_UNCOLLECTABLE);
1555     ADD_INT(DEBUG_SAVEALL);
1556     ADD_INT(DEBUG_LEAK);
1557 #undef ADD_INT
1558     return m;
1559 }
1560 
/* API to invoke gc.collect() from C.
 *
 * Runs a full (oldest-generation) collection unless one is already in
 * progress.  Any exception pending on entry is saved and restored so the
 * collection cannot clobber it.  Returns the number of unreachable
 * objects found. */
Py_ssize_t
PyGC_Collect(void)
{
    Py_ssize_t n;

    if (_PyRuntime.gc.collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        PyObject *exc, *value, *tb;
        _PyRuntime.gc.collecting = 1;
        /* Preserve a pending exception across the collection. */
        PyErr_Fetch(&exc, &value, &tb);
        n = collect_with_callback(NUM_GENERATIONS - 1);
        PyErr_Restore(exc, value, tb);
        _PyRuntime.gc.collecting = 0;
    }

    return n;
}
1580 
1581 Py_ssize_t
_PyGC_CollectIfEnabled(void)1582 _PyGC_CollectIfEnabled(void)
1583 {
1584     if (!_PyRuntime.gc.enabled)
1585         return 0;
1586 
1587     return PyGC_Collect();
1588 }
1589 
/* Like PyGC_Collect(), but with nofail=1: any exception raised during
 * collection is cleared rather than reported.  Intended for interpreter
 * shutdown, where error reporting machinery may already be gone. */
Py_ssize_t
_PyGC_CollectNoFail(void)
{
    Py_ssize_t n;

    /* Ideally, this function is only called on interpreter shutdown,
       and therefore not recursively.  Unfortunately, when there are daemon
       threads, a daemon thread can start a cyclic garbage collection
       during interpreter shutdown (and then never finish it).
       See http://bugs.python.org/issue8713#msg195178 for an example.
       */
    if (_PyRuntime.gc.collecting)
        n = 0;
    else {
        _PyRuntime.gc.collecting = 1;
        /* nofail=1: swallow exceptions instead of Py_FatalError. */
        n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
        _PyRuntime.gc.collecting = 0;
    }
    return n;
}
1610 
/* At interpreter shutdown, warn about objects left in gc.garbage
 * (uncollectable objects), and with DEBUG_UNCOLLECTABLE also dump their
 * repr to stderr.  Skipped entirely under DEBUG_SAVEALL, where gc.garbage
 * intentionally holds everything. */
void
_PyGC_DumpShutdownStats(void)
{
    if (!(_PyRuntime.gc.debug & DEBUG_SAVEALL)
        && _PyRuntime.gc.garbage != NULL && PyList_GET_SIZE(_PyRuntime.gc.garbage) > 0) {
        const char *message;
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
            message = "gc: %zd uncollectable objects at " \
                "shutdown";
        else
            message = "gc: %zd uncollectable objects at " \
                "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
        /* PyErr_WarnFormat does too many things and we are at shutdown,
           the warnings module's dependencies (e.g. linecache) may be gone
           already. */
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
                                     "gc", NULL, message,
                                     PyList_GET_SIZE(_PyRuntime.gc.garbage)))
            PyErr_WriteUnraisable(NULL);
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE) {
            PyObject *repr = NULL, *bytes = NULL;
            repr = PyObject_Repr(_PyRuntime.gc.garbage);
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
                PyErr_WriteUnraisable(_PyRuntime.gc.garbage);
            else {
                PySys_WriteStderr(
                    "      %s\n",
                    PyBytes_AS_STRING(bytes)
                    );
            }
            Py_XDECREF(repr);
            Py_XDECREF(bytes);
        }
    }
}
1646 
/* Finalize the collector's module-level state: drop the callbacks list.
 * NOTE(review): gc.garbage is deliberately not cleared here -- presumably
 * handled elsewhere during shutdown; confirm against callers. */
void
_PyGC_Fini(void)
{
    Py_CLEAR(_PyRuntime.gc.callbacks);
}
1652 
/* for debugging: dump the object attached to GC header 'g' to stderr */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}
1659 
1660 /* extension modules might be compiled with GC support so these
1661    functions must always be available */
1662 
1663 #undef PyObject_GC_Track
1664 #undef PyObject_GC_UnTrack
1665 #undef PyObject_GC_Del
1666 #undef _PyObject_GC_Malloc
1667 
/* Public wrapper: start tracking 'op' (link it into generation 0). */
void
PyObject_GC_Track(void *op)
{
    _PyObject_GC_TRACK(op);
}
1673 
void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure:  the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object, so untracking an
     * already-untracked object must be a harmless no-op.
     */
    if (!IS_TRACKED(op))
        return;
    _PyObject_GC_UNTRACK(op);
}
1683 
1684 static PyObject *
_PyObject_GC_Alloc(int use_calloc,size_t basicsize)1685 _PyObject_GC_Alloc(int use_calloc, size_t basicsize)
1686 {
1687     PyObject *op;
1688     PyGC_Head *g;
1689     size_t size;
1690     if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
1691         return PyErr_NoMemory();
1692     size = sizeof(PyGC_Head) + basicsize;
1693     if (use_calloc)
1694         g = (PyGC_Head *)PyObject_Calloc(1, size);
1695     else
1696         g = (PyGC_Head *)PyObject_Malloc(size);
1697     if (g == NULL)
1698         return PyErr_NoMemory();
1699     g->gc.gc_refs = 0;
1700     _PyGCHead_SET_REFS(g, GC_UNTRACKED);
1701     _PyRuntime.gc.generations[0].count++; /* number of allocated GC objects */
1702     if (_PyRuntime.gc.generations[0].count > _PyRuntime.gc.generations[0].threshold &&
1703         _PyRuntime.gc.enabled &&
1704         _PyRuntime.gc.generations[0].threshold &&
1705         !_PyRuntime.gc.collecting &&
1706         !PyErr_Occurred()) {
1707         _PyRuntime.gc.collecting = 1;
1708         collect_generations();
1709         _PyRuntime.gc.collecting = 0;
1710     }
1711     op = FROM_GC(g);
1712     return op;
1713 }
1714 
PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    /* GC allocation without zero-filling the memory. */
    return _PyObject_GC_Alloc(0, basicsize);
}
1720 
PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    /* GC allocation with the memory zero-filled (calloc semantics). */
    return _PyObject_GC_Alloc(1, basicsize);
}
1726 
1727 PyObject *
_PyObject_GC_New(PyTypeObject * tp)1728 _PyObject_GC_New(PyTypeObject *tp)
1729 {
1730     PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
1731     if (op != NULL)
1732         op = PyObject_INIT(op, tp);
1733     return op;
1734 }
1735 
1736 PyVarObject *
_PyObject_GC_NewVar(PyTypeObject * tp,Py_ssize_t nitems)1737 _PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
1738 {
1739     size_t size;
1740     PyVarObject *op;
1741 
1742     if (nitems < 0) {
1743         PyErr_BadInternalCall();
1744         return NULL;
1745     }
1746     size = _PyObject_VAR_SIZE(tp, nitems);
1747     op = (PyVarObject *) _PyObject_GC_Malloc(size);
1748     if (op != NULL)
1749         op = PyObject_INIT_VAR(op, tp, nitems);
1750     return op;
1751 }
1752 
1753 PyVarObject *
_PyObject_GC_Resize(PyVarObject * op,Py_ssize_t nitems)1754 _PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
1755 {
1756     const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
1757     PyGC_Head *g = AS_GC(op);
1758     assert(!IS_TRACKED(op));
1759     if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
1760         return (PyVarObject *)PyErr_NoMemory();
1761     g = (PyGC_Head *)PyObject_REALLOC(g,  sizeof(PyGC_Head) + basicsize);
1762     if (g == NULL)
1763         return (PyVarObject *)PyErr_NoMemory();
1764     op = (PyVarObject *) FROM_GC(g);
1765     Py_SIZE(op) = nitems;
1766     return op;
1767 }
1768 
1769 void
PyObject_GC_Del(void * op)1770 PyObject_GC_Del(void *op)
1771 {
1772     PyGC_Head *g = AS_GC(op);
1773     if (IS_TRACKED(op))
1774         gc_list_remove(g);
1775     if (_PyRuntime.gc.generations[0].count > 0) {
1776         _PyRuntime.gc.generations[0].count--;
1777     }
1778     PyObject_FREE(g);
1779 }
1780