1 /* Frame object implementation */
2
3 #include "Python.h"
4 #include "pycore_object.h"
5 #include "pycore_pystate.h"
6
7 #include "code.h"
8 #include "frameobject.h"
9 #include "opcode.h"
10 #include "structmember.h"
11
12 #define OFF(x) offsetof(PyFrameObject, x)
13
/* Struct-member descriptors exposed on frame objects via tp_members.
   Everything except the two f_trace_* toggles is read-only from Python;
   f_lineno / f_locals / f_trace live in frame_getsetlist instead because
   they need computed getters/setters. */
static PyMemberDef frame_memberlist[] = {
    {"f_back",          T_OBJECT,       OFF(f_back),      READONLY},
    {"f_code",          T_OBJECT,       OFF(f_code),      READONLY},
    {"f_builtins",      T_OBJECT,       OFF(f_builtins),  READONLY},
    {"f_globals",       T_OBJECT,       OFF(f_globals),   READONLY},
    {"f_lasti",         T_INT,          OFF(f_lasti),     READONLY},
    {"f_trace_lines",   T_BOOL,         OFF(f_trace_lines), 0},
    {"f_trace_opcodes", T_BOOL,         OFF(f_trace_opcodes), 0},
    {NULL}      /* Sentinel */
};
24
25 static PyObject *
frame_getlocals(PyFrameObject * f,void * closure)26 frame_getlocals(PyFrameObject *f, void *closure)
27 {
28 if (PyFrame_FastToLocalsWithError(f) < 0)
29 return NULL;
30 Py_INCREF(f->f_locals);
31 return f->f_locals;
32 }
33
34 int
PyFrame_GetLineNumber(PyFrameObject * f)35 PyFrame_GetLineNumber(PyFrameObject *f)
36 {
37 if (f->f_trace)
38 return f->f_lineno;
39 else
40 return PyCode_Addr2Line(f->f_code, f->f_lasti);
41 }
42
43 static PyObject *
frame_getlineno(PyFrameObject * f,void * closure)44 frame_getlineno(PyFrameObject *f, void *closure)
45 {
46 return PyLong_FromLong(PyFrame_GetLineNumber(f));
47 }
48
49
50 /* Given the index of the effective opcode,
51 scan back to construct the oparg with EXTENDED_ARG */
52 static unsigned int
get_arg(const _Py_CODEUNIT * codestr,Py_ssize_t i)53 get_arg(const _Py_CODEUNIT *codestr, Py_ssize_t i)
54 {
55 _Py_CODEUNIT word;
56 unsigned int oparg = _Py_OPARG(codestr[i]);
57 if (i >= 1 && _Py_OPCODE(word = codestr[i-1]) == EXTENDED_ARG) {
58 oparg |= _Py_OPARG(word) << 8;
59 if (i >= 2 && _Py_OPCODE(word = codestr[i-2]) == EXTENDED_ARG) {
60 oparg |= _Py_OPARG(word) << 16;
61 if (i >= 3 && _Py_OPCODE(word = codestr[i-3]) == EXTENDED_ARG) {
62 oparg |= _Py_OPARG(word) << 24;
63 }
64 }
65 }
66 return oparg;
67 }
68
69
70 /* Setter for f_lineno - you can set f_lineno from within a trace function in
71 * order to jump to a given line of code, subject to some restrictions. Most
72 * lines are OK to jump to because they don't make any assumptions about the
73 * state of the stack (obvious because you could remove the line and the code
74 * would still work without any stack errors), but there are some constructs
75 * that limit jumping:
76 *
77 * o Lines with an 'except' statement on them can't be jumped to, because
78 * they expect an exception to be on the top of the stack.
79 * o Lines that live in a 'finally' block can't be jumped from or to, since
80 * the END_FINALLY expects to clean up the stack after the 'try' block.
81 * o 'try', 'with' and 'async with' blocks can't be jumped into because
82 * the blockstack needs to be set up before their code runs.
83 * o 'for' and 'async for' loops can't be jumped into because the
84 * iterator needs to be on the stack.
85 * o Jumps cannot be made from within a trace function invoked with a
86 * 'return' or 'exception' event since the eval loop has been exited at
87 * that time.
88 */
static int
frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno, void *Py_UNUSED(ignored))
{
    int new_lineno = 0;                 /* The new value of f_lineno */
    long l_new_lineno;
    int overflow;
    int new_lasti = 0;                  /* The new value of f_lasti */
    unsigned char *code = NULL;         /* The bytecode for the frame... */
    Py_ssize_t code_len = 0;            /* ...and its length */
    unsigned char *lnotab = NULL;       /* Iterating over co_lnotab */
    Py_ssize_t lnotab_len = 0;          /* (ditto) */
    int offset = 0;                     /* (ditto) */
    int line = 0;                       /* (ditto) */
    int addr = 0;                       /* (ditto) */
    int delta_iblock = 0;               /* Scanning the SETUPs and POPs */
    int delta = 0;
    int blockstack[CO_MAXBLOCKS];       /* Walking the 'finally' blocks */
    int blockstack_top = 0;             /* (ditto) */

    if (p_new_lineno == NULL) {
        PyErr_SetString(PyExc_AttributeError, "cannot delete attribute");
        return -1;
    }
    /* f_lineno must be an integer. */
    if (!PyLong_CheckExact(p_new_lineno)) {
        PyErr_SetString(PyExc_ValueError,
                        "lineno must be an integer");
        return -1;
    }

    /* Upon the 'call' trace event of a new frame, f->f_lasti is -1 and
     * f->f_trace is NULL, check first on the first condition.
     * Forbidding jumps from the 'call' event of a new frame is a side effect
     * of allowing to set f_lineno only from trace functions. */
    if (f->f_lasti == -1) {
        PyErr_Format(PyExc_ValueError,
                     "can't jump from the 'call' trace event of a new frame");
        return -1;
    }

    /* You can only do this from within a trace function, not via
     * _getframe or similar hackery. */
    if (!f->f_trace) {
        PyErr_Format(PyExc_ValueError,
                     "f_lineno can only be set by a trace function");
        return -1;
    }

    /* Forbid jumps upon a 'return' trace event (except after executing a
     * YIELD_VALUE or YIELD_FROM opcode, f_stacktop is not NULL in that case)
     * and upon an 'exception' trace event.
     * Jumps from 'call' trace events have already been forbidden above for new
     * frames, so this check does not change anything for 'call' events. */
    if (f->f_stacktop == NULL) {
        PyErr_SetString(PyExc_ValueError,
                        "can only jump from a 'line' trace event");
        return -1;
    }

    /* Fail if the line comes before the start of the code block. */
    l_new_lineno = PyLong_AsLongAndOverflow(p_new_lineno, &overflow);
    if (overflow
#if SIZEOF_LONG > SIZEOF_INT
        || l_new_lineno > INT_MAX
        || l_new_lineno < INT_MIN
#endif
       ) {
        PyErr_SetString(PyExc_ValueError,
                        "lineno out of range");
        return -1;
    }
    new_lineno = (int)l_new_lineno;

    if (new_lineno < f->f_code->co_firstlineno) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes before the current code block",
                     new_lineno);
        return -1;
    }
    else if (new_lineno == f->f_code->co_firstlineno) {
        new_lasti = 0;
        new_lineno = f->f_code->co_firstlineno;
    }
    else {
        /* Find the bytecode offset for the start of the given
         * line, or the first code-owning line after it.
         * co_lnotab is a sequence of (addr-delta, line-delta) byte
         * pairs; line deltas are signed chars, so lines can decrease. */
        char *tmp;
        PyBytes_AsStringAndSize(f->f_code->co_lnotab,
                                &tmp, &lnotab_len);
        lnotab = (unsigned char *) tmp;
        addr = 0;
        line = f->f_code->co_firstlineno;
        new_lasti = -1;
        for (offset = 0; offset < lnotab_len; offset += 2) {
            addr += lnotab[offset];
            line += (signed char)lnotab[offset+1];
            if (line >= new_lineno) {
                new_lasti = addr;
                new_lineno = line;
                break;
            }
        }
    }

    /* If we didn't reach the requested line, return an error. */
    if (new_lasti == -1) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes after the current code block",
                     new_lineno);
        return -1;
    }

    /* We're now ready to look at the bytecode. */
    PyBytes_AsStringAndSize(f->f_code->co_code, (char **)&code, &code_len);

    /* The trace function is called with a 'return' trace event after the
     * execution of a yield statement. */
    assert(f->f_lasti != -1);
    if (code[f->f_lasti] == YIELD_VALUE || code[f->f_lasti] == YIELD_FROM) {
        PyErr_SetString(PyExc_ValueError,
                        "can't jump from a yield statement");
        return -1;
    }

    /* You can't jump onto a line with an 'except' statement on it -
     * they expect to have an exception on the top of the stack, which
     * won't be true if you jump to them.  They always start with code
     * that either pops the exception using POP_TOP (plain 'except:'
     * lines do this) or duplicates the exception on the stack using
     * DUP_TOP (if there's an exception type specified).  See compile.c,
     * 'com_try_except' for the full details.  There aren't any other
     * cases (AFAIK) where a line's code can start with DUP_TOP or
     * POP_TOP, but if any ever appear, they'll be subject to the same
     * restriction (but with a different error message). */
    if (code[new_lasti] == DUP_TOP || code[new_lasti] == POP_TOP) {
        PyErr_SetString(PyExc_ValueError,
            "can't jump to 'except' line as there's no exception");
        return -1;
    }

    /* You can't jump into or out of a 'finally' block because the 'try'
     * block leaves something on the stack for the END_FINALLY to clean up.
     * So we walk the bytecode, maintaining a simulated blockstack.
     * 'blockstack' is a stack of the bytecode addresses of the starts of
     * the 'finally' blocks. */
    memset(blockstack, '\0', sizeof(blockstack));
    blockstack_top = 0;
    unsigned char prevop = NOP;
    /* One pass over the whole code object: each SETUP_* opcode pushes its
     * handler target onto the simulated blockstack and the matching
     * END_FINALLY pops it, so at any address we know which blocks enclose
     * it.  'delta' accumulates how many stack items must be popped to
     * leave the blocks we're jumping out of. */
    for (addr = 0; addr < code_len; addr += sizeof(_Py_CODEUNIT)) {
        unsigned char op = code[addr];
        switch (op) {
        case SETUP_FINALLY:
        case SETUP_WITH:
        case SETUP_ASYNC_WITH:
        case FOR_ITER: {
            unsigned int oparg = get_arg((const _Py_CODEUNIT *)code,
                                         addr / sizeof(_Py_CODEUNIT));
            int target_addr = addr + oparg + sizeof(_Py_CODEUNIT);
            assert(target_addr < code_len);
            /* Police block-jumping (you can't jump into the middle of a block)
             * and ensure that the blockstack finishes up in a sensible state (by
             * popping any blocks we're jumping out of).  We look at all the
             * blockstack operations between the current position and the new
             * one, and keep track of how many blocks we drop out of on the way.
             * By also keeping track of the lowest blockstack position we see, we
             * can tell whether the jump goes into any blocks without coming out
             * again - in that case we raise an exception below. */
            int first_in = addr < f->f_lasti && f->f_lasti < target_addr;
            int second_in = addr < new_lasti && new_lasti < target_addr;
            if (!first_in && second_in) {
                PyErr_SetString(PyExc_ValueError,
                                "can't jump into the middle of a block");
                return -1;
            }
            int in_for_loop = op == FOR_ITER || code[target_addr] == END_ASYNC_FOR;
            if (first_in && !second_in) {
                if (!delta_iblock) {
                    if (in_for_loop) {
                        /* Pop the iterators of any 'for' and 'async for' loop
                         * we're jumping out of. */
                        delta++;
                    }
                    else if (prevop == LOAD_CONST) {
                        /* Pops None pushed before SETUP_FINALLY. */
                        delta++;
                    }
                }
                if (!in_for_loop) {
                    delta_iblock++;
                }
            }
            /* 'for' loops use the value stack, not the block stack. */
            if (!in_for_loop) {
                blockstack[blockstack_top++] = target_addr;
            }
            break;
        }

        case END_FINALLY: {
            assert(blockstack_top > 0);
            int target_addr = blockstack[--blockstack_top];
            assert(target_addr <= addr);
            int first_in = target_addr <= f->f_lasti && f->f_lasti <= addr;
            int second_in = target_addr <= new_lasti && new_lasti <= addr;
            if (first_in != second_in) {
                op = code[target_addr];
                PyErr_Format(PyExc_ValueError,
                             "can't jump %s %s block",
                             second_in ? "into" : "out of",
                             (op == DUP_TOP || op == POP_TOP) ?
                                "an 'except'" : "a 'finally'");
                return -1;
            }
            break;
        }
        }
        prevop = op;
    }

    /* Verify that the blockstack tracking code didn't get lost. */
    assert(blockstack_top == 0);

    /* Pop any blocks that we're jumping out of. */
    if (delta_iblock > 0) {
        f->f_iblock -= delta_iblock;
        PyTryBlock *b = &f->f_blockstack[f->f_iblock];
        delta += (int)(f->f_stacktop - f->f_valuestack) - b->b_level;
        if (b->b_type == SETUP_FINALLY &&
            code[b->b_handler] == WITH_CLEANUP_START)
        {
            /* Pop the exit function. */
            delta++;
        }
    }
    /* Discard (and release) the value-stack items computed above. */
    while (delta > 0) {
        PyObject *v = (*--f->f_stacktop);
        Py_DECREF(v);
        delta--;
    }

    /* Finally set the new f_lineno and f_lasti and return OK. */
    f->f_lineno = new_lineno;
    f->f_lasti = new_lasti;
    return 0;
}
333
334 static PyObject *
frame_gettrace(PyFrameObject * f,void * closure)335 frame_gettrace(PyFrameObject *f, void *closure)
336 {
337 PyObject* trace = f->f_trace;
338
339 if (trace == NULL)
340 trace = Py_None;
341
342 Py_INCREF(trace);
343
344 return trace;
345 }
346
347 static int
frame_settrace(PyFrameObject * f,PyObject * v,void * closure)348 frame_settrace(PyFrameObject *f, PyObject* v, void *closure)
349 {
350 /* We rely on f_lineno being accurate when f_trace is set. */
351 f->f_lineno = PyFrame_GetLineNumber(f);
352
353 if (v == Py_None)
354 v = NULL;
355 Py_XINCREF(v);
356 Py_XSETREF(f->f_trace, v);
357
358 return 0;
359 }
360
361
/* Computed attributes: f_locals rebuilds the locals dict on access,
   f_lineno supports assignment from a trace function (jump), and
   f_trace installs/clears the per-frame trace function. */
static PyGetSetDef frame_getsetlist[] = {
    {"f_locals",        (getter)frame_getlocals, NULL, NULL},
    {"f_lineno",        (getter)frame_getlineno,
                    (setter)frame_setlineno, NULL},
    {"f_trace",         (getter)frame_gettrace, (setter)frame_settrace, NULL},
    {0}
};
369
370 /* Stack frames are allocated and deallocated at a considerable rate.
371 In an attempt to improve the speed of function calls, we:
372
373 1. Hold a single "zombie" frame on each code object. This retains
374 the allocated and initialised frame object from an invocation of
375 the code object. The zombie is reanimated the next time we need a
376 frame object for that code object. Doing this saves the malloc/
377 realloc required when using a free_list frame that isn't the
378 correct size. It also saves some field initialisation.
379
380 In zombie mode, no field of PyFrameObject holds a reference, but
381 the following fields are still valid:
382
383 * ob_type, ob_size, f_code, f_valuestack;
384
385 * f_locals, f_trace are NULL;
386
387 * f_localsplus does not require re-allocation and
388 the local variables in f_localsplus are NULL.
389
390 2. We also maintain a separate free list of stack frames (just like
391 floats are allocated in a special way -- see floatobject.c). When
392 a stack frame is on the free list, only the following members have
393 a meaning:
394 ob_type == &Frametype
395 f_back next item on free list, or NULL
396 f_stacksize size of value stack
397 ob_size size of localsplus
398 Note that the value and block stacks are preserved -- this can save
399 another malloc() call or two (and two free() calls as well!).
400 Also note that, unlike for integers, each frame object is a
401 malloc'ed object in its own right -- it is only the actual calls to
402 malloc() that we are trying to save here, not the administration.
403 After all, while a typical program may make millions of calls, a
404 call depth of more than 20 or 30 is probably already exceptional
405 unless the program contains run-away recursion. I hope.
406
407 Later, PyFrame_MAXFREELIST was added to bound the # of frames saved on
408 free_list. Else programs creating lots of cyclic trash involving
409 frames could provoke free_list into growing without bound.
410 */
411
/* Singly-linked free list of dead frames, chained through f_back;
   frame_dealloc pushes here and _PyFrame_New_NoTrack pops. */
static PyFrameObject *free_list = NULL;
static int numfree = 0;         /* number of frames currently in free_list */
/* max value for numfree */
#define PyFrame_MAXFREELIST 200
416
/* Deallocate a frame: release everything it references, then recycle the
   shell — first choice is the owning code object's zombie slot, second is
   the bounded free list, last resort is freeing the memory. */
static void _Py_HOT_FUNCTION
frame_dealloc(PyFrameObject *f)
{
    PyObject **p, **valuestack;
    PyCodeObject *co;

    if (_PyObject_GC_IS_TRACKED(f))
        _PyObject_GC_UNTRACK(f);

    /* Trashcan guard: deallocating a frame can trigger arbitrarily deep
       chains of deallocations; this bounds the C stack depth. */
    Py_TRASHCAN_SAFE_BEGIN(f)
    /* Kill all local variables */
    valuestack = f->f_valuestack;
    for (p = f->f_localsplus; p < valuestack; p++)
        Py_CLEAR(*p);

    /* Free stack */
    if (f->f_stacktop != NULL) {
        for (p = valuestack; p < f->f_stacktop; p++)
            Py_XDECREF(*p);
    }

    Py_XDECREF(f->f_back);
    Py_DECREF(f->f_builtins);
    Py_DECREF(f->f_globals);
    Py_CLEAR(f->f_locals);
    Py_CLEAR(f->f_trace);

    /* Recycle the shell.  A zombie frame holds no references (see the
       big comment above) but keeps its allocation and f_code pointer. */
    co = f->f_code;
    if (co->co_zombieframe == NULL)
        co->co_zombieframe = f;
    else if (numfree < PyFrame_MAXFREELIST) {
        ++numfree;
        f->f_back = free_list;   /* f_back doubles as the free-list link */
        free_list = f;
    }
    else
        PyObject_GC_Del(f);

    /* Dropped last: the zombie slot lives on this code object. */
    Py_DECREF(co);
    Py_TRASHCAN_SAFE_END(f)
}
458
459 static int
frame_traverse(PyFrameObject * f,visitproc visit,void * arg)460 frame_traverse(PyFrameObject *f, visitproc visit, void *arg)
461 {
462 PyObject **fastlocals, **p;
463 Py_ssize_t i, slots;
464
465 Py_VISIT(f->f_back);
466 Py_VISIT(f->f_code);
467 Py_VISIT(f->f_builtins);
468 Py_VISIT(f->f_globals);
469 Py_VISIT(f->f_locals);
470 Py_VISIT(f->f_trace);
471
472 /* locals */
473 slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars);
474 fastlocals = f->f_localsplus;
475 for (i = slots; --i >= 0; ++fastlocals)
476 Py_VISIT(*fastlocals);
477
478 /* stack */
479 if (f->f_stacktop != NULL) {
480 for (p = f->f_valuestack; p < f->f_stacktop; p++)
481 Py_VISIT(*p);
482 }
483 return 0;
484 }
485
486 static int
frame_tp_clear(PyFrameObject * f)487 frame_tp_clear(PyFrameObject *f)
488 {
489 PyObject **fastlocals, **p, **oldtop;
490 Py_ssize_t i, slots;
491
492 /* Before anything else, make sure that this frame is clearly marked
493 * as being defunct! Else, e.g., a generator reachable from this
494 * frame may also point to this frame, believe itself to still be
495 * active, and try cleaning up this frame again.
496 */
497 oldtop = f->f_stacktop;
498 f->f_stacktop = NULL;
499 f->f_executing = 0;
500
501 Py_CLEAR(f->f_trace);
502
503 /* locals */
504 slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars);
505 fastlocals = f->f_localsplus;
506 for (i = slots; --i >= 0; ++fastlocals)
507 Py_CLEAR(*fastlocals);
508
509 /* stack */
510 if (oldtop != NULL) {
511 for (p = f->f_valuestack; p < oldtop; p++)
512 Py_CLEAR(*p);
513 }
514 return 0;
515 }
516
517 static PyObject *
frame_clear(PyFrameObject * f,PyObject * Py_UNUSED (ignored))518 frame_clear(PyFrameObject *f, PyObject *Py_UNUSED(ignored))
519 {
520 if (f->f_executing) {
521 PyErr_SetString(PyExc_RuntimeError,
522 "cannot clear an executing frame");
523 return NULL;
524 }
525 if (f->f_gen) {
526 _PyGen_Finalize(f->f_gen);
527 assert(f->f_gen == NULL);
528 }
529 (void)frame_tp_clear(f);
530 Py_RETURN_NONE;
531 }
532
533 PyDoc_STRVAR(clear__doc__,
534 "F.clear(): clear most references held by the frame");
535
536 static PyObject *
frame_sizeof(PyFrameObject * f,PyObject * Py_UNUSED (ignored))537 frame_sizeof(PyFrameObject *f, PyObject *Py_UNUSED(ignored))
538 {
539 Py_ssize_t res, extras, ncells, nfrees;
540
541 ncells = PyTuple_GET_SIZE(f->f_code->co_cellvars);
542 nfrees = PyTuple_GET_SIZE(f->f_code->co_freevars);
543 extras = f->f_code->co_stacksize + f->f_code->co_nlocals +
544 ncells + nfrees;
545 /* subtract one as it is already included in PyFrameObject */
546 res = sizeof(PyFrameObject) + (extras-1) * sizeof(PyObject *);
547
548 return PyLong_FromSsize_t(res);
549 }
550
551 PyDoc_STRVAR(sizeof__doc__,
552 "F.__sizeof__() -> size of F in memory, in bytes");
553
554 static PyObject *
frame_repr(PyFrameObject * f)555 frame_repr(PyFrameObject *f)
556 {
557 int lineno = PyFrame_GetLineNumber(f);
558 return PyUnicode_FromFormat(
559 "<frame at %p, file %R, line %d, code %S>",
560 f, f->f_code->co_filename, lineno, f->f_code->co_name);
561 }
562
/* Methods exposed on frame objects: clear() and __sizeof__(). */
static PyMethodDef frame_methods[] = {
    {"clear",           (PyCFunction)frame_clear,       METH_NOARGS,
     clear__doc__},
    {"__sizeof__",      (PyCFunction)frame_sizeof,      METH_NOARGS,
     sizeof__doc__},
    {NULL,              NULL}   /* sentinel */
};
570
/* The type object for frames.  Frames participate in GC (HAVE_GC with
   traverse/clear) but are not subclassable from Python. */
PyTypeObject PyFrame_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "frame",
    sizeof(PyFrameObject),
    sizeof(PyObject *),                         /* tp_itemsize: localsplus slots */
    (destructor)frame_dealloc,                  /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    (reprfunc)frame_repr,                       /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    PyObject_GenericSetAttr,                    /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    (traverseproc)frame_traverse,               /* tp_traverse */
    (inquiry)frame_tp_clear,                    /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    frame_methods,                              /* tp_methods */
    frame_memberlist,                           /* tp_members */
    frame_getsetlist,                           /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
};
605
606 _Py_IDENTIFIER(__builtins__);
607
608 PyFrameObject* _Py_HOT_FUNCTION
_PyFrame_New_NoTrack(PyThreadState * tstate,PyCodeObject * code,PyObject * globals,PyObject * locals)609 _PyFrame_New_NoTrack(PyThreadState *tstate, PyCodeObject *code,
610 PyObject *globals, PyObject *locals)
611 {
612 PyFrameObject *back = tstate->frame;
613 PyFrameObject *f;
614 PyObject *builtins;
615 Py_ssize_t i;
616
617 #ifdef Py_DEBUG
618 if (code == NULL || globals == NULL || !PyDict_Check(globals) ||
619 (locals != NULL && !PyMapping_Check(locals))) {
620 PyErr_BadInternalCall();
621 return NULL;
622 }
623 #endif
624 if (back == NULL || back->f_globals != globals) {
625 builtins = _PyDict_GetItemIdWithError(globals, &PyId___builtins__);
626 if (builtins) {
627 if (PyModule_Check(builtins)) {
628 builtins = PyModule_GetDict(builtins);
629 assert(builtins != NULL);
630 }
631 }
632 if (builtins == NULL) {
633 if (PyErr_Occurred()) {
634 return NULL;
635 }
636 /* No builtins! Make up a minimal one
637 Give them 'None', at least. */
638 builtins = PyDict_New();
639 if (builtins == NULL ||
640 PyDict_SetItemString(
641 builtins, "None", Py_None) < 0)
642 return NULL;
643 }
644 else
645 Py_INCREF(builtins);
646
647 }
648 else {
649 /* If we share the globals, we share the builtins.
650 Save a lookup and a call. */
651 builtins = back->f_builtins;
652 assert(builtins != NULL);
653 Py_INCREF(builtins);
654 }
655 if (code->co_zombieframe != NULL) {
656 f = code->co_zombieframe;
657 code->co_zombieframe = NULL;
658 _Py_NewReference((PyObject *)f);
659 assert(f->f_code == code);
660 }
661 else {
662 Py_ssize_t extras, ncells, nfrees;
663 ncells = PyTuple_GET_SIZE(code->co_cellvars);
664 nfrees = PyTuple_GET_SIZE(code->co_freevars);
665 extras = code->co_stacksize + code->co_nlocals + ncells +
666 nfrees;
667 if (free_list == NULL) {
668 f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
669 extras);
670 if (f == NULL) {
671 Py_DECREF(builtins);
672 return NULL;
673 }
674 }
675 else {
676 assert(numfree > 0);
677 --numfree;
678 f = free_list;
679 free_list = free_list->f_back;
680 if (Py_SIZE(f) < extras) {
681 PyFrameObject *new_f = PyObject_GC_Resize(PyFrameObject, f, extras);
682 if (new_f == NULL) {
683 PyObject_GC_Del(f);
684 Py_DECREF(builtins);
685 return NULL;
686 }
687 f = new_f;
688 }
689 _Py_NewReference((PyObject *)f);
690 }
691
692 f->f_code = code;
693 extras = code->co_nlocals + ncells + nfrees;
694 f->f_valuestack = f->f_localsplus + extras;
695 for (i=0; i<extras; i++)
696 f->f_localsplus[i] = NULL;
697 f->f_locals = NULL;
698 f->f_trace = NULL;
699 }
700 f->f_stacktop = f->f_valuestack;
701 f->f_builtins = builtins;
702 Py_XINCREF(back);
703 f->f_back = back;
704 Py_INCREF(code);
705 Py_INCREF(globals);
706 f->f_globals = globals;
707 /* Most functions have CO_NEWLOCALS and CO_OPTIMIZED set. */
708 if ((code->co_flags & (CO_NEWLOCALS | CO_OPTIMIZED)) ==
709 (CO_NEWLOCALS | CO_OPTIMIZED))
710 ; /* f_locals = NULL; will be set by PyFrame_FastToLocals() */
711 else if (code->co_flags & CO_NEWLOCALS) {
712 locals = PyDict_New();
713 if (locals == NULL) {
714 Py_DECREF(f);
715 return NULL;
716 }
717 f->f_locals = locals;
718 }
719 else {
720 if (locals == NULL)
721 locals = globals;
722 Py_INCREF(locals);
723 f->f_locals = locals;
724 }
725
726 f->f_lasti = -1;
727 f->f_lineno = code->co_firstlineno;
728 f->f_iblock = 0;
729 f->f_executing = 0;
730 f->f_gen = NULL;
731 f->f_trace_opcodes = 0;
732 f->f_trace_lines = 1;
733
734 return f;
735 }
736
737 PyFrameObject*
PyFrame_New(PyThreadState * tstate,PyCodeObject * code,PyObject * globals,PyObject * locals)738 PyFrame_New(PyThreadState *tstate, PyCodeObject *code,
739 PyObject *globals, PyObject *locals)
740 {
741 PyFrameObject *f = _PyFrame_New_NoTrack(tstate, code, globals, locals);
742 if (f)
743 _PyObject_GC_TRACK(f);
744 return f;
745 }
746
747
748 /* Block management */
749
750 void
PyFrame_BlockSetup(PyFrameObject * f,int type,int handler,int level)751 PyFrame_BlockSetup(PyFrameObject *f, int type, int handler, int level)
752 {
753 PyTryBlock *b;
754 if (f->f_iblock >= CO_MAXBLOCKS)
755 Py_FatalError("XXX block stack overflow");
756 b = &f->f_blockstack[f->f_iblock++];
757 b->b_type = type;
758 b->b_level = level;
759 b->b_handler = handler;
760 }
761
762 PyTryBlock *
PyFrame_BlockPop(PyFrameObject * f)763 PyFrame_BlockPop(PyFrameObject *f)
764 {
765 PyTryBlock *b;
766 if (f->f_iblock <= 0)
767 Py_FatalError("XXX block stack underflow");
768 b = &f->f_blockstack[--f->f_iblock];
769 return b;
770 }
771
772 /* Convert between "fast" version of locals and dictionary version.
773
774 map and values are input arguments. map is a tuple of strings.
775 values is an array of PyObject*. At index i, map[i] is the name of
776 the variable with value values[i]. The function copies the first
777 nmap variable from map/values into dict. If values[i] is NULL,
778 the variable is deleted from dict.
779
780 If deref is true, then the values being copied are cell variables
781 and the value is extracted from the cell variable before being put
782 in dict.
783 */
784
785 static int
map_to_dict(PyObject * map,Py_ssize_t nmap,PyObject * dict,PyObject ** values,int deref)786 map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
787 int deref)
788 {
789 Py_ssize_t j;
790 assert(PyTuple_Check(map));
791 assert(PyDict_Check(dict));
792 assert(PyTuple_Size(map) >= nmap);
793 for (j=0; j < nmap; j++) {
794 PyObject *key = PyTuple_GET_ITEM(map, j);
795 PyObject *value = values[j];
796 assert(PyUnicode_Check(key));
797 if (deref && value != NULL) {
798 assert(PyCell_Check(value));
799 value = PyCell_GET(value);
800 }
801 if (value == NULL) {
802 if (PyObject_DelItem(dict, key) != 0) {
803 if (PyErr_ExceptionMatches(PyExc_KeyError))
804 PyErr_Clear();
805 else
806 return -1;
807 }
808 }
809 else {
810 if (PyObject_SetItem(dict, key, value) != 0)
811 return -1;
812 }
813 }
814 return 0;
815 }
816
817 /* Copy values from the "locals" dict into the fast locals.
818
819 dict is an input argument containing string keys representing
820 variables names and arbitrary PyObject* as values.
821
822 map and values are input arguments. map is a tuple of strings.
823 values is an array of PyObject*. At index i, map[i] is the name of
824 the variable with value values[i]. The function copies the first
825 nmap variable from map/values into dict. If values[i] is NULL,
826 the variable is deleted from dict.
827
828 If deref is true, then the values being copied are cell variables
829 and the value is extracted from the cell variable before being put
830 in dict. If clear is true, then variables in map but not in dict
831 are set to NULL in map; if clear is false, variables missing in
832 dict are ignored.
833
834 Exceptions raised while modifying the dict are silently ignored,
835 because there is no good way to report them.
836 */
837
838 static void
dict_to_map(PyObject * map,Py_ssize_t nmap,PyObject * dict,PyObject ** values,int deref,int clear)839 dict_to_map(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
840 int deref, int clear)
841 {
842 Py_ssize_t j;
843 assert(PyTuple_Check(map));
844 assert(PyDict_Check(dict));
845 assert(PyTuple_Size(map) >= nmap);
846 for (j=0; j < nmap; j++) {
847 PyObject *key = PyTuple_GET_ITEM(map, j);
848 PyObject *value = PyObject_GetItem(dict, key);
849 assert(PyUnicode_Check(key));
850 /* We only care about NULLs if clear is true. */
851 if (value == NULL) {
852 PyErr_Clear();
853 if (!clear)
854 continue;
855 }
856 if (deref) {
857 assert(PyCell_Check(values[j]));
858 if (PyCell_GET(values[j]) != value) {
859 if (PyCell_Set(values[j], value) < 0)
860 PyErr_Clear();
861 }
862 } else if (values[j] != value) {
863 Py_XINCREF(value);
864 Py_XSETREF(values[j], value);
865 }
866 Py_XDECREF(value);
867 }
868 }
869
870 int
PyFrame_FastToLocalsWithError(PyFrameObject * f)871 PyFrame_FastToLocalsWithError(PyFrameObject *f)
872 {
873 /* Merge fast locals into f->f_locals */
874 PyObject *locals, *map;
875 PyObject **fast;
876 PyCodeObject *co;
877 Py_ssize_t j;
878 Py_ssize_t ncells, nfreevars;
879
880 if (f == NULL) {
881 PyErr_BadInternalCall();
882 return -1;
883 }
884 locals = f->f_locals;
885 if (locals == NULL) {
886 locals = f->f_locals = PyDict_New();
887 if (locals == NULL)
888 return -1;
889 }
890 co = f->f_code;
891 map = co->co_varnames;
892 if (!PyTuple_Check(map)) {
893 PyErr_Format(PyExc_SystemError,
894 "co_varnames must be a tuple, not %s",
895 Py_TYPE(map)->tp_name);
896 return -1;
897 }
898 fast = f->f_localsplus;
899 j = PyTuple_GET_SIZE(map);
900 if (j > co->co_nlocals)
901 j = co->co_nlocals;
902 if (co->co_nlocals) {
903 if (map_to_dict(map, j, locals, fast, 0) < 0)
904 return -1;
905 }
906 ncells = PyTuple_GET_SIZE(co->co_cellvars);
907 nfreevars = PyTuple_GET_SIZE(co->co_freevars);
908 if (ncells || nfreevars) {
909 if (map_to_dict(co->co_cellvars, ncells,
910 locals, fast + co->co_nlocals, 1))
911 return -1;
912
913 /* If the namespace is unoptimized, then one of the
914 following cases applies:
915 1. It does not contain free variables, because it
916 uses import * or is a top-level namespace.
917 2. It is a class namespace.
918 We don't want to accidentally copy free variables
919 into the locals dict used by the class.
920 */
921 if (co->co_flags & CO_OPTIMIZED) {
922 if (map_to_dict(co->co_freevars, nfreevars,
923 locals, fast + co->co_nlocals + ncells, 1) < 0)
924 return -1;
925 }
926 }
927 return 0;
928 }
929
930 void
PyFrame_FastToLocals(PyFrameObject * f)931 PyFrame_FastToLocals(PyFrameObject *f)
932 {
933 int res;
934
935 assert(!PyErr_Occurred());
936
937 res = PyFrame_FastToLocalsWithError(f);
938 if (res < 0)
939 PyErr_Clear();
940 }
941
/* Write f->f_locals back into the fast-locals array (and cells).  When
   'clear' is true, names missing from the dict reset their slots to NULL.
   Any pending exception is preserved across the dict lookups, and errors
   during the copy itself are silently ignored (see dict_to_map). */
void
PyFrame_LocalsToFast(PyFrameObject *f, int clear)
{
    /* Merge f->f_locals into fast locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyObject *error_type, *error_value, *error_traceback;
    PyCodeObject *co;
    Py_ssize_t j;
    Py_ssize_t ncells, nfreevars;
    if (f == NULL)
        return;
    locals = f->f_locals;
    co = f->f_code;
    map = co->co_varnames;
    /* Nothing to copy if the locals dict was never materialized. */
    if (locals == NULL)
        return;
    if (!PyTuple_Check(map))
        return;
    /* Save any in-flight exception; the lookups below may clobber it. */
    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals)
        dict_to_map(co->co_varnames, j, locals, fast, 0, clear);
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        dict_to_map(co->co_cellvars, ncells,
                    locals, fast + co->co_nlocals, 1, clear);
        /* Same test as in PyFrame_FastToLocals() above. */
        if (co->co_flags & CO_OPTIMIZED) {
            dict_to_map(co->co_freevars, nfreevars,
                        locals, fast + co->co_nlocals + ncells, 1,
                        clear);
        }
    }
    PyErr_Restore(error_type, error_value, error_traceback);
}
982
983 /* Clear out the free list */
984 int
PyFrame_ClearFreeList(void)985 PyFrame_ClearFreeList(void)
986 {
987 int freelist_size = numfree;
988
989 while (free_list != NULL) {
990 PyFrameObject *f = free_list;
991 free_list = free_list->f_back;
992 PyObject_GC_Del(f);
993 --numfree;
994 }
995 assert(numfree == 0);
996 return freelist_size;
997 }
998
/* Interpreter-shutdown hook: release the frame free list. */
void
PyFrame_Fini(void)
{
    (void)PyFrame_ClearFreeList();
}
1004
/* Print summary info about the state of the optimized allocator */
void
_PyFrame_DebugMallocStats(FILE *out)
{
    /* Reports only the free-list count; zombie frames live on their
       code objects and are not counted here. */
    _PyDebugAllocatorStats(out,
                           "free PyFrameObject",
                           numfree, sizeof(PyFrameObject));
}
1013
1014