1 /* Frame object implementation */
2
3 #include "Python.h"
4 #include "internal/pystate.h"
5
6 #include "code.h"
7 #include "frameobject.h"
8 #include "opcode.h"
9 #include "structmember.h"
10
11 #define OFF(x) offsetof(PyFrameObject, x)
12
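/* Frame attributes exposed via tp_members.  All of these are read-only
   except f_trace_lines and f_trace_opcodes, which let a trace function
   turn per-line events off or per-opcode events on for this frame. */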
13 static PyMemberDef frame_memberlist[] = {
14 {"f_back", T_OBJECT, OFF(f_back), READONLY},
15 {"f_code", T_OBJECT, OFF(f_code), READONLY},
16 {"f_builtins", T_OBJECT, OFF(f_builtins), READONLY},
17 {"f_globals", T_OBJECT, OFF(f_globals), READONLY},
18 {"f_lasti", T_INT, OFF(f_lasti), READONLY},
19 {"f_trace_lines", T_BOOL, OFF(f_trace_lines), 0},
20 {"f_trace_opcodes", T_BOOL, OFF(f_trace_opcodes), 0},
21 {NULL} /* Sentinel */
22 };
23
24 static PyObject *
frame_getlocals(PyFrameObject *f, void *closure)
26 {
27 if (PyFrame_FastToLocalsWithError(f) < 0)
28 return NULL;
29 Py_INCREF(f->f_locals);
30 return f->f_locals;
31 }
32
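/* Public helper: return the line number frame f is currently executing.
   While a trace function is active the eval loop keeps f_lineno up to
   date, so it can be returned directly; otherwise the line is derived
   from f_lasti via the co_lnotab mapping. */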
33 int
PyFrame_GetLineNumber(PyFrameObject *f)
35 {
36 if (f->f_trace)
37 return f->f_lineno;
38 else
39 return PyCode_Addr2Line(f->f_code, f->f_lasti);
40 }
41
42 static PyObject *
frame_getlineno(PyFrameObject *f, void *closure)
44 {
45 return PyLong_FromLong(PyFrame_GetLineNumber(f));
46 }
47
48 /* Setter for f_lineno - you can set f_lineno from within a trace function in
49 * order to jump to a given line of code, subject to some restrictions. Most
50 * lines are OK to jump to because they don't make any assumptions about the
51 * state of the stack (obvious because you could remove the line and the code
52 * would still work without any stack errors), but there are some constructs
53 * that limit jumping:
54 *
55 * o Lines with an 'except' statement on them can't be jumped to, because
56 * they expect an exception to be on the top of the stack.
57 * o Lines that live in a 'finally' block can't be jumped from or to, since
58 * the END_FINALLY expects to clean up the stack after the 'try' block.
59 * o 'try'/'for'/'while' blocks can't be jumped into because the blockstack
60 * needs to be set up before their code runs, and for 'for' loops the
61 * iterator needs to be on the stack.
62 * o Jumps cannot be made from within a trace function invoked with a
63 * 'return' or 'exception' event since the eval loop has been exited at
64 * that time.
65 */
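/* A minimal Python-level sketch of the intended use, with a tracer
 * installed via sys.settrace() (FROM_LINE and TO_LINE are hypothetical
 * line numbers); pdb's 'jump' command works essentially this way:
 *
 *     import sys
 *
 *     def tracer(frame, event, arg):
 *         if event == 'line' and frame.f_lineno == FROM_LINE:
 *             frame.f_lineno = TO_LINE    # handled by frame_setlineno()
 *         return tracer
 *
 *     sys.settrace(tracer)
 */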
66 static int
frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno, void *Py_UNUSED(ignored))
68 {
69 int new_lineno = 0; /* The new value of f_lineno */
70 long l_new_lineno;
71 int overflow;
72 int new_lasti = 0; /* The new value of f_lasti */
73 int new_iblock = 0; /* The new value of f_iblock */
74 unsigned char *code = NULL; /* The bytecode for the frame... */
75 Py_ssize_t code_len = 0; /* ...and its length */
76 unsigned char *lnotab = NULL; /* Iterating over co_lnotab */
77 Py_ssize_t lnotab_len = 0; /* (ditto) */
78 int offset = 0; /* (ditto) */
79 int line = 0; /* (ditto) */
80 int addr = 0; /* (ditto) */
81 int min_addr = 0; /* Scanning the SETUPs and POPs */
82 int max_addr = 0; /* (ditto) */
83 int delta_iblock = 0; /* (ditto) */
84 int min_delta_iblock = 0; /* (ditto) */
85 int min_iblock = 0; /* (ditto) */
86 int f_lasti_setup_addr = 0; /* Policing no-jump-into-finally */
87 int new_lasti_setup_addr = 0; /* (ditto) */
88 int blockstack[CO_MAXBLOCKS]; /* Walking the 'finally' blocks */
89 int in_finally[CO_MAXBLOCKS]; /* (ditto) */
90 int blockstack_top = 0; /* (ditto) */
91 unsigned char setup_op = 0; /* (ditto) */
92
93 if (p_new_lineno == NULL) {
94 PyErr_SetString(PyExc_AttributeError, "cannot delete attribute");
95 return -1;
96 }
97 /* f_lineno must be an integer. */
98 if (!PyLong_CheckExact(p_new_lineno)) {
99 PyErr_SetString(PyExc_ValueError,
100 "lineno must be an integer");
101 return -1;
102 }
103
/* Upon the 'call' trace event of a new frame, f->f_lasti is -1 and
 * f->f_trace is NULL, so check the f_lasti condition first.
 * Forbidding jumps from the 'call' event of a new frame is a side effect
 * of allowing f_lineno to be set only from trace functions. */
108 if (f->f_lasti == -1) {
109 PyErr_Format(PyExc_ValueError,
110 "can't jump from the 'call' trace event of a new frame");
111 return -1;
112 }
113
114 /* You can only do this from within a trace function, not via
115 * _getframe or similar hackery. */
116 if (!f->f_trace) {
117 PyErr_Format(PyExc_ValueError,
118 "f_lineno can only be set by a trace function");
119 return -1;
120 }
121
122 /* Forbid jumps upon a 'return' trace event (except after executing a
123 * YIELD_VALUE or YIELD_FROM opcode, f_stacktop is not NULL in that case)
124 * and upon an 'exception' trace event.
125 * Jumps from 'call' trace events have already been forbidden above for new
126 * frames, so this check does not change anything for 'call' events. */
127 if (f->f_stacktop == NULL) {
128 PyErr_SetString(PyExc_ValueError,
129 "can only jump from a 'line' trace event");
130 return -1;
131 }
132
133 /* Fail if the line comes before the start of the code block. */
134 l_new_lineno = PyLong_AsLongAndOverflow(p_new_lineno, &overflow);
135 if (overflow
136 #if SIZEOF_LONG > SIZEOF_INT
137 || l_new_lineno > INT_MAX
138 || l_new_lineno < INT_MIN
139 #endif
140 ) {
141 PyErr_SetString(PyExc_ValueError,
142 "lineno out of range");
143 return -1;
144 }
145 new_lineno = (int)l_new_lineno;
146
147 if (new_lineno < f->f_code->co_firstlineno) {
148 PyErr_Format(PyExc_ValueError,
149 "line %d comes before the current code block",
150 new_lineno);
151 return -1;
152 }
153 else if (new_lineno == f->f_code->co_firstlineno) {
154 new_lasti = 0;
155 new_lineno = f->f_code->co_firstlineno;
156 }
157 else {
158 /* Find the bytecode offset for the start of the given
159 * line, or the first code-owning line after it. */
160 char *tmp;
161 PyBytes_AsStringAndSize(f->f_code->co_lnotab,
162 &tmp, &lnotab_len);
163 lnotab = (unsigned char *) tmp;
164 addr = 0;
165 line = f->f_code->co_firstlineno;
166 new_lasti = -1;
167 for (offset = 0; offset < lnotab_len; offset += 2) {
168 addr += lnotab[offset];
169 line += (signed char)lnotab[offset+1];
170 if (line >= new_lineno) {
171 new_lasti = addr;
172 new_lineno = line;
173 break;
174 }
175 }
176 }
177
178 /* If we didn't reach the requested line, return an error. */
179 if (new_lasti == -1) {
180 PyErr_Format(PyExc_ValueError,
181 "line %d comes after the current code block",
182 new_lineno);
183 return -1;
184 }
185
186 /* We're now ready to look at the bytecode. */
187 PyBytes_AsStringAndSize(f->f_code->co_code, (char **)&code, &code_len);
188
189 /* The trace function is called with a 'return' trace event after the
190 * execution of a yield statement. */
191 assert(f->f_lasti != -1);
192 if (code[f->f_lasti] == YIELD_VALUE || code[f->f_lasti] == YIELD_FROM) {
193 PyErr_SetString(PyExc_ValueError,
194 "can't jump from a yield statement");
195 return -1;
196 }
197
198 min_addr = Py_MIN(new_lasti, f->f_lasti);
199 max_addr = Py_MAX(new_lasti, f->f_lasti);
200
201 /* You can't jump onto a line with an 'except' statement on it -
202 * they expect to have an exception on the top of the stack, which
203 * won't be true if you jump to them. They always start with code
204 * that either pops the exception using POP_TOP (plain 'except:'
205 * lines do this) or duplicates the exception on the stack using
206 * DUP_TOP (if there's an exception type specified). See compile.c,
207 * 'com_try_except' for the full details. There aren't any other
208 * cases (AFAIK) where a line's code can start with DUP_TOP or
209 * POP_TOP, but if any ever appear, they'll be subject to the same
210 * restriction (but with a different error message). */
211 if (code[new_lasti] == DUP_TOP || code[new_lasti] == POP_TOP) {
212 PyErr_SetString(PyExc_ValueError,
213 "can't jump to 'except' line as there's no exception");
214 return -1;
215 }
216
217 /* You can't jump into or out of a 'finally' block because the 'try'
218 * block leaves something on the stack for the END_FINALLY to clean
219 * up. So we walk the bytecode, maintaining a simulated blockstack.
220 * When we reach the old or new address and it's in a 'finally' block
221 * we note the address of the corresponding SETUP_FINALLY. The jump
222 * is only legal if neither address is in a 'finally' block or
223 * they're both in the same one. 'blockstack' is a stack of the
224 * bytecode addresses of the SETUP_X opcodes, and 'in_finally' tracks
225 * whether we're in a 'finally' block at each blockstack level. */
226 f_lasti_setup_addr = -1;
227 new_lasti_setup_addr = -1;
228 memset(blockstack, '\0', sizeof(blockstack));
229 memset(in_finally, '\0', sizeof(in_finally));
230 blockstack_top = 0;
231 for (addr = 0; addr < code_len; addr += sizeof(_Py_CODEUNIT)) {
232 unsigned char op = code[addr];
233 switch (op) {
234 case SETUP_LOOP:
235 case SETUP_EXCEPT:
236 case SETUP_FINALLY:
237 case SETUP_WITH:
238 case SETUP_ASYNC_WITH:
239 blockstack[blockstack_top++] = addr;
240 in_finally[blockstack_top-1] = 0;
241 break;
242
243 case POP_BLOCK:
244 assert(blockstack_top > 0);
245 setup_op = code[blockstack[blockstack_top-1]];
246 if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH
247 || setup_op == SETUP_ASYNC_WITH) {
248 in_finally[blockstack_top-1] = 1;
249 }
250 else {
251 blockstack_top--;
252 }
253 break;
254
255 case END_FINALLY:
256 /* Ignore END_FINALLYs for SETUP_EXCEPTs - they exist
257 * in the bytecode but don't correspond to an actual
258 * 'finally' block. (If blockstack_top is 0, we must
259 * be seeing such an END_FINALLY.) */
260 if (blockstack_top > 0) {
261 setup_op = code[blockstack[blockstack_top-1]];
262 if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH
263 || setup_op == SETUP_ASYNC_WITH) {
264 blockstack_top--;
265 }
266 }
267 break;
268 }
269
270 /* For the addresses we're interested in, see whether they're
271 * within a 'finally' block and if so, remember the address
272 * of the SETUP_FINALLY. */
273 if (addr == new_lasti || addr == f->f_lasti) {
274 int i = 0;
275 int setup_addr = -1;
276 for (i = blockstack_top-1; i >= 0; i--) {
277 if (in_finally[i]) {
278 setup_addr = blockstack[i];
279 break;
280 }
281 }
282
283 if (setup_addr != -1) {
284 if (addr == new_lasti) {
285 new_lasti_setup_addr = setup_addr;
286 }
287
288 if (addr == f->f_lasti) {
289 f_lasti_setup_addr = setup_addr;
290 }
291 }
292 }
293 }
294
295 /* Verify that the blockstack tracking code didn't get lost. */
296 assert(blockstack_top == 0);
297
298 /* After all that, are we jumping into / out of a 'finally' block? */
299 if (new_lasti_setup_addr != f_lasti_setup_addr) {
300 PyErr_SetString(PyExc_ValueError,
301 "can't jump into or out of a 'finally' block");
302 return -1;
303 }
304
305
306 /* Police block-jumping (you can't jump into the middle of a block)
307 * and ensure that the blockstack finishes up in a sensible state (by
308 * popping any blocks we're jumping out of). We look at all the
309 * blockstack operations between the current position and the new
310 * one, and keep track of how many blocks we drop out of on the way.
311 * By also keeping track of the lowest blockstack position we see, we
312 * can tell whether the jump goes into any blocks without coming out
313 * again - in that case we raise an exception below. */
314 delta_iblock = 0;
315 for (addr = min_addr; addr < max_addr; addr += sizeof(_Py_CODEUNIT)) {
316 unsigned char op = code[addr];
317 switch (op) {
318 case SETUP_LOOP:
319 case SETUP_EXCEPT:
320 case SETUP_FINALLY:
321 case SETUP_WITH:
322 case SETUP_ASYNC_WITH:
323 delta_iblock++;
324 break;
325
326 case POP_BLOCK:
327 delta_iblock--;
328 break;
329 }
330
331 min_delta_iblock = Py_MIN(min_delta_iblock, delta_iblock);
332 }
333
334 /* Derive the absolute iblock values from the deltas. */
335 min_iblock = f->f_iblock + min_delta_iblock;
336 if (new_lasti > f->f_lasti) {
337 /* Forwards jump. */
338 new_iblock = f->f_iblock + delta_iblock;
339 }
340 else {
341 /* Backwards jump. */
342 new_iblock = f->f_iblock - delta_iblock;
343 }
344
345 /* Are we jumping into a block? */
346 if (new_iblock > min_iblock) {
347 PyErr_SetString(PyExc_ValueError,
348 "can't jump into the middle of a block");
349 return -1;
350 }
351
352 /* Pop any blocks that we're jumping out of. */
353 while (f->f_iblock > new_iblock) {
354 PyTryBlock *b = &f->f_blockstack[--f->f_iblock];
355 while ((f->f_stacktop - f->f_valuestack) > b->b_level) {
356 PyObject *v = (*--f->f_stacktop);
357 Py_DECREF(v);
358 }
359 if (b->b_type == SETUP_FINALLY &&
360 code[b->b_handler] == WITH_CLEANUP_START)
361 {
362 /* Pop the exit function. */
363 PyObject *v = (*--f->f_stacktop);
364 Py_DECREF(v);
365 }
366 }
367
368 /* Finally set the new f_lineno and f_lasti and return OK. */
369 f->f_lineno = new_lineno;
370 f->f_lasti = new_lasti;
371 return 0;
372 }
373
374 static PyObject *
frame_gettrace(PyFrameObject *f, void *closure)
376 {
377 PyObject* trace = f->f_trace;
378
379 if (trace == NULL)
380 trace = Py_None;
381
382 Py_INCREF(trace);
383
384 return trace;
385 }
386
387 static int
frame_settrace(PyFrameObject *f, PyObject* v, void *closure)
389 {
390 /* We rely on f_lineno being accurate when f_trace is set. */
391 f->f_lineno = PyFrame_GetLineNumber(f);
392
393 if (v == Py_None)
394 v = NULL;
395 Py_XINCREF(v);
396 Py_XSETREF(f->f_trace, v);
397
398 return 0;
399 }
400
401
402 static PyGetSetDef frame_getsetlist[] = {
403 {"f_locals", (getter)frame_getlocals, NULL, NULL},
404 {"f_lineno", (getter)frame_getlineno,
405 (setter)frame_setlineno, NULL},
406 {"f_trace", (getter)frame_gettrace, (setter)frame_settrace, NULL},
407 {0}
408 };
409
410 /* Stack frames are allocated and deallocated at a considerable rate.
411 In an attempt to improve the speed of function calls, we:
412
413 1. Hold a single "zombie" frame on each code object. This retains
414 the allocated and initialised frame object from an invocation of
415 the code object. The zombie is reanimated the next time we need a
416 frame object for that code object. Doing this saves the malloc/
417 realloc required when using a free_list frame that isn't the
418 correct size. It also saves some field initialisation.
419
420 In zombie mode, no field of PyFrameObject holds a reference, but
421 the following fields are still valid:
422
423 * ob_type, ob_size, f_code, f_valuestack;
424
425 * f_locals, f_trace are NULL;
426
427 * f_localsplus does not require re-allocation and
428 the local variables in f_localsplus are NULL.
429
430 2. We also maintain a separate free list of stack frames (just like
431 floats are allocated in a special way -- see floatobject.c). When
432 a stack frame is on the free list, only the following members have
433 a meaning:
434 ob_type == &Frametype
435 f_back next item on free list, or NULL
436 f_stacksize size of value stack
437 ob_size size of localsplus
438 Note that the value and block stacks are preserved -- this can save
439 another malloc() call or two (and two free() calls as well!).
440 Also note that, unlike for integers, each frame object is a
441 malloc'ed object in its own right -- it is only the actual calls to
442 malloc() that we are trying to save here, not the administration.
443 After all, while a typical program may make millions of calls, a
444 call depth of more than 20 or 30 is probably already exceptional
445 unless the program contains run-away recursion. I hope.
446
447 Later, PyFrame_MAXFREELIST was added to bound the # of frames saved on
448 free_list. Else programs creating lots of cyclic trash involving
449 frames could provoke free_list into growing without bound.
450 */
451
452 static PyFrameObject *free_list = NULL;
453 static int numfree = 0; /* number of frames currently in free_list */
454 /* max value for numfree */
455 #define PyFrame_MAXFREELIST 200
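/* frame_dealloc() below feeds this list (once the per-code zombie slot is
   occupied), _PyFrame_New_NoTrack() reuses entries from it, and
   PyFrame_ClearFreeList() / PyFrame_Fini() drain it. */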
456
457 static void _Py_HOT_FUNCTION
frame_dealloc(PyFrameObject *f)
459 {
460 PyObject **p, **valuestack;
461 PyCodeObject *co;
462
463 if (_PyObject_GC_IS_TRACKED(f))
464 _PyObject_GC_UNTRACK(f);
465
466 Py_TRASHCAN_SAFE_BEGIN(f)
467 /* Kill all local variables */
468 valuestack = f->f_valuestack;
469 for (p = f->f_localsplus; p < valuestack; p++)
470 Py_CLEAR(*p);
471
472 /* Free stack */
473 if (f->f_stacktop != NULL) {
474 for (p = valuestack; p < f->f_stacktop; p++)
475 Py_XDECREF(*p);
476 }
477
478 Py_XDECREF(f->f_back);
479 Py_DECREF(f->f_builtins);
480 Py_DECREF(f->f_globals);
481 Py_CLEAR(f->f_locals);
482 Py_CLEAR(f->f_trace);
483
484 co = f->f_code;
485 if (co->co_zombieframe == NULL)
486 co->co_zombieframe = f;
487 else if (numfree < PyFrame_MAXFREELIST) {
488 ++numfree;
489 f->f_back = free_list;
490 free_list = f;
491 }
492 else
493 PyObject_GC_Del(f);
494
495 Py_DECREF(co);
496 Py_TRASHCAN_SAFE_END(f)
497 }
498
499 static int
frame_traverse(PyFrameObject *f, visitproc visit, void *arg)
501 {
502 PyObject **fastlocals, **p;
503 Py_ssize_t i, slots;
504
505 Py_VISIT(f->f_back);
506 Py_VISIT(f->f_code);
507 Py_VISIT(f->f_builtins);
508 Py_VISIT(f->f_globals);
509 Py_VISIT(f->f_locals);
510 Py_VISIT(f->f_trace);
511
512 /* locals */
513 slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars);
514 fastlocals = f->f_localsplus;
515 for (i = slots; --i >= 0; ++fastlocals)
516 Py_VISIT(*fastlocals);
517
518 /* stack */
519 if (f->f_stacktop != NULL) {
520 for (p = f->f_valuestack; p < f->f_stacktop; p++)
521 Py_VISIT(*p);
522 }
523 return 0;
524 }
525
526 static int
frame_tp_clear(PyFrameObject *f)
528 {
529 PyObject **fastlocals, **p, **oldtop;
530 Py_ssize_t i, slots;
531
532 /* Before anything else, make sure that this frame is clearly marked
533 * as being defunct! Else, e.g., a generator reachable from this
534 * frame may also point to this frame, believe itself to still be
535 * active, and try cleaning up this frame again.
536 */
537 oldtop = f->f_stacktop;
538 f->f_stacktop = NULL;
539 f->f_executing = 0;
540
541 Py_CLEAR(f->f_trace);
542
543 /* locals */
544 slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars);
545 fastlocals = f->f_localsplus;
546 for (i = slots; --i >= 0; ++fastlocals)
547 Py_CLEAR(*fastlocals);
548
549 /* stack */
550 if (oldtop != NULL) {
551 for (p = f->f_valuestack; p < oldtop; p++)
552 Py_CLEAR(*p);
553 }
554 return 0;
555 }
556
557 static PyObject *
frame_clear(PyFrameObject *f)
559 {
560 if (f->f_executing) {
561 PyErr_SetString(PyExc_RuntimeError,
562 "cannot clear an executing frame");
563 return NULL;
564 }
565 if (f->f_gen) {
566 _PyGen_Finalize(f->f_gen);
567 assert(f->f_gen == NULL);
568 }
569 (void)frame_tp_clear(f);
570 Py_RETURN_NONE;
571 }
572
573 PyDoc_STRVAR(clear__doc__,
574 "F.clear(): clear most references held by the frame");
575
576 static PyObject *
frame_sizeof(PyFrameObject *f)
578 {
579 Py_ssize_t res, extras, ncells, nfrees;
580
581 ncells = PyTuple_GET_SIZE(f->f_code->co_cellvars);
582 nfrees = PyTuple_GET_SIZE(f->f_code->co_freevars);
583 extras = f->f_code->co_stacksize + f->f_code->co_nlocals +
584 ncells + nfrees;
585 /* subtract one as it is already included in PyFrameObject */
586 res = sizeof(PyFrameObject) + (extras-1) * sizeof(PyObject *);
587
588 return PyLong_FromSsize_t(res);
589 }
590
591 PyDoc_STRVAR(sizeof__doc__,
592 "F.__sizeof__() -> size of F in memory, in bytes");
593
594 static PyObject *
frame_repr(PyFrameObject *f)
596 {
597 int lineno = PyFrame_GetLineNumber(f);
598 return PyUnicode_FromFormat(
599 "<frame at %p, file %R, line %d, code %S>",
600 f, f->f_code->co_filename, lineno, f->f_code->co_name);
601 }
602
603 static PyMethodDef frame_methods[] = {
604 {"clear", (PyCFunction)frame_clear, METH_NOARGS,
605 clear__doc__},
606 {"__sizeof__", (PyCFunction)frame_sizeof, METH_NOARGS,
607 sizeof__doc__},
608 {NULL, NULL} /* sentinel */
609 };
610
611 PyTypeObject PyFrame_Type = {
612 PyVarObject_HEAD_INIT(&PyType_Type, 0)
613 "frame",
614 sizeof(PyFrameObject),
615 sizeof(PyObject *),
616 (destructor)frame_dealloc, /* tp_dealloc */
617 0, /* tp_print */
618 0, /* tp_getattr */
619 0, /* tp_setattr */
620 0, /* tp_reserved */
621 (reprfunc)frame_repr, /* tp_repr */
622 0, /* tp_as_number */
623 0, /* tp_as_sequence */
624 0, /* tp_as_mapping */
625 0, /* tp_hash */
626 0, /* tp_call */
627 0, /* tp_str */
628 PyObject_GenericGetAttr, /* tp_getattro */
629 PyObject_GenericSetAttr, /* tp_setattro */
630 0, /* tp_as_buffer */
631 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
632 0, /* tp_doc */
633 (traverseproc)frame_traverse, /* tp_traverse */
634 (inquiry)frame_tp_clear, /* tp_clear */
635 0, /* tp_richcompare */
636 0, /* tp_weaklistoffset */
637 0, /* tp_iter */
638 0, /* tp_iternext */
639 frame_methods, /* tp_methods */
640 frame_memberlist, /* tp_members */
641 frame_getsetlist, /* tp_getset */
642 0, /* tp_base */
643 0, /* tp_dict */
644 };
645
646 _Py_IDENTIFIER(__builtins__);
647
int _PyFrame_Init()
649 {
650 /* Before, PyId___builtins__ was a string created explicitly in
651 this function. Now there is nothing to initialize anymore, but
652 the function is kept for backward compatibility. */
653 return 1;
654 }
655
656 PyFrameObject* _Py_HOT_FUNCTION
_PyFrame_New_NoTrack(PyThreadState *tstate, PyCodeObject *code,
658 PyObject *globals, PyObject *locals)
659 {
660 PyFrameObject *back = tstate->frame;
661 PyFrameObject *f;
662 PyObject *builtins;
663 Py_ssize_t i;
664
665 #ifdef Py_DEBUG
666 if (code == NULL || globals == NULL || !PyDict_Check(globals) ||
667 (locals != NULL && !PyMapping_Check(locals))) {
668 PyErr_BadInternalCall();
669 return NULL;
670 }
671 #endif
672 if (back == NULL || back->f_globals != globals) {
673 builtins = _PyDict_GetItemId(globals, &PyId___builtins__);
674 if (builtins) {
675 if (PyModule_Check(builtins)) {
676 builtins = PyModule_GetDict(builtins);
677 assert(builtins != NULL);
678 }
679 }
680 if (builtins == NULL) {
/* No builtins!  Make up a minimal one;
   give them 'None', at least. */
683 builtins = PyDict_New();
684 if (builtins == NULL ||
685 PyDict_SetItemString(
686 builtins, "None", Py_None) < 0)
687 return NULL;
688 }
689 else
690 Py_INCREF(builtins);
691
692 }
693 else {
694 /* If we share the globals, we share the builtins.
695 Save a lookup and a call. */
696 builtins = back->f_builtins;
697 assert(builtins != NULL);
698 Py_INCREF(builtins);
699 }
700 if (code->co_zombieframe != NULL) {
701 f = code->co_zombieframe;
702 code->co_zombieframe = NULL;
703 _Py_NewReference((PyObject *)f);
704 assert(f->f_code == code);
705 }
706 else {
707 Py_ssize_t extras, ncells, nfrees;
708 ncells = PyTuple_GET_SIZE(code->co_cellvars);
709 nfrees = PyTuple_GET_SIZE(code->co_freevars);
710 extras = code->co_stacksize + code->co_nlocals + ncells +
711 nfrees;
712 if (free_list == NULL) {
713 f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
714 extras);
715 if (f == NULL) {
716 Py_DECREF(builtins);
717 return NULL;
718 }
719 }
720 else {
721 assert(numfree > 0);
722 --numfree;
723 f = free_list;
724 free_list = free_list->f_back;
725 if (Py_SIZE(f) < extras) {
726 PyFrameObject *new_f = PyObject_GC_Resize(PyFrameObject, f, extras);
727 if (new_f == NULL) {
728 PyObject_GC_Del(f);
729 Py_DECREF(builtins);
730 return NULL;
731 }
732 f = new_f;
733 }
734 _Py_NewReference((PyObject *)f);
735 }
736
737 f->f_code = code;
738 extras = code->co_nlocals + ncells + nfrees;
739 f->f_valuestack = f->f_localsplus + extras;
740 for (i=0; i<extras; i++)
741 f->f_localsplus[i] = NULL;
742 f->f_locals = NULL;
743 f->f_trace = NULL;
744 }
745 f->f_stacktop = f->f_valuestack;
746 f->f_builtins = builtins;
747 Py_XINCREF(back);
748 f->f_back = back;
749 Py_INCREF(code);
750 Py_INCREF(globals);
751 f->f_globals = globals;
752 /* Most functions have CO_NEWLOCALS and CO_OPTIMIZED set. */
753 if ((code->co_flags & (CO_NEWLOCALS | CO_OPTIMIZED)) ==
754 (CO_NEWLOCALS | CO_OPTIMIZED))
755 ; /* f_locals = NULL; will be set by PyFrame_FastToLocals() */
756 else if (code->co_flags & CO_NEWLOCALS) {
757 locals = PyDict_New();
758 if (locals == NULL) {
759 Py_DECREF(f);
760 return NULL;
761 }
762 f->f_locals = locals;
763 }
764 else {
765 if (locals == NULL)
766 locals = globals;
767 Py_INCREF(locals);
768 f->f_locals = locals;
769 }
770
771 f->f_lasti = -1;
772 f->f_lineno = code->co_firstlineno;
773 f->f_iblock = 0;
774 f->f_executing = 0;
775 f->f_gen = NULL;
776 f->f_trace_opcodes = 0;
777 f->f_trace_lines = 1;
778
779 return f;
780 }
781
782 PyFrameObject*
PyFrame_New(PyThreadState *tstate, PyCodeObject *code,
784 PyObject *globals, PyObject *locals)
785 {
786 PyFrameObject *f = _PyFrame_New_NoTrack(tstate, code, globals, locals);
787 if (f)
788 _PyObject_GC_TRACK(f);
789 return f;
790 }
791
792
793 /* Block management */
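/* PyFrame_BlockSetup() and PyFrame_BlockPop() are the frame-level
   primitives behind f_blockstack: the eval loop pushes a PyTryBlock when a
   SETUP_* opcode executes (recording the block type, the handler's
   bytecode offset and the current value-stack depth) and pops it again at
   POP_BLOCK or while unwinding.  frame_setlineno() above performs the same
   bookkeeping by hand when a trace function jumps across block
   boundaries. */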
794
795 void
PyFrame_BlockSetup(PyFrameObject *f, int type, int handler, int level)
797 {
798 PyTryBlock *b;
799 if (f->f_iblock >= CO_MAXBLOCKS)
800 Py_FatalError("XXX block stack overflow");
801 b = &f->f_blockstack[f->f_iblock++];
802 b->b_type = type;
803 b->b_level = level;
804 b->b_handler = handler;
805 }
806
807 PyTryBlock *
PyFrame_BlockPop(PyFrameObject *f)
809 {
810 PyTryBlock *b;
811 if (f->f_iblock <= 0)
812 Py_FatalError("XXX block stack underflow");
813 b = &f->f_blockstack[--f->f_iblock];
814 return b;
815 }
816
817 /* Convert between "fast" version of locals and dictionary version.
818
819 map and values are input arguments. map is a tuple of strings.
820 values is an array of PyObject*. At index i, map[i] is the name of
821 the variable with value values[i]. The function copies the first
nmap variables from map/values into dict. If values[i] is NULL,
823 the variable is deleted from dict.
824
825 If deref is true, then the values being copied are cell variables
826 and the value is extracted from the cell variable before being put
827 in dict.
828 */
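/* For example (hypothetical inputs): with map = ("x", "y"), nmap = 2,
   values = {<ref to 1>, NULL} and deref = 0, the call leaves dict with
   dict["x"] == 1 and removes any existing "y" entry. */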
829
830 static int
map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
832 int deref)
833 {
834 Py_ssize_t j;
835 assert(PyTuple_Check(map));
836 assert(PyDict_Check(dict));
837 assert(PyTuple_Size(map) >= nmap);
838 for (j=0; j < nmap; j++) {
839 PyObject *key = PyTuple_GET_ITEM(map, j);
840 PyObject *value = values[j];
841 assert(PyUnicode_Check(key));
842 if (deref && value != NULL) {
843 assert(PyCell_Check(value));
844 value = PyCell_GET(value);
845 }
846 if (value == NULL) {
847 if (PyObject_DelItem(dict, key) != 0) {
848 if (PyErr_ExceptionMatches(PyExc_KeyError))
849 PyErr_Clear();
850 else
851 return -1;
852 }
853 }
854 else {
855 if (PyObject_SetItem(dict, key, value) != 0)
856 return -1;
857 }
858 }
859 return 0;
860 }
861
/* Copy values from the "locals" dict into the fast locals.

   dict is an input argument containing string keys representing
   variable names and arbitrary PyObject* as values.

   map and values are input arguments. map is a tuple of strings.
   values is an array of PyObject*. At index i, map[i] is the name of
   the variable whose fast slot is values[i]. For the first nmap
   variables, the function copies dict[map[i]] into values[i].

   If deref is true, then the fast slots hold cell variables, and the
   value taken from dict is stored into the cell rather than directly
   into values[i]. If clear is true, then variables in map but not in
   dict are set to NULL in values (or cleared in the cell); if clear
   is false, variables missing in dict are ignored.

   Exceptions raised while looking up values in dict, or while writing
   them back, are silently ignored, because there is no good way to
   report them.
*/
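/* For example (hypothetical inputs): with map = ("x",), nmap = 1,
   deref = 0 and clear = 1, dict = {"x": 2} stores a new reference to 2
   into values[0], while an empty dict resets values[0] to NULL. */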
882
883 static void
dict_to_map(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
885 int deref, int clear)
886 {
887 Py_ssize_t j;
888 assert(PyTuple_Check(map));
889 assert(PyDict_Check(dict));
890 assert(PyTuple_Size(map) >= nmap);
891 for (j=0; j < nmap; j++) {
892 PyObject *key = PyTuple_GET_ITEM(map, j);
893 PyObject *value = PyObject_GetItem(dict, key);
894 assert(PyUnicode_Check(key));
895 /* We only care about NULLs if clear is true. */
896 if (value == NULL) {
897 PyErr_Clear();
898 if (!clear)
899 continue;
900 }
901 if (deref) {
902 assert(PyCell_Check(values[j]));
903 if (PyCell_GET(values[j]) != value) {
904 if (PyCell_Set(values[j], value) < 0)
905 PyErr_Clear();
906 }
907 } else if (values[j] != value) {
908 Py_XINCREF(value);
909 Py_XSETREF(values[j], value);
910 }
911 Py_XDECREF(value);
912 }
913 }
914
915 int
PyFrame_FastToLocalsWithError(PyFrameObject *f)
917 {
918 /* Merge fast locals into f->f_locals */
919 PyObject *locals, *map;
920 PyObject **fast;
921 PyCodeObject *co;
922 Py_ssize_t j;
923 Py_ssize_t ncells, nfreevars;
924
925 if (f == NULL) {
926 PyErr_BadInternalCall();
927 return -1;
928 }
929 locals = f->f_locals;
930 if (locals == NULL) {
931 locals = f->f_locals = PyDict_New();
932 if (locals == NULL)
933 return -1;
934 }
935 co = f->f_code;
936 map = co->co_varnames;
937 if (!PyTuple_Check(map)) {
938 PyErr_Format(PyExc_SystemError,
939 "co_varnames must be a tuple, not %s",
940 Py_TYPE(map)->tp_name);
941 return -1;
942 }
943 fast = f->f_localsplus;
944 j = PyTuple_GET_SIZE(map);
945 if (j > co->co_nlocals)
946 j = co->co_nlocals;
947 if (co->co_nlocals) {
948 if (map_to_dict(map, j, locals, fast, 0) < 0)
949 return -1;
950 }
951 ncells = PyTuple_GET_SIZE(co->co_cellvars);
952 nfreevars = PyTuple_GET_SIZE(co->co_freevars);
953 if (ncells || nfreevars) {
954 if (map_to_dict(co->co_cellvars, ncells,
955 locals, fast + co->co_nlocals, 1))
956 return -1;
957
958 /* If the namespace is unoptimized, then one of the
959 following cases applies:
960 1. It does not contain free variables, because it
961 uses import * or is a top-level namespace.
962 2. It is a class namespace.
963 We don't want to accidentally copy free variables
964 into the locals dict used by the class.
965 */
966 if (co->co_flags & CO_OPTIMIZED) {
967 if (map_to_dict(co->co_freevars, nfreevars,
968 locals, fast + co->co_nlocals + ncells, 1) < 0)
969 return -1;
970 }
971 }
972 return 0;
973 }
974
975 void
PyFrame_FastToLocals(PyFrameObject *f)
977 {
978 int res;
979
980 assert(!PyErr_Occurred());
981
982 res = PyFrame_FastToLocalsWithError(f);
983 if (res < 0)
984 PyErr_Clear();
985 }
986
987 void
PyFrame_LocalsToFast(PyFrameObject *f, int clear)
989 {
990 /* Merge f->f_locals into fast locals */
991 PyObject *locals, *map;
992 PyObject **fast;
993 PyObject *error_type, *error_value, *error_traceback;
994 PyCodeObject *co;
995 Py_ssize_t j;
996 Py_ssize_t ncells, nfreevars;
997 if (f == NULL)
998 return;
999 locals = f->f_locals;
1000 co = f->f_code;
1001 map = co->co_varnames;
1002 if (locals == NULL)
1003 return;
1004 if (!PyTuple_Check(map))
1005 return;
1006 PyErr_Fetch(&error_type, &error_value, &error_traceback);
1007 fast = f->f_localsplus;
1008 j = PyTuple_GET_SIZE(map);
1009 if (j > co->co_nlocals)
1010 j = co->co_nlocals;
1011 if (co->co_nlocals)
1012 dict_to_map(co->co_varnames, j, locals, fast, 0, clear);
1013 ncells = PyTuple_GET_SIZE(co->co_cellvars);
1014 nfreevars = PyTuple_GET_SIZE(co->co_freevars);
1015 if (ncells || nfreevars) {
1016 dict_to_map(co->co_cellvars, ncells,
1017 locals, fast + co->co_nlocals, 1, clear);
1018 /* Same test as in PyFrame_FastToLocals() above. */
1019 if (co->co_flags & CO_OPTIMIZED) {
1020 dict_to_map(co->co_freevars, nfreevars,
1021 locals, fast + co->co_nlocals + ncells, 1,
1022 clear);
1023 }
1024 }
1025 PyErr_Restore(error_type, error_value, error_traceback);
1026 }
1027
1028 /* Clear out the free list */
1029 int
PyFrame_ClearFreeList(void)
1031 {
1032 int freelist_size = numfree;
1033
1034 while (free_list != NULL) {
1035 PyFrameObject *f = free_list;
1036 free_list = free_list->f_back;
1037 PyObject_GC_Del(f);
1038 --numfree;
1039 }
1040 assert(numfree == 0);
1041 return freelist_size;
1042 }
1043
1044 void
PyFrame_Fini(void)
1046 {
1047 (void)PyFrame_ClearFreeList();
1048 }
1049
1050 /* Print summary info about the state of the optimized allocator */
1051 void
_PyFrame_DebugMallocStats(FILE *out)
1053 {
1054 _PyDebugAllocatorStats(out,
1055 "free PyFrameObject",
1056 numfree, sizeof(PyFrameObject));
1057 }
1058
1059