/* Frame object implementation */

#include "Python.h"

#include "code.h"
#include "frameobject.h"
#include "opcode.h"
#include "structmember.h"

#define OFF(x) offsetof(PyFrameObject, x)

static PyMemberDef frame_memberlist[] = {
    {"f_back",        T_OBJECT,    OFF(f_back),      READONLY},
    {"f_code",        T_OBJECT,    OFF(f_code),      READONLY},
    {"f_builtins",    T_OBJECT,    OFF(f_builtins),  READONLY},
    {"f_globals",     T_OBJECT,    OFF(f_globals),   READONLY},
    {"f_lasti",       T_INT,       OFF(f_lasti),     READONLY},
    {NULL}      /* Sentinel */
};

static PyObject *
frame_getlocals(PyFrameObject *f, void *closure)
{
    if (PyFrame_FastToLocalsWithError(f) < 0)
        return NULL;
    Py_INCREF(f->f_locals);
    return f->f_locals;
}

int
PyFrame_GetLineNumber(PyFrameObject *f)
{
    if (f->f_trace)
        return f->f_lineno;
    else
        return PyCode_Addr2Line(f->f_code, f->f_lasti);
}

static PyObject *
frame_getlineno(PyFrameObject *f, void *closure)
{
    return PyLong_FromLong(PyFrame_GetLineNumber(f));
}

/* Setter for f_lineno - you can set f_lineno from within a trace function in
 * order to jump to a given line of code, subject to some restrictions.  Most
 * lines are OK to jump to because they don't make any assumptions about the
 * state of the stack (obvious because you could remove the line and the code
 * would still work without any stack errors), but there are some constructs
 * that limit jumping:
 *
 *  o Lines with an 'except' statement on them can't be jumped to, because
 *    they expect an exception to be on the top of the stack.
 *  o Lines that live in a 'finally' block can't be jumped from or to, since
 *    the END_FINALLY expects to clean up the stack after the 'try' block.
 *  o 'try'/'for'/'while' blocks can't be jumped into because the blockstack
 *    needs to be set up before their code runs, and for 'for' loops the
 *    iterator needs to be on the stack.
 */
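/* Illustrative Python-level sketch (not part of this file; the line
 * numbers and the tracer name are made up): the setter below is reached
 * when a registered trace function assigns to frame.f_lineno while
 * handling a 'line' event, which is how pdb implements its 'jump'
 * command.
 *
 *     import sys
 *
 *     def tracer(frame, event, arg):
 *         if event == 'line' and frame.f_lineno == 5:
 *             frame.f_lineno = 9      # ends up in frame_setlineno() below
 *         return tracer
 *
 *     sys.settrace(tracer)
 */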
static int
frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
{
    int new_lineno = 0;                 /* The new value of f_lineno */
    long l_new_lineno;
    int overflow;
    int new_lasti = 0;                  /* The new value of f_lasti */
    int new_iblock = 0;                 /* The new value of f_iblock */
    unsigned char *code = NULL;         /* The bytecode for the frame... */
    Py_ssize_t code_len = 0;            /* ...and its length */
    unsigned char *lnotab = NULL;       /* Iterating over co_lnotab */
    Py_ssize_t lnotab_len = 0;          /* (ditto) */
    int offset = 0;                     /* (ditto) */
    int line = 0;                       /* (ditto) */
    int addr = 0;                       /* (ditto) */
    int min_addr = 0;                   /* Scanning the SETUPs and POPs */
    int max_addr = 0;                   /* (ditto) */
    int delta_iblock = 0;               /* (ditto) */
    int min_delta_iblock = 0;           /* (ditto) */
    int min_iblock = 0;                 /* (ditto) */
    int f_lasti_setup_addr = 0;         /* Policing no-jump-into-finally */
    int new_lasti_setup_addr = 0;       /* (ditto) */
    int blockstack[CO_MAXBLOCKS];       /* Walking the 'finally' blocks */
    int in_finally[CO_MAXBLOCKS];       /* (ditto) */
    int blockstack_top = 0;             /* (ditto) */
    unsigned char setup_op = 0;         /* (ditto) */

    /* f_lineno must be an integer. */
    if (!PyLong_CheckExact(p_new_lineno)) {
        PyErr_SetString(PyExc_ValueError,
                        "lineno must be an integer");
        return -1;
    }

    /* You can only do this from within a trace function, not via
     * _getframe or similar hackery. */
    if (!f->f_trace)
    {
        PyErr_Format(PyExc_ValueError,
                     "f_lineno can only be set by a"
                     " line trace function");
        return -1;
    }

    /* Fail if the line comes before the start of the code block. */
    l_new_lineno = PyLong_AsLongAndOverflow(p_new_lineno, &overflow);
    if (overflow
#if SIZEOF_LONG > SIZEOF_INT
        || l_new_lineno > INT_MAX
        || l_new_lineno < INT_MIN
#endif
       ) {
        PyErr_SetString(PyExc_ValueError,
                        "lineno out of range");
        return -1;
    }
    new_lineno = (int)l_new_lineno;

    if (new_lineno < f->f_code->co_firstlineno) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes before the current code block",
                     new_lineno);
        return -1;
    }
    else if (new_lineno == f->f_code->co_firstlineno) {
        new_lasti = 0;
        new_lineno = f->f_code->co_firstlineno;
    }
    else {
        /* Find the bytecode offset for the start of the given
         * line, or the first code-owning line after it. */
        char *tmp;
        PyBytes_AsStringAndSize(f->f_code->co_lnotab,
                                &tmp, &lnotab_len);
        lnotab = (unsigned char *) tmp;
        addr = 0;
        line = f->f_code->co_firstlineno;
        new_lasti = -1;
        for (offset = 0; offset < lnotab_len; offset += 2) {
            addr += lnotab[offset];
            line += (signed char)lnotab[offset+1];
            if (line >= new_lineno) {
                new_lasti = addr;
                new_lineno = line;
                break;
            }
        }
    }

    /* If we didn't reach the requested line, return an error. */
    if (new_lasti == -1) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes after the current code block",
                     new_lineno);
        return -1;
    }

    /* We're now ready to look at the bytecode. */
    PyBytes_AsStringAndSize(f->f_code->co_code, (char **)&code, &code_len);
    min_addr = Py_MIN(new_lasti, f->f_lasti);
    max_addr = Py_MAX(new_lasti, f->f_lasti);

    /* You can't jump onto a line with an 'except' statement on it -
     * they expect to have an exception on the top of the stack, which
     * won't be true if you jump to them.  They always start with code
     * that either pops the exception using POP_TOP (plain 'except:'
     * lines do this) or duplicates the exception on the stack using
     * DUP_TOP (if there's an exception type specified).  See compile.c,
     * 'com_try_except' for the full details.  There aren't any other
     * cases (AFAIK) where a line's code can start with DUP_TOP or
     * POP_TOP, but if any ever appear, they'll be subject to the same
     * restriction (but with a different error message). */
    if (code[new_lasti] == DUP_TOP || code[new_lasti] == POP_TOP) {
        PyErr_SetString(PyExc_ValueError,
            "can't jump to 'except' line as there's no exception");
        return -1;
    }

    /* You can't jump into or out of a 'finally' block because the 'try'
     * block leaves something on the stack for the END_FINALLY to clean
     * up.  So we walk the bytecode, maintaining a simulated blockstack.
     * When we reach the old or new address and it's in a 'finally' block
     * we note the address of the corresponding SETUP_FINALLY.  The jump
     * is only legal if neither address is in a 'finally' block or
     * they're both in the same one.  'blockstack' is a stack of the
     * bytecode addresses of the SETUP_X opcodes, and 'in_finally' tracks
     * whether we're in a 'finally' block at each blockstack level. */
    f_lasti_setup_addr = -1;
    new_lasti_setup_addr = -1;
    memset(blockstack, '\0', sizeof(blockstack));
    memset(in_finally, '\0', sizeof(in_finally));
    blockstack_top = 0;
    for (addr = 0; addr < code_len; addr += sizeof(_Py_CODEUNIT)) {
        unsigned char op = code[addr];
        switch (op) {
        case SETUP_LOOP:
        case SETUP_EXCEPT:
        case SETUP_FINALLY:
        case SETUP_WITH:
        case SETUP_ASYNC_WITH:
            blockstack[blockstack_top++] = addr;
            in_finally[blockstack_top-1] = 0;
            break;

        case POP_BLOCK:
            assert(blockstack_top > 0);
            setup_op = code[blockstack[blockstack_top-1]];
            if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH
                || setup_op == SETUP_ASYNC_WITH) {
                in_finally[blockstack_top-1] = 1;
            }
            else {
                blockstack_top--;
            }
            break;

        case END_FINALLY:
            /* Ignore END_FINALLYs for SETUP_EXCEPTs - they exist
             * in the bytecode but don't correspond to an actual
             * 'finally' block.  (If blockstack_top is 0, we must
             * be seeing such an END_FINALLY.) */
            if (blockstack_top > 0) {
                setup_op = code[blockstack[blockstack_top-1]];
                if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH
                    || setup_op == SETUP_ASYNC_WITH) {
                    blockstack_top--;
                }
            }
            break;
        }

        /* For the addresses we're interested in, see whether they're
         * within a 'finally' block and if so, remember the address
         * of the SETUP_FINALLY. */
        if (addr == new_lasti || addr == f->f_lasti) {
            int i = 0;
            int setup_addr = -1;
            for (i = blockstack_top-1; i >= 0; i--) {
                if (in_finally[i]) {
                    setup_addr = blockstack[i];
                    break;
                }
            }

            if (setup_addr != -1) {
                if (addr == new_lasti) {
                    new_lasti_setup_addr = setup_addr;
                }

                if (addr == f->f_lasti) {
                    f_lasti_setup_addr = setup_addr;
                }
            }
        }
    }

    /* Verify that the blockstack tracking code didn't get lost. */
    assert(blockstack_top == 0);

    /* After all that, are we jumping into / out of a 'finally' block? */
    if (new_lasti_setup_addr != f_lasti_setup_addr) {
        PyErr_SetString(PyExc_ValueError,
                        "can't jump into or out of a 'finally' block");
        return -1;
    }

    /* Police block-jumping (you can't jump into the middle of a block)
     * and ensure that the blockstack finishes up in a sensible state (by
     * popping any blocks we're jumping out of).  We look at all the
     * blockstack operations between the current position and the new
     * one, and keep track of how many blocks we drop out of on the way.
     * By also keeping track of the lowest blockstack position we see, we
     * can tell whether the jump goes into any blocks without coming out
     * again - in that case we raise an exception below. */
    delta_iblock = 0;
    for (addr = min_addr; addr < max_addr; addr += sizeof(_Py_CODEUNIT)) {
        unsigned char op = code[addr];
        switch (op) {
        case SETUP_LOOP:
        case SETUP_EXCEPT:
        case SETUP_FINALLY:
        case SETUP_WITH:
        case SETUP_ASYNC_WITH:
            delta_iblock++;
            break;

        case POP_BLOCK:
            delta_iblock--;
            break;
        }

        min_delta_iblock = Py_MIN(min_delta_iblock, delta_iblock);
    }

    /* Derive the absolute iblock values from the deltas. */
    min_iblock = f->f_iblock + min_delta_iblock;
    if (new_lasti > f->f_lasti) {
        /* Forwards jump. */
        new_iblock = f->f_iblock + delta_iblock;
    }
    else {
        /* Backwards jump. */
        new_iblock = f->f_iblock - delta_iblock;
    }

    /* Are we jumping into a block? */
    if (new_iblock > min_iblock) {
        PyErr_SetString(PyExc_ValueError,
                        "can't jump into the middle of a block");
        return -1;
    }

    /* Pop any blocks that we're jumping out of. */
    while (f->f_iblock > new_iblock) {
        PyTryBlock *b = &f->f_blockstack[--f->f_iblock];
        while ((f->f_stacktop - f->f_valuestack) > b->b_level) {
            PyObject *v = (*--f->f_stacktop);
            Py_DECREF(v);
        }
    }

    /* Finally set the new f_lineno and f_lasti and return OK. */
    f->f_lineno = new_lineno;
    f->f_lasti = new_lasti;
    return 0;
}

static PyObject *
frame_gettrace(PyFrameObject *f, void *closure)
{
    PyObject* trace = f->f_trace;

    if (trace == NULL)
        trace = Py_None;

    Py_INCREF(trace);

    return trace;
}

static int
frame_settrace(PyFrameObject *f, PyObject* v, void *closure)
{
    /* We rely on f_lineno being accurate when f_trace is set. */
    f->f_lineno = PyFrame_GetLineNumber(f);

    if (v == Py_None)
        v = NULL;
    Py_XINCREF(v);
    Py_XSETREF(f->f_trace, v);

    return 0;
}


static PyGetSetDef frame_getsetlist[] = {
    {"f_locals",  (getter)frame_getlocals, NULL, NULL},
    {"f_lineno",  (getter)frame_getlineno,
                  (setter)frame_setlineno, NULL},
    {"f_trace",   (getter)frame_gettrace, (setter)frame_settrace, NULL},
    {0}
};

/* Stack frames are allocated and deallocated at a considerable rate.
   In an attempt to improve the speed of function calls, we:

   1. Hold a single "zombie" frame on each code object. This retains
   the allocated and initialised frame object from an invocation of
   the code object. The zombie is reanimated the next time we need a
   frame object for that code object. Doing this saves the malloc/
   realloc required when using a free_list frame that isn't the
   correct size. It also saves some field initialisation.

   In zombie mode, no field of PyFrameObject holds a reference, but
   the following fields are still valid:

     * ob_type, ob_size, f_code, f_valuestack;

     * f_locals, f_trace,
       f_exc_type, f_exc_value, f_exc_traceback are NULL;

     * f_localsplus does not require re-allocation and
       the local variables in f_localsplus are NULL.

   2. We also maintain a separate free list of stack frames (just like
   floats are allocated in a special way -- see floatobject.c).  When
   a stack frame is on the free list, only the following members have
   a meaning:
     ob_type            == &PyFrame_Type
     f_back             next item on free list, or NULL
     f_stacksize        size of value stack
     ob_size            size of localsplus
   Note that the value and block stacks are preserved -- this can save
   another malloc() call or two (and two free() calls as well!).
   Also note that, unlike for integers, each frame object is a
   malloc'ed object in its own right -- it is only the actual calls to
   malloc() that we are trying to save here, not the administration.
   After all, while a typical program may make millions of calls, a
   call depth of more than 20 or 30 is probably already exceptional
   unless the program contains run-away recursion.  I hope.

   Later, PyFrame_MAXFREELIST was added to bound the # of frames saved on
   free_list.  Else programs creating lots of cyclic trash involving
   frames could provoke free_list into growing without bound.
*/

static PyFrameObject *free_list = NULL;
static int numfree = 0;         /* number of frames currently in free_list */
/* max value for numfree */
#define PyFrame_MAXFREELIST 200

static void
frame_dealloc(PyFrameObject *f)
{
    PyObject **p, **valuestack;
    PyCodeObject *co;

    PyObject_GC_UnTrack(f);
    Py_TRASHCAN_SAFE_BEGIN(f)
    /* Kill all local variables */
    valuestack = f->f_valuestack;
    for (p = f->f_localsplus; p < valuestack; p++)
        Py_CLEAR(*p);

    /* Free stack */
    if (f->f_stacktop != NULL) {
        for (p = valuestack; p < f->f_stacktop; p++)
            Py_XDECREF(*p);
    }

    Py_XDECREF(f->f_back);
    Py_DECREF(f->f_builtins);
    Py_DECREF(f->f_globals);
    Py_CLEAR(f->f_locals);
    Py_CLEAR(f->f_trace);
    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);

    co = f->f_code;
    if (co->co_zombieframe == NULL)
        co->co_zombieframe = f;
    else if (numfree < PyFrame_MAXFREELIST) {
        ++numfree;
        f->f_back = free_list;
        free_list = f;
    }
    else
        PyObject_GC_Del(f);

    Py_DECREF(co);
    Py_TRASHCAN_SAFE_END(f)
}

static int
frame_traverse(PyFrameObject *f, visitproc visit, void *arg)
{
    PyObject **fastlocals, **p;
    Py_ssize_t i, slots;

    Py_VISIT(f->f_back);
    Py_VISIT(f->f_code);
    Py_VISIT(f->f_builtins);
    Py_VISIT(f->f_globals);
    Py_VISIT(f->f_locals);
    Py_VISIT(f->f_trace);
    Py_VISIT(f->f_exc_type);
    Py_VISIT(f->f_exc_value);
    Py_VISIT(f->f_exc_traceback);

    /* locals */
    slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) +
            PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_VISIT(*fastlocals);

    /* stack */
    if (f->f_stacktop != NULL) {
        for (p = f->f_valuestack; p < f->f_stacktop; p++)
            Py_VISIT(*p);
    }
    return 0;
}

static void
frame_tp_clear(PyFrameObject *f)
{
    PyObject **fastlocals, **p, **oldtop;
    Py_ssize_t i, slots;

    /* Before anything else, make sure that this frame is clearly marked
     * as being defunct!  Else, e.g., a generator reachable from this
     * frame may also point to this frame, believe itself to still be
     * active, and try cleaning up this frame again.
     */
    oldtop = f->f_stacktop;
    f->f_stacktop = NULL;
    f->f_executing = 0;

    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);
    Py_CLEAR(f->f_trace);

    /* locals */
    slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) +
            PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_CLEAR(*fastlocals);

    /* stack */
    if (oldtop != NULL) {
        for (p = f->f_valuestack; p < oldtop; p++)
            Py_CLEAR(*p);
    }
}

static PyObject *
frame_clear(PyFrameObject *f)
{
    if (f->f_executing) {
        PyErr_SetString(PyExc_RuntimeError,
                        "cannot clear an executing frame");
        return NULL;
    }
    if (f->f_gen) {
        _PyGen_Finalize(f->f_gen);
        assert(f->f_gen == NULL);
    }
    frame_tp_clear(f);
    Py_RETURN_NONE;
}

PyDoc_STRVAR(clear__doc__,
"F.clear(): clear most references held by the frame");

static PyObject *
frame_sizeof(PyFrameObject *f)
{
    Py_ssize_t res, extras, ncells, nfrees;

    ncells = PyTuple_GET_SIZE(f->f_code->co_cellvars);
    nfrees = PyTuple_GET_SIZE(f->f_code->co_freevars);
    extras = f->f_code->co_stacksize + f->f_code->co_nlocals +
             ncells + nfrees;
    /* subtract one as it is already included in PyFrameObject */
    res = sizeof(PyFrameObject) + (extras-1) * sizeof(PyObject *);

    return PyLong_FromSsize_t(res);
}
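/* Worked example for the computation above (the numbers are assumed,
 * for illustration only): with co_stacksize == 4, co_nlocals == 3, one
 * cell variable and no free variables, extras == 4 + 3 + 1 + 0 == 8,
 * so __sizeof__() reports sizeof(PyFrameObject) plus 7 times
 * sizeof(PyObject *). */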

PyDoc_STRVAR(sizeof__doc__,
"F.__sizeof__() -> size of F in memory, in bytes");

static PyMethodDef frame_methods[] = {
    {"clear",        (PyCFunction)frame_clear,   METH_NOARGS,
     clear__doc__},
    {"__sizeof__",   (PyCFunction)frame_sizeof,  METH_NOARGS,
     sizeof__doc__},
    {NULL, NULL}    /* sentinel */
};

PyTypeObject PyFrame_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "frame",
    sizeof(PyFrameObject),
    sizeof(PyObject *),
    (destructor)frame_dealloc,                  /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_reserved */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    PyObject_GenericSetAttr,                    /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    (traverseproc)frame_traverse,               /* tp_traverse */
    (inquiry)frame_tp_clear,                    /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    frame_methods,                              /* tp_methods */
    frame_memberlist,                           /* tp_members */
    frame_getsetlist,                           /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
};

_Py_IDENTIFIER(__builtins__);

int _PyFrame_Init()
{
    /* Before, PyId___builtins__ was a string created explicitly in
       this function. Now there is nothing to initialize anymore, but
       the function is kept for backward compatibility. */
    return 1;
}

PyFrameObject *
PyFrame_New(PyThreadState *tstate, PyCodeObject *code, PyObject *globals,
            PyObject *locals)
{
    PyFrameObject *back = tstate->frame;
    PyFrameObject *f;
    PyObject *builtins;
    Py_ssize_t i;

#ifdef Py_DEBUG
    if (code == NULL || globals == NULL || !PyDict_Check(globals) ||
        (locals != NULL && !PyMapping_Check(locals))) {
        PyErr_BadInternalCall();
        return NULL;
    }
#endif
    if (back == NULL || back->f_globals != globals) {
        builtins = _PyDict_GetItemId(globals, &PyId___builtins__);
        if (builtins) {
            if (PyModule_Check(builtins)) {
                builtins = PyModule_GetDict(builtins);
                assert(builtins != NULL);
            }
        }
        if (builtins == NULL) {
            /* No builtins!  Make up a minimal one;
               give them 'None', at least. */
            builtins = PyDict_New();
            if (builtins == NULL ||
                PyDict_SetItemString(
                    builtins, "None", Py_None) < 0)
                return NULL;
        }
        else
            Py_INCREF(builtins);

    }
    else {
        /* If we share the globals, we share the builtins.
           Save a lookup and a call. */
        builtins = back->f_builtins;
        assert(builtins != NULL);
        Py_INCREF(builtins);
    }
    if (code->co_zombieframe != NULL) {
        f = code->co_zombieframe;
        code->co_zombieframe = NULL;
        _Py_NewReference((PyObject *)f);
        assert(f->f_code == code);
    }
    else {
        Py_ssize_t extras, ncells, nfrees;
        ncells = PyTuple_GET_SIZE(code->co_cellvars);
        nfrees = PyTuple_GET_SIZE(code->co_freevars);
        extras = code->co_stacksize + code->co_nlocals + ncells +
                 nfrees;
        if (free_list == NULL) {
            f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
                                   extras);
            if (f == NULL) {
                Py_DECREF(builtins);
                return NULL;
            }
        }
        else {
            assert(numfree > 0);
            --numfree;
            f = free_list;
            free_list = free_list->f_back;
            if (Py_SIZE(f) < extras) {
                PyFrameObject *new_f = PyObject_GC_Resize(PyFrameObject, f, extras);
                if (new_f == NULL) {
                    PyObject_GC_Del(f);
                    Py_DECREF(builtins);
                    return NULL;
                }
                f = new_f;
            }
            _Py_NewReference((PyObject *)f);
        }

        f->f_code = code;
        extras = code->co_nlocals + ncells + nfrees;
        f->f_valuestack = f->f_localsplus + extras;
        for (i=0; i<extras; i++)
            f->f_localsplus[i] = NULL;
        f->f_locals = NULL;
        f->f_trace = NULL;
        f->f_exc_type = f->f_exc_value = f->f_exc_traceback = NULL;
    }
    f->f_stacktop = f->f_valuestack;
    f->f_builtins = builtins;
    Py_XINCREF(back);
    f->f_back = back;
    Py_INCREF(code);
    Py_INCREF(globals);
    f->f_globals = globals;
    /* Most functions have CO_NEWLOCALS and CO_OPTIMIZED set. */
    if ((code->co_flags & (CO_NEWLOCALS | CO_OPTIMIZED)) ==
        (CO_NEWLOCALS | CO_OPTIMIZED))
        ; /* f_locals = NULL; will be set by PyFrame_FastToLocals() */
    else if (code->co_flags & CO_NEWLOCALS) {
        locals = PyDict_New();
        if (locals == NULL) {
            Py_DECREF(f);
            return NULL;
        }
        f->f_locals = locals;
    }
    else {
        if (locals == NULL)
            locals = globals;
        Py_INCREF(locals);
        f->f_locals = locals;
    }

    f->f_lasti = -1;
    f->f_lineno = code->co_firstlineno;
    f->f_iblock = 0;
    f->f_executing = 0;
    f->f_gen = NULL;

    _PyObject_GC_TRACK(f);
    return f;
}
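/* Hedged usage sketch for PyFrame_New() (not taken from this file; the
 * variable names are made up and error handling is abbreviated): given
 * a code object and a globals dict, an embedder can create a frame for
 * the current thread state like this.
 *
 *     PyThreadState *tstate = PyThreadState_Get();
 *     PyFrameObject *frame = PyFrame_New(tstate, code, globals, NULL);
 *     if (frame == NULL)
 *         return NULL;
 *     ...
 *     Py_DECREF(frame);
 */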

/* Block management */

void
PyFrame_BlockSetup(PyFrameObject *f, int type, int handler, int level)
{
    PyTryBlock *b;
    if (f->f_iblock >= CO_MAXBLOCKS)
        Py_FatalError("XXX block stack overflow");
    b = &f->f_blockstack[f->f_iblock++];
    b->b_type = type;
    b->b_level = level;
    b->b_handler = handler;
}

PyTryBlock *
PyFrame_BlockPop(PyFrameObject *f)
{
    PyTryBlock *b;
    if (f->f_iblock <= 0)
        Py_FatalError("XXX block stack underflow");
    b = &f->f_blockstack[--f->f_iblock];
    return b;
}

/* Convert between "fast" version of locals and dictionary version.

   map and values are input arguments.  map is a tuple of strings.
   values is an array of PyObject*.  At index i, map[i] is the name of
   the variable with value values[i].  The function copies the first
   nmap variables from map/values into dict.  If values[i] is NULL,
   the variable is deleted from dict.

   If deref is true, then the values being copied are cell variables
   and the value is extracted from the cell variable before being put
   in dict.
 */
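/* For example (names and values made up for illustration): with
 * map == ('x', 'y'), nmap == 2, deref == 0, values[0] pointing at some
 * object and values[1] == NULL, the call sets dict['x'] to that object
 * and deletes dict['y'] if it is present (a missing 'y' is not an
 * error). */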

static int
map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
            int deref)
{
    Py_ssize_t j;
    assert(PyTuple_Check(map));
    assert(PyDict_Check(dict));
    assert(PyTuple_Size(map) >= nmap);
    for (j = nmap; --j >= 0; ) {
        PyObject *key = PyTuple_GET_ITEM(map, j);
        PyObject *value = values[j];
        assert(PyUnicode_Check(key));
        if (deref && value != NULL) {
            assert(PyCell_Check(value));
            value = PyCell_GET(value);
        }
        if (value == NULL) {
            if (PyObject_DelItem(dict, key) != 0) {
                if (PyErr_ExceptionMatches(PyExc_KeyError))
                    PyErr_Clear();
                else
                    return -1;
            }
        }
        else {
            if (PyObject_SetItem(dict, key, value) != 0)
                return -1;
        }
    }
    return 0;
}

/* Copy values from the "locals" dict into the fast locals.

   dict is an input argument containing string keys representing
   variable names and arbitrary PyObject* as values.

   map and values are input arguments.  map is a tuple of strings.
   values is an array of PyObject*.  At index i, map[i] is the name of
   the variable whose fast slot is values[i].  The function copies the
   values for the first nmap variables from dict into values.

   If deref is true, then the fast slots being written are cell
   variables and the value is stored into the cell rather than into
   the array slot itself.  If clear is true, then variables in map but
   not in dict have their fast slot (or cell) set to NULL; if clear is
   false, variables missing in dict are ignored.

   Exceptions raised while reading the dict or writing the fast locals
   are silently ignored, because there is no good way to report them.
 */
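/* For example (names made up for illustration): with map == ('x',),
 * nmap == 1, deref == 0 and clear == 1, a dict that lacks 'x' causes
 * values[0] to be set to NULL (the fast local becomes unbound), while
 * a dict entry for 'x' is copied into values[0]. */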

static void
dict_to_map(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
            int deref, int clear)
{
    Py_ssize_t j;
    assert(PyTuple_Check(map));
    assert(PyDict_Check(dict));
    assert(PyTuple_Size(map) >= nmap);
    for (j = nmap; --j >= 0; ) {
        PyObject *key = PyTuple_GET_ITEM(map, j);
        PyObject *value = PyObject_GetItem(dict, key);
        assert(PyUnicode_Check(key));
        /* We only care about NULLs if clear is true. */
        if (value == NULL) {
            PyErr_Clear();
            if (!clear)
                continue;
        }
        if (deref) {
            assert(PyCell_Check(values[j]));
            if (PyCell_GET(values[j]) != value) {
                if (PyCell_Set(values[j], value) < 0)
                    PyErr_Clear();
            }
        } else if (values[j] != value) {
            Py_XINCREF(value);
            Py_XSETREF(values[j], value);
        }
        Py_XDECREF(value);
    }
}

int
PyFrame_FastToLocalsWithError(PyFrameObject *f)
{
    /* Merge fast locals into f->f_locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyCodeObject *co;
    Py_ssize_t j;
    Py_ssize_t ncells, nfreevars;

    if (f == NULL) {
        PyErr_BadInternalCall();
        return -1;
    }
    locals = f->f_locals;
    if (locals == NULL) {
        locals = f->f_locals = PyDict_New();
        if (locals == NULL)
            return -1;
    }
    co = f->f_code;
    map = co->co_varnames;
    if (!PyTuple_Check(map)) {
        PyErr_Format(PyExc_SystemError,
                     "co_varnames must be a tuple, not %s",
                     Py_TYPE(map)->tp_name);
        return -1;
    }
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals) {
        if (map_to_dict(map, j, locals, fast, 0) < 0)
            return -1;
    }
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        if (map_to_dict(co->co_cellvars, ncells,
                        locals, fast + co->co_nlocals, 1))
            return -1;

        /* If the namespace is unoptimized, then one of the
           following cases applies:
           1. It does not contain free variables, because it
              uses import * or is a top-level namespace.
           2. It is a class namespace.
           We don't want to accidentally copy free variables
           into the locals dict used by the class.
        */
        if (co->co_flags & CO_OPTIMIZED) {
            if (map_to_dict(co->co_freevars, nfreevars,
                            locals, fast + co->co_nlocals + ncells, 1) < 0)
                return -1;
        }
    }
    return 0;
}

void
PyFrame_FastToLocals(PyFrameObject *f)
{
    int res;

    assert(!PyErr_Occurred());

    res = PyFrame_FastToLocalsWithError(f);
    if (res < 0)
        PyErr_Clear();
}

void
PyFrame_LocalsToFast(PyFrameObject *f, int clear)
{
    /* Merge f->f_locals into fast locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyObject *error_type, *error_value, *error_traceback;
    PyCodeObject *co;
    Py_ssize_t j;
    Py_ssize_t ncells, nfreevars;
    if (f == NULL)
        return;
    locals = f->f_locals;
    co = f->f_code;
    map = co->co_varnames;
    if (locals == NULL)
        return;
    if (!PyTuple_Check(map))
        return;
    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals)
        dict_to_map(co->co_varnames, j, locals, fast, 0, clear);
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        dict_to_map(co->co_cellvars, ncells,
                    locals, fast + co->co_nlocals, 1, clear);
        /* Same test as in PyFrame_FastToLocals() above. */
        if (co->co_flags & CO_OPTIMIZED) {
            dict_to_map(co->co_freevars, nfreevars,
                        locals, fast + co->co_nlocals + ncells, 1,
                        clear);
        }
    }
    PyErr_Restore(error_type, error_value, error_traceback);
}

/* Clear out the free list */
int
PyFrame_ClearFreeList(void)
{
    int freelist_size = numfree;

    while (free_list != NULL) {
        PyFrameObject *f = free_list;
        free_list = free_list->f_back;
        PyObject_GC_Del(f);
        --numfree;
    }
    assert(numfree == 0);
    return freelist_size;
}

void
PyFrame_Fini(void)
{
    (void)PyFrame_ClearFreeList();
}

/* Print summary info about the state of the optimized allocator */
void
_PyFrame_DebugMallocStats(FILE *out)
{
    _PyDebugAllocatorStats(out,
                           "free PyFrameObject",
                           numfree, sizeof(PyFrameObject));
}