#ifndef Py_INTERNAL_CODE_H
#define Py_INTERNAL_CODE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_lock.h"        // PyMutex
#include "pycore_backoff.h"     // _Py_BackoffCounter


/* Each instruction in a code object is a fixed-width value,
 * currently 2 bytes: 1-byte opcode + 1-byte oparg.  The EXTENDED_ARG
 * opcode allows for larger values but the current limit is 3 uses
 * of EXTENDED_ARG (see Python/compile.c), for a maximum
 * 32-bit value.  This aligns with the note in Python/compile.c
 * (compiler_addop_i_line) indicating that the max oparg value is
 * 2**32 - 1, rather than INT_MAX.
 */

typedef union {
    uint16_t cache;
    struct {
        uint8_t code;
        uint8_t arg;
    } op;
    _Py_BackoffCounter counter;  // First cache entry of specializable op
} _Py_CODEUNIT;

#define _PyCode_CODE(CO) _Py_RVALUE((_Py_CODEUNIT *)(CO)->co_code_adaptive)
#define _PyCode_NBYTES(CO) (Py_SIZE(CO) * (Py_ssize_t)sizeof(_Py_CODEUNIT))
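
/* For illustration, a sketch of walking the adaptive bytecode; `co` is a
 * hypothetical PyCodeObject pointer.  Note that inline cache entries
 * (defined below) are interleaved with the instructions, so real consumers
 * skip over them rather than stepping one unit at a time:
 *
 *     _Py_CODEUNIT *instr = _PyCode_CODE(co);
 *     _Py_CODEUNIT *end = instr + Py_SIZE(co);
 *     while (instr < end) {
 *         uint8_t opcode = instr->op.code;
 *         uint8_t oparg = instr->op.arg;
 *         instr += 1 + n_caches;  // n_caches from the opcode metadata
 *                                 // tables (e.g. _PyOpcode_Caches)
 *     }
 */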


/* These macros only remain defined for compatibility. */
#define _Py_OPCODE(word) ((word).op.code)
#define _Py_OPARG(word) ((word).op.arg)

static inline _Py_CODEUNIT
_py_make_codeunit(uint8_t opcode, uint8_t oparg)
{
    // No designated initialisers because of C++ compat
    _Py_CODEUNIT word;
    word.op.code = opcode;
    word.op.arg = oparg;
    return word;
}

static inline void
_py_set_opcode(_Py_CODEUNIT *word, uint8_t opcode)
{
    word->op.code = opcode;
}

#define _Py_MAKE_CODEUNIT(opcode, oparg) _py_make_codeunit((opcode), (oparg))
#define _Py_SET_OPCODE(word, opcode) _py_set_opcode(&(word), (opcode))
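
/* For illustration, a sketch of how these helpers compose; `opcode`,
 * `oparg`, and `new_opcode` are arbitrary byte values:
 *
 *     _Py_CODEUNIT word = _Py_MAKE_CODEUNIT(opcode, oparg);
 *     assert(_Py_OPCODE(word) == opcode && _Py_OPARG(word) == oparg);
 *     _Py_SET_OPCODE(word, new_opcode);  // rewrite the opcode in place
 */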


// We hide some of the newer PyCodeObject fields behind macros.
// This helps with backporting certain changes to 3.12.
#define _PyCode_HAS_EXECUTORS(CODE) \
    (CODE->co_executors != NULL)
#define _PyCode_HAS_INSTRUMENTATION(CODE) \
    (CODE->_co_instrumentation_version > 0)

struct _py_code_state {
    PyMutex mutex;
    // Interned constants from code objects. Used by the free-threaded build.
    struct _Py_hashtable_t *constants;
};

extern PyStatus _PyCode_Init(PyInterpreterState *interp);
extern void _PyCode_Fini(PyInterpreterState *interp);

#define CODE_MAX_WATCHERS 8

/* PEP 659
 * Specialization and quickening structs and helper functions
 */


// Inline caches. If you change the number of cache entries for an instruction,
// you must *also* update the number of cache entries in Lib/opcode.py and bump
// the magic number in Lib/importlib/_bootstrap_external.py!

#define CACHE_ENTRIES(cache) (sizeof(cache)/sizeof(_Py_CODEUNIT))
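
/* For example, _PyLoadGlobalCache below has four 16-bit fields (8 bytes),
 * so CACHE_ENTRIES(_PyLoadGlobalCache) == 4: a specialized LOAD_GLOBAL is
 * followed by four cache code units in the instruction stream. */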

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t module_keys_version;
    uint16_t builtin_keys_version;
    uint16_t index;
} _PyLoadGlobalCache;

#define INLINE_CACHE_ENTRIES_LOAD_GLOBAL CACHE_ENTRIES(_PyLoadGlobalCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyBinaryOpCache;

#define INLINE_CACHE_ENTRIES_BINARY_OP CACHE_ENTRIES(_PyBinaryOpCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyUnpackSequenceCache;

#define INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE \
    CACHE_ENTRIES(_PyUnpackSequenceCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyCompareOpCache;

#define INLINE_CACHE_ENTRIES_COMPARE_OP CACHE_ENTRIES(_PyCompareOpCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyBinarySubscrCache;

#define INLINE_CACHE_ENTRIES_BINARY_SUBSCR CACHE_ENTRIES(_PyBinarySubscrCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PySuperAttrCache;

#define INLINE_CACHE_ENTRIES_LOAD_SUPER_ATTR CACHE_ENTRIES(_PySuperAttrCache)

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t version[2];
    uint16_t index;
} _PyAttrCache;

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t type_version[2];
    union {
        uint16_t keys_version[2];
        uint16_t dict_offset;
    };
    uint16_t descr[4];
} _PyLoadMethodCache;

// MUST be the larger of CACHE_ENTRIES(_PyAttrCache) and CACHE_ENTRIES(_PyLoadMethodCache)
#define INLINE_CACHE_ENTRIES_LOAD_ATTR CACHE_ENTRIES(_PyLoadMethodCache)

#define INLINE_CACHE_ENTRIES_STORE_ATTR CACHE_ENTRIES(_PyAttrCache)

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t func_version[2];
} _PyCallCache;

#define INLINE_CACHE_ENTRIES_CALL CACHE_ENTRIES(_PyCallCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyStoreSubscrCache;

#define INLINE_CACHE_ENTRIES_STORE_SUBSCR CACHE_ENTRIES(_PyStoreSubscrCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyForIterCache;

#define INLINE_CACHE_ENTRIES_FOR_ITER CACHE_ENTRIES(_PyForIterCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PySendCache;

#define INLINE_CACHE_ENTRIES_SEND CACHE_ENTRIES(_PySendCache)

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t version[2];
} _PyToBoolCache;

#define INLINE_CACHE_ENTRIES_TO_BOOL CACHE_ENTRIES(_PyToBoolCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyContainsOpCache;

#define INLINE_CACHE_ENTRIES_CONTAINS_OP CACHE_ENTRIES(_PyContainsOpCache)

// Borrowed references to common callables:
struct callable_cache {
    PyObject *isinstance;
    PyObject *len;
    PyObject *list_append;
    PyObject *object__getattribute__;
};

/* "Locals plus" for a code object is the set of locals + cell vars +
 * free vars.  This relates to variable names as well as offsets into
 * the "fast locals" storage array of execution frames.  The compiler
 * builds the list of names, their offsets, and the corresponding
 * kind of local.
 *
 * Those kinds represent the source of the initial value and the
 * variable's scope (as related to closures).  A "local" is an
 * argument or other variable defined in the current scope.  A "free"
 * variable is one that is defined in an outer scope and comes from
 * the function's closure.  A "cell" variable is a local that escapes
 * into an inner function as part of a closure, and thus must be
 * wrapped in a cell.  Any "local" can also be a "cell", but the
 * "free" kind is mutually exclusive with both.
 */

// Note that these all fit within a byte, as do combinations.
// Later, we will use the smaller numbers to differentiate the different
// kinds of locals (e.g. pos-only arg, varkwargs, local-only).
#define CO_FAST_HIDDEN  0x10
#define CO_FAST_LOCAL   0x20
#define CO_FAST_CELL    0x40
#define CO_FAST_FREE    0x80

typedef unsigned char _PyLocals_Kind;

static inline _PyLocals_Kind
_PyLocals_GetKind(PyObject *kinds, int i)
{
    assert(PyBytes_Check(kinds));
    assert(0 <= i && i < PyBytes_GET_SIZE(kinds));
    char *ptr = PyBytes_AS_STRING(kinds);
    return (_PyLocals_Kind)(ptr[i]);
}

static inline void
_PyLocals_SetKind(PyObject *kinds, int i, _PyLocals_Kind kind)
{
    assert(PyBytes_Check(kinds));
    assert(0 <= i && i < PyBytes_GET_SIZE(kinds));
    char *ptr = PyBytes_AS_STRING(kinds);
    ptr[i] = (char) kind;
}
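
/* For illustration, a sketch of checking a variable's kind; `kinds` is a
 * localspluskinds bytes object as described above:
 *
 *     _PyLocals_Kind kind = _PyLocals_GetKind(kinds, i);
 *     if (kind & CO_FAST_FREE) {
 *         // defined in an outer scope; comes in via the closure
 *     }
 */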


struct _PyCodeConstructor {
    /* metadata */
    PyObject *filename;
    PyObject *name;
    PyObject *qualname;
    int flags;

    /* the code */
    PyObject *code;
    int firstlineno;
    PyObject *linetable;

    /* used by the code */
    PyObject *consts;
    PyObject *names;

    /* mapping frame offsets to information */
    PyObject *localsplusnames;  // Tuple of strings
    PyObject *localspluskinds;  // Bytes object, one byte per variable

    /* args (within varnames) */
    int argcount;
    int posonlyargcount;
    // XXX Replace argcount with posorkwargcount (argcount - posonlyargcount).
    int kwonlyargcount;

    /* needed to create the frame */
    int stacksize;

    /* used by the eval loop */
    PyObject *exceptiontable;
};

// Using an "arguments struct" like this is helpful for maintainability
// in a case such as this with many parameters.  It does bear a risk:
// if the struct changes and callers are not updated properly then the
// compiler will not catch problems (like a missing argument).  This can
// cause hard-to-debug problems.  The risk is mitigated by the use of
// check_code() in codeobject.c.  However, we may decide to switch
// back to a regular function signature.  Regardless, this approach
// wouldn't be appropriate if this weren't a strictly internal API.
// (See the comments in https://github.com/python/cpython/pull/26258.)
extern int _PyCode_Validate(struct _PyCodeConstructor *);
extern PyCodeObject* _PyCode_New(struct _PyCodeConstructor *);
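
/* For illustration, the intended call sequence, assuming a fully
 * populated constructor struct `con`:
 *
 *     struct _PyCodeConstructor con = { .filename = filename, ... };
 *     if (_PyCode_Validate(&con) < 0) {
 *         return NULL;
 *     }
 *     PyCodeObject *co = _PyCode_New(&con);
 */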


/* Private API */

/* Getters for internal PyCodeObject data. */
extern PyObject* _PyCode_GetVarnames(PyCodeObject *);
extern PyObject* _PyCode_GetCellvars(PyCodeObject *);
extern PyObject* _PyCode_GetFreevars(PyCodeObject *);
extern PyObject* _PyCode_GetCode(PyCodeObject *);

/** API for initializing the line number tables. */
extern int _PyCode_InitAddressRange(PyCodeObject* co, PyCodeAddressRange *bounds);

/** Out of process API for initializing the location table. */
extern void _PyLineTable_InitAddressRange(
    const char *linetable,
    Py_ssize_t length,
    int firstlineno,
    PyCodeAddressRange *range);

/** API for traversing the line number table. */
extern int _PyLineTable_NextAddressRange(PyCodeAddressRange *range);
extern int _PyLineTable_PreviousAddressRange(PyCodeAddressRange *range);

/** API for executors */
extern void _PyCode_Clear_Executors(PyCodeObject *code);

#ifdef Py_GIL_DISABLED
// gh-115999 tracks progress on addressing this.
#define ENABLE_SPECIALIZATION 0
#else
#define ENABLE_SPECIALIZATION 1
#endif

/* Specialization functions */

extern void _Py_Specialize_LoadSuperAttr(PyObject *global_super, PyObject *cls,
                                         _Py_CODEUNIT *instr, int load_method);
extern void _Py_Specialize_LoadAttr(PyObject *owner, _Py_CODEUNIT *instr,
                                    PyObject *name);
extern void _Py_Specialize_StoreAttr(PyObject *owner, _Py_CODEUNIT *instr,
                                     PyObject *name);
extern void _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins,
                                      _Py_CODEUNIT *instr, PyObject *name);
extern void _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container,
                                        _Py_CODEUNIT *instr);
extern void _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub,
                                       _Py_CODEUNIT *instr);
extern void _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr,
                                int nargs);
extern void _Py_Specialize_BinaryOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr,
                                    int oparg, PyObject **locals);
extern void _Py_Specialize_CompareOp(PyObject *lhs, PyObject *rhs,
                                     _Py_CODEUNIT *instr, int oparg);
extern void _Py_Specialize_UnpackSequence(PyObject *seq, _Py_CODEUNIT *instr,
                                          int oparg);
extern void _Py_Specialize_ForIter(PyObject *iter, _Py_CODEUNIT *instr, int oparg);
extern void _Py_Specialize_Send(PyObject *receiver, _Py_CODEUNIT *instr);
extern void _Py_Specialize_ToBool(PyObject *value, _Py_CODEUNIT *instr);
extern void _Py_Specialize_ContainsOp(PyObject *value, _Py_CODEUNIT *instr);

#ifdef Py_STATS

#include "pycore_bitutils.h"  // _Py_bit_length

#define STAT_INC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name++; } while (0)
#define STAT_DEC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name--; } while (0)
#define OPCODE_EXE_INC(opname) do { if (_Py_stats) _Py_stats->opcode_stats[opname].execution_count++; } while (0)
#define CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.name++; } while (0)
#define OBJECT_STAT_INC(name) do { if (_Py_stats) _Py_stats->object_stats.name++; } while (0)
#define OBJECT_STAT_INC_COND(name, cond) \
    do { if (_Py_stats && cond) _Py_stats->object_stats.name++; } while (0)
#define EVAL_CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.eval_calls[name]++; } while (0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) \
    do { if (_Py_stats && PyFunction_Check(callable)) _Py_stats->call_stats.eval_calls[name]++; } while (0)
#define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0)
#define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0)
#define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0)
#define UOP_PAIR_INC(uopcode, lastuop)                                              \
    do {                                                                            \
        if (lastuop && _Py_stats) {                                                 \
            _Py_stats->optimization_stats.opcode[lastuop].pair_count[uopcode]++;    \
        }                                                                           \
        lastuop = uopcode;                                                          \
    } while (0)
#define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0)
#define OPT_ERROR_IN_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.error_in_opcode[opname]++; } while (0)
#define OPT_HIST(length, name) \
    do { \
        if (_Py_stats) { \
            int bucket = _Py_bit_length(length >= 1 ? length - 1 : 0); \
            bucket = (bucket >= _Py_UOP_HIST_SIZE) ? _Py_UOP_HIST_SIZE - 1 : bucket; \
            _Py_stats->optimization_stats.name[bucket]++; \
        } \
    } while (0)
#define RARE_EVENT_STAT_INC(name) do { if (_Py_stats) _Py_stats->rare_event_stats.name++; } while (0)

// Export for '_opcode' shared extension
PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void);

#else
#define STAT_INC(opname, name) ((void)0)
#define STAT_DEC(opname, name) ((void)0)
#define OPCODE_EXE_INC(opname) ((void)0)
#define CALL_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC_COND(name, cond) ((void)0)
#define EVAL_CALL_STAT_INC(name) ((void)0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) ((void)0)
#define GC_STAT_ADD(gen, name, n) ((void)0)
#define OPT_STAT_INC(name) ((void)0)
#define UOP_STAT_INC(opname, name) ((void)0)
#define UOP_PAIR_INC(uopcode, lastuop) ((void)0)
#define OPT_UNSUPPORTED_OPCODE(opname) ((void)0)
#define OPT_ERROR_IN_OPCODE(opname) ((void)0)
#define OPT_HIST(length, name) ((void)0)
#define RARE_EVENT_STAT_INC(name) ((void)0)
#endif  // !Py_STATS
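
/* For illustration, a typical call site in the interpreter; the stat
 * field names here are examples, and every macro expands to ((void)0)
 * when Py_STATS is not defined:
 *
 *     STAT_INC(LOAD_ATTR, hit);   // bump a per-opcode specialization stat
 *     OBJECT_STAT_INC_COND(some_counter, cond);
 */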

// Utility functions for reading/writing 32/64-bit values in the inline caches.
// Great care should be taken to ensure that these functions remain correct and
// performant! They should compile to just "move" instructions on all supported
// compilers and platforms.

// We use memcpy to let the C compiler handle unaligned accesses and endianness
// issues for us. It also seems to produce better code than manual copying for
// most compilers (see https://blog.regehr.org/archives/959 for more info).

static inline void
write_u32(uint16_t *p, uint32_t val)
{
    memcpy(p, &val, sizeof(val));
}

static inline void
write_u64(uint16_t *p, uint64_t val)
{
    memcpy(p, &val, sizeof(val));
}

static inline void
write_obj(uint16_t *p, PyObject *val)
{
    memcpy(p, &val, sizeof(val));
}

static inline uint16_t
read_u16(uint16_t *p)
{
    return *p;
}

static inline uint32_t
read_u32(uint16_t *p)
{
    uint32_t val;
    memcpy(&val, p, sizeof(val));
    return val;
}

static inline uint64_t
read_u64(uint16_t *p)
{
    uint64_t val;
    memcpy(&val, p, sizeof(val));
    return val;
}

static inline PyObject *
read_obj(uint16_t *p)
{
    PyObject *val;
    memcpy(&val, p, sizeof(val));
    return val;
}

/* See Objects/exception_handling_notes.txt for details.
 */
static inline unsigned char *
parse_varint(unsigned char *p, int *result) {
    int val = p[0] & 63;
    while (p[0] & 64) {
        p++;
        val = (val << 6) | (p[0] & 63);
    }
    *result = val;
    return p+1;
}

static inline int
write_varint(uint8_t *ptr, unsigned int val)
{
    int written = 1;
    while (val >= 64) {
        *ptr++ = 64 | (val & 63);
        val >>= 6;
        written++;
    }
    *ptr = (uint8_t)val;
    return written;
}

static inline int
write_signed_varint(uint8_t *ptr, int val)
{
    unsigned int uval;
    if (val < 0) {
        // (unsigned int)(-val) has undefined behavior for INT_MIN
        uval = ((0 - (unsigned int)val) << 1) | 1;
    }
    else {
        uval = (unsigned int)val << 1;
    }
    return write_varint(ptr, uval);
}

static inline int
write_location_entry_start(uint8_t *ptr, int code, int length)
{
    assert((code & 15) == code);
    *ptr = 128 | (uint8_t)(code << 3) | (uint8_t)(length - 1);
    return 1;
}
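
/* The entry-start byte packs three fields: bit 7 is set to mark the
 * start of an entry, bits 3-6 hold the 4-bit location code, and bits
 * 0-2 hold the entry length minus one (covering 1-8 code units). */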


/** Counters
 * The first 16-bit value in each inline cache is a counter.
 *
 * When counting executions until the next specialization attempt,
 * exponential backoff is used to reduce the number of specialization failures.
 * See pycore_backoff.h for more details.
 * On a specialization failure, the backoff counter is restarted.
 */

#include "pycore_backoff.h"

// A value of 1 means that we attempt to specialize the *second* time each
// instruction is executed. Executing twice is a much better indicator of
// "hotness" than executing once, but additional warmup delays only prevent
// specialization. Most types stabilize by the second execution, too:
#define ADAPTIVE_WARMUP_VALUE 1
#define ADAPTIVE_WARMUP_BACKOFF 1

// A value of 52 means that we attempt to re-specialize after 53 misses (a prime
// number, useful for avoiding artifacts if every nth value is a different type
// or something). Setting the backoff to 0 means that the counter is reset to
// the same state as a warming-up instruction (value == 1, backoff == 1) after
// deoptimization. This isn't strictly necessary, but it is a bit easier to
// reason about when thinking about the opcode transitions as a state machine:
#define ADAPTIVE_COOLDOWN_VALUE 52
#define ADAPTIVE_COOLDOWN_BACKOFF 0

// Can't assert this in pycore_backoff.h because of header order dependencies
#if COLD_EXIT_INITIAL_VALUE <= ADAPTIVE_COOLDOWN_VALUE
#  error  "Cold exit value should be larger than adaptive cooldown value"
#endif

static inline _Py_BackoffCounter
adaptive_counter_bits(uint16_t value, uint16_t backoff) {
    return make_backoff_counter(value, backoff);
}

static inline _Py_BackoffCounter
adaptive_counter_warmup(void) {
    return adaptive_counter_bits(ADAPTIVE_WARMUP_VALUE,
                                 ADAPTIVE_WARMUP_BACKOFF);
}

static inline _Py_BackoffCounter
adaptive_counter_cooldown(void) {
    return adaptive_counter_bits(ADAPTIVE_COOLDOWN_VALUE,
                                 ADAPTIVE_COOLDOWN_BACKOFF);
}

static inline _Py_BackoffCounter
adaptive_counter_backoff(_Py_BackoffCounter counter) {
    return restart_backoff_counter(counter);
}
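
/* For illustration, a rough sketch of the counter lifecycle: a new
 * instruction's cache starts at adaptive_counter_warmup(); once the
 * counter is exhausted the interpreter attempts to specialize.  After a
 * specialized instruction deoptimizes, it re-enters the adaptive state
 * via adaptive_counter_cooldown(), and each failed specialization
 * attempt restarts the counter with adaptive_counter_backoff(counter),
 * which backs off exponentially (see pycore_backoff.h). */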


/* Comparison bit masks. */

/* Note this evaluates its arguments twice each */
#define COMPARISON_BIT(x, y) (1 << (2 * ((x) >= (y)) + ((x) <= (y))))

/*
 * The following bits are chosen so that the value of
 * COMPARISON_BIT(left, right)
 * masked by the values below will be non-zero if the
 * comparison is true, and zero if it is false */

/* This is for values that are unordered, i.e. NaN, not types that are unordered, e.g. sets */
#define COMPARISON_UNORDERED 1

#define COMPARISON_LESS_THAN 2
#define COMPARISON_GREATER_THAN 4
#define COMPARISON_EQUALS 8

#define COMPARISON_NOT_EQUALS (COMPARISON_UNORDERED | COMPARISON_LESS_THAN | COMPARISON_GREATER_THAN)
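
/* Worked examples: if x == y, both (x >= y) and (x <= y) are 1, so
 * COMPARISON_BIT(x, y) == 1 << 3 == COMPARISON_EQUALS.  If x < y,
 * (x >= y) == 0 and (x <= y) == 1, giving 1 << 1 == COMPARISON_LESS_THAN.
 * An unordered pair (e.g. a NaN operand) yields 1 << 0 == COMPARISON_UNORDERED. */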

extern int _Py_Instrument(PyCodeObject *co, PyInterpreterState *interp);

extern int _Py_GetBaseOpcode(PyCodeObject *code, int offset);

extern int _PyInstruction_GetLength(PyCodeObject *code, int offset);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CODE_H */