Merge deoptimization blocks in interpreter (GH-32155)

Mark Shannon 2022-03-30 13:11:33 +01:00 committed by GitHub
parent 75eee1d57e
commit 04acfa94bb
3 changed files with 28 additions and 36 deletions

Include/internal/pycore_code.h

@@ -110,6 +110,8 @@ _PyCode_Warmup(PyCodeObject *code)
     }
 }
 
+extern uint8_t _PyOpcode_Adaptive[256];
+
 extern Py_ssize_t _Py_QuickenedCount;
 
 // Borrowed references to common callables:
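Exposing the opcode-to-adaptive-opcode table as `_PyOpcode_Adaptive` (previously the file-local `adaptive_opcodes` in specialize.c, see below) lets the interpreter core consult it directly. A minimal standalone sketch of the extern-table pattern, with made-up opcode numbers rather than CPython's real ones:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* In a header (as in pycore_code.h): extern uint8_t _PyOpcode_Adaptive[256];
   In exactly one .c file, the definition.  The opcode values below are
   hypothetical, chosen only for this demo. */
enum { LOAD_ATTR = 106, LOAD_ATTR_ADAPTIVE = 200 };

uint8_t _PyOpcode_Adaptive[256] = {
    [LOAD_ATTR] = LOAD_ATTR_ADAPTIVE,   /* unlisted opcodes stay zero */
};

int main(void)
{
    /* Zero means "no adaptive form", so the table doubles as a
       "is this opcode specializable?" predicate. */
    assert(_PyOpcode_Adaptive[LOAD_ATTR] == LOAD_ATTR_ADAPTIVE);
    assert(_PyOpcode_Adaptive[0] == 0);
    printf("LOAD_ATTR -> %d\n", _PyOpcode_Adaptive[LOAD_ATTR]);
    return 0;
}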

Python/ceval.c

@@ -1431,7 +1431,7 @@ eval_frame_handle_pending(PyThreadState *tstate)
 
 #define JUMP_TO_INSTRUCTION(op) goto PREDICT_ID(op)
 
-#define DEOPT_IF(cond, instname) if (cond) { goto instname ## _miss; }
+#define DEOPT_IF(cond, instname) if (cond) { goto miss; }
 
 #define GLOBALS() frame->f_globals
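This one-line macro change is the heart of the commit: every specialized instruction's guards now funnel into a single shared `miss` label instead of a per-opcode `opname ## _miss` label, and the handler recovers the generic instruction from a table. A standalone sketch of the funneling idea, with fake opcodes and a hypothetical `deopt_table` standing in for `_PyOpcode_Deopt`:

#include <stdint.h>
#include <stdio.h>

/* Fake opcodes, for illustration only. */
enum { HALT = 0, LOAD_X = 1, LOAD_X_SPECIALIZED = 2 };

/* Maps each specialized opcode back to its generic family. */
static const uint8_t deopt_table[256] = {
    [LOAD_X] = LOAD_X,
    [LOAD_X_SPECIALIZED] = LOAD_X,
};

#define DEOPT_IF(cond) if (cond) { goto miss; }  /* one target for all guards */

static int run(const uint8_t *program, int x_is_valid)
{
    const uint8_t *ip = program;
    int opcode;
    for (;;) {
        opcode = *ip++;
    dispatch:
        switch (opcode) {
        case HALT:
            return 0;
        case LOAD_X_SPECIALIZED:
            DEOPT_IF(!x_is_valid);   /* guard failed: take the shared miss path */
            printf("fast path\n");
            break;
        case LOAD_X:
            printf("generic path\n");
            break;
        }
        continue;
    miss:
        /* Recover the generic opcode and re-execute the same instruction
           unspecialized (cf. next_instr--; DISPATCH_GOTO() below). */
        opcode = deopt_table[opcode];
        goto dispatch;
    }
}

int main(void)
{
    const uint8_t program[] = { LOAD_X_SPECIALIZED, HALT };
    run(program, 1);   /* guard holds: fast path */
    run(program, 0);   /* guard fails: miss, then generic path */
    return 0;
}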
@@ -4595,7 +4595,6 @@ handle_eval_breaker:
         }
 
         TARGET(CALL) {
-            PREDICTED(CALL);
             int is_meth;
         call_function:
             is_meth = is_method(stack_pointer, oparg);
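The deleted PREDICTED(CALL) line falls out of the same change: the old per-opcode miss blocks re-entered an instruction via JUMP_TO_INSTRUCTION, which jumps to the label that PREDICTED(opname) plants, whereas the merged handler backs up next_instr and re-dispatches, so CALL no longer needs the label. A toy illustration of that macro pairing (simplified; the real ceval.c macros carry more machinery):

#include <stdio.h>

/* Simplified versions of ceval.c's prediction macros. */
#define PREDICT_ID(op)          PRED_##op
#define PREDICTED(op)           PREDICT_ID(op):
#define JUMP_TO_INSTRUCTION(op) goto PREDICT_ID(op)

int main(void)
{
    goto start;

PREDICTED(CALL)                  /* expands to the label PRED_CALL: */
    printf("re-executing CALL generically\n");
    return 0;

start:
    /* The old opname##_miss blocks ended like this, which is why a
       PREDICTED(CALL) label had to exist somewhere: */
    JUMP_TO_INSTRUCTION(CALL);
}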
@@ -5524,33 +5523,24 @@ handle_eval_breaker:
 
 /* Specialization misses */
 
-#define MISS_WITH_INLINE_CACHE(opname) \
-opname ## _miss: \
-    { \
-        STAT_INC(opcode, miss); \
-        STAT_INC(opname, miss); \
-        /* The counter is always the first cache entry: */ \
-        _Py_CODEUNIT *counter = (_Py_CODEUNIT *)next_instr; \
-        *counter -= 1; \
-        if (*counter == 0) { \
-            _Py_SET_OPCODE(next_instr[-1], opname ## _ADAPTIVE); \
-            STAT_INC(opname, deopt); \
-            *counter = ADAPTIVE_CACHE_BACKOFF; \
-        } \
-        JUMP_TO_INSTRUCTION(opname); \
-    }
+miss:
+    {
+        STAT_INC(opcode, miss);
+        opcode = _PyOpcode_Deopt[opcode];
+        STAT_INC(opcode, miss);
+        /* The counter is always the first cache entry: */
+        _Py_CODEUNIT *counter = (_Py_CODEUNIT *)next_instr;
+        *counter -= 1;
+        if (*counter == 0) {
+            int adaptive_opcode = _PyOpcode_Adaptive[opcode];
+            assert(adaptive_opcode);
+            _Py_SET_OPCODE(next_instr[-1], adaptive_opcode);
+            STAT_INC(opcode, deopt);
+            *counter = ADAPTIVE_CACHE_BACKOFF;
+        }
+        next_instr--;
+        DISPATCH_GOTO();
+    }
 
-MISS_WITH_INLINE_CACHE(LOAD_ATTR)
-MISS_WITH_INLINE_CACHE(STORE_ATTR)
-MISS_WITH_INLINE_CACHE(LOAD_GLOBAL)
-MISS_WITH_INLINE_CACHE(LOAD_METHOD)
-MISS_WITH_INLINE_CACHE(PRECALL)
-MISS_WITH_INLINE_CACHE(CALL)
-MISS_WITH_INLINE_CACHE(BINARY_OP)
-MISS_WITH_INLINE_CACHE(COMPARE_OP)
-MISS_WITH_INLINE_CACHE(BINARY_SUBSCR)
-MISS_WITH_INLINE_CACHE(UNPACK_SEQUENCE)
-MISS_WITH_INLINE_CACHE(STORE_SUBSCR)
 
 binary_subscr_dict_error:
     {
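The merged handler keeps the counter-backoff behaviour of the old macro: each miss decrements the counter stored in the first inline-cache entry, and only when it reaches zero does the instruction revert to its ADAPTIVE form, which may later re-specialize. A standalone model of that state machine, assuming a backoff of 64 purely for illustration (ADAPTIVE_CACHE_BACKOFF's real value lives in the CPython sources):

#include <stdint.h>
#include <stdio.h>

#define ADAPTIVE_CACHE_BACKOFF 64   /* assumed value, for the demo only */

static enum { SPECIALIZED, ADAPTIVE } state = SPECIALIZED;
static uint16_t counter = ADAPTIVE_CACHE_BACKOFF;

/* Called whenever a specialized instruction's guard fails. */
static void on_miss(void)
{
    if (--counter == 0) {
        /* Too many misses: deoptimize to the adaptive form, and reset
           the counter so the instruction cannot flap on every miss. */
        state = ADAPTIVE;
        counter = ADAPTIVE_CACHE_BACKOFF;
    }
}

int main(void)
{
    for (int i = 0; i < ADAPTIVE_CACHE_BACKOFF; i++) {
        on_miss();
    }
    printf("state=%s, counter=%u\n",
           state == ADAPTIVE ? "ADAPTIVE" : "SPECIALIZED", counter);
    return 0;
}

Note the new block also charges the miss to both sides: STAT_INC runs once on the specialized opcode and once more after the _PyOpcode_Deopt lookup maps it to its generic family.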

Python/specialize.c

@@ -17,7 +17,7 @@
 
 /* Map from opcode to adaptive opcode.
    Values of zero are ignored. */
-static uint8_t adaptive_opcodes[256] = {
+uint8_t _PyOpcode_Adaptive[256] = {
     [LOAD_ATTR] = LOAD_ATTR_ADAPTIVE,
     [LOAD_GLOBAL] = LOAD_GLOBAL_ADAPTIVE,
     [LOAD_METHOD] = LOAD_METHOD_ADAPTIVE,
@@ -143,7 +143,7 @@ print_spec_stats(FILE *out, OpcodeStats *stats)
      * even though we don't specialize them yet. */
     fprintf(out, "opcode[%d].specializable : 1\n", FOR_ITER);
     for (int i = 0; i < 256; i++) {
-        if (adaptive_opcodes[i]) {
+        if (_PyOpcode_Adaptive[i]) {
             fprintf(out, "opcode[%d].specializable : 1\n", i);
         }
         PRINT_STAT(i, specialization.success);
@@ -259,7 +259,7 @@ _PyCode_Quicken(PyCodeObject *code)
     _Py_CODEUNIT *instructions = _PyCode_CODE(code);
     for (int i = 0; i < Py_SIZE(code); i++) {
         int opcode = _Py_OPCODE(instructions[i]);
-        uint8_t adaptive_opcode = adaptive_opcodes[opcode];
+        uint8_t adaptive_opcode = _PyOpcode_Adaptive[opcode];
         if (adaptive_opcode) {
             _Py_SET_OPCODE(instructions[i], adaptive_opcode);
             // Make sure the adaptive counter is zero:
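For context, `_PyCode_Quicken` is the pass that installs those adaptive opcodes in the first place: it walks the code units, swaps in the adaptive form wherever the table has a nonzero entry, and zeroes the counter in the first cache entry. A self-contained sketch under simplified assumptions (16-bit code units with the opcode in the high byte; CPython's `_Py_CODEUNIT` layout and macros differ):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical code-unit layout: high byte opcode, low byte oparg. */
typedef uint16_t codeunit;
#define OPCODE(word)          ((word) >> 8)
#define SET_OPCODE(word, op)  ((word) = ((codeunit)(op) << 8) | ((word) & 0xff))

enum { CACHE = 0, LOAD_ATTR = 10, LOAD_ATTR_ADAPTIVE = 11 };  /* made up */

static const uint8_t adaptive_table[256] = {
    [LOAD_ATTR] = LOAD_ATTR_ADAPTIVE,
};

/* cf. _PyCode_Quicken: rewrite specializable opcodes to their adaptive
   forms and zero the counter stored in the first cache entry. */
static void quicken(codeunit *code, int size)
{
    for (int i = 0; i < size; i++) {
        uint8_t adaptive = adaptive_table[OPCODE(code[i])];
        if (adaptive) {
            SET_OPCODE(code[i], adaptive);
            code[i + 1] = 0;   /* the counter lives in the first cache entry */
        }
    }
}

int main(void)
{
    codeunit code[] = { (LOAD_ATTR << 8) | 3, (CACHE << 8) | 99 };
    quicken(code, 1);   /* size counts real instructions, not cache entries */
    printf("opcode=%d counter=%u\n", OPCODE(code[0]), code[1]);
    return 0;
}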