bpo-46841: Use inline caching for calls (GH-31709)

This commit is contained in:
Brandt Bucher 2022-03-07 11:45:00 -08:00 committed by GitHub
parent 105b9ac001
commit f193631387
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
16 changed files with 494 additions and 735 deletions

View file

@@ -1321,6 +1321,10 @@ eval_frame_handle_pending(PyThreadState *tstate)
#define JUMPTO(x) (next_instr = first_instr + (x))
#define JUMPBY(x) (next_instr += (x))
// Skip from a PRECALL over a CALL to the next instruction:
#define SKIP_CALL() \
JUMPBY(INLINE_CACHE_ENTRIES_PRECALL + 1 + INLINE_CACHE_ENTRIES_CALL)
/* Get opcode and oparg from original instructions, not quickened form. */
#define TRACING_NEXTOPARG() do { \
_Py_CODEUNIT word = ((_Py_CODEUNIT *)PyBytes_AS_STRING(frame->f_code->co_code))[INSTR_OFFSET()]; \
@@ -1431,9 +1435,6 @@ eval_frame_handle_pending(PyThreadState *tstate)
#define JUMP_TO_INSTRUCTION(op) goto PREDICT_ID(op)
#define GET_CACHE() \
_GetSpecializedCacheEntryForInstruction(first_instr, INSTR_OFFSET(), oparg)
#define DEOPT_IF(cond, instname) if (cond) { goto instname ## _miss; }
@@ -3003,8 +3004,8 @@ handle_eval_breaker:
TARGET(LOAD_GLOBAL_ADAPTIVE) {
assert(cframe.use_tracing == 0);
uint16_t counter = *next_instr;
if (counter == 0) {
_PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr;
if (cache->counter == 0) {
PyObject *name = GETITEM(names, oparg);
next_instr--;
if (_Py_Specialize_LoadGlobal(GLOBALS(), BUILTINS(), next_instr, name) < 0) {
@@ -3014,7 +3015,7 @@ handle_eval_breaker:
}
else {
STAT_INC(LOAD_GLOBAL, deferred);
*next_instr = counter-1;
cache->counter--;
JUMP_TO_INSTRUCTION(LOAD_GLOBAL);
}
}
@@ -4563,20 +4564,12 @@ handle_eval_breaker:
We'll be passing `oparg + 1` to call_function, to
make it accept the `self` as a first argument.
*/
int is_method = (PEEK(oparg + 2) != NULL);
int nargs = oparg + is_method;
int is_meth = is_method(stack_pointer, oparg);
int nargs = oparg + is_meth;
/* Move ownership of reference from stack to call_shape
* and make sure that NULL is cleared from stack */
PyObject *function = PEEK(nargs + 1);
#ifdef Py_STATS
extern int _PySpecialization_ClassifyCallable(PyObject *);
SpecializationStats *stats =
&_py_stats.opcode_stats[PRECALL].specialization;
stats->failure++;
int kind = _PySpecialization_ClassifyCallable(function);
stats->failure_kinds[kind]++;
#endif
if (!is_method && Py_TYPE(function) == &PyMethod_Type) {
if (!is_meth && Py_TYPE(function) == &PyMethod_Type) {
PyObject *meth = ((PyMethodObject *)function)->im_func;
PyObject *self = ((PyMethodObject *)function)->im_self;
Py_INCREF(meth);
@@ -4585,35 +4578,32 @@ handle_eval_breaker:
PEEK(oparg+2) = meth;
Py_DECREF(function);
}
JUMPBY(INLINE_CACHE_ENTRIES_PRECALL);
DISPATCH();
}
TARGET(PRECALL_BOUND_METHOD) {
SpecializedCacheEntry *cache = GET_CACHE();
int original_oparg = cache->adaptive.original_oparg;
int is_method = (PEEK(original_oparg + 2) != NULL);
DEOPT_IF(is_method, PRECALL);
PyObject *function = PEEK(original_oparg + 1);
DEOPT_IF(is_method(stack_pointer, oparg), PRECALL);
PyObject *function = PEEK(oparg + 1);
DEOPT_IF(Py_TYPE(function) != &PyMethod_Type, PRECALL);
STAT_INC(PRECALL, hit);
PyObject *meth = ((PyMethodObject *)function)->im_func;
PyObject *self = ((PyMethodObject *)function)->im_self;
Py_INCREF(meth);
Py_INCREF(self);
PEEK(original_oparg+1) = self;
PEEK(original_oparg+2) = meth;
PEEK(oparg + 1) = self;
PEEK(oparg + 2) = meth;
Py_DECREF(function);
JUMPBY(INLINE_CACHE_ENTRIES_PRECALL);
DISPATCH();
}
TARGET(PRECALL_PYFUNC) {
SpecializedCacheEntry *cache = GET_CACHE();
int original_oparg = cache->adaptive.original_oparg;
int is_method = (PEEK(original_oparg + 2) != NULL);
int nargs = original_oparg + is_method;
int nargs = oparg + is_method(stack_pointer, oparg);
PyObject *function = PEEK(nargs + 1);
DEOPT_IF(Py_TYPE(function) != &PyFunction_Type, PRECALL);
STAT_INC(PRECALL, hit);
JUMPBY(INLINE_CACHE_ENTRIES_PRECALL);
DISPATCH();
}
@@ -4649,6 +4639,7 @@ handle_eval_breaker:
goto error;
}
_PyFrame_SetStackPointer(frame, stack_pointer);
frame->f_lasti += INLINE_CACHE_ENTRIES_CALL;
new_frame->previous = frame;
cframe.current_frame = frame = new_frame;
CALL_STAT_INC(inlined_py_calls);
@@ -4680,21 +4671,20 @@ handle_eval_breaker:
if (res == NULL) {
goto error;
}
JUMPBY(INLINE_CACHE_ENTRIES_CALL);
CHECK_EVAL_BREAKER();
DISPATCH();
}
TARGET(PRECALL_ADAPTIVE) {
SpecializedCacheEntry *cache = GET_CACHE();
int original_oparg = cache->adaptive.original_oparg;
if (cache->adaptive.counter == 0) {
_PyPrecallCache *cache = (_PyPrecallCache *)next_instr;
if (cache->counter == 0) {
next_instr--;
int is_meth = is_method(stack_pointer, original_oparg);
int nargs = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int nargs = oparg + is_meth;
PyObject *callable = PEEK(nargs + 1);
int err = _Py_Specialize_Precall(
callable, next_instr, nargs,
call_shape.kwnames, cache, BUILTINS());
int err = _Py_Specialize_Precall(callable, next_instr, nargs,
call_shape.kwnames, oparg);
if (err < 0) {
goto error;
}
@@ -4702,23 +4692,20 @@ handle_eval_breaker:
}
else {
STAT_INC(PRECALL, deferred);
cache->adaptive.counter--;
oparg = original_oparg;
cache->counter--;
JUMP_TO_INSTRUCTION(PRECALL);
}
}
TARGET(CALL_ADAPTIVE) {
SpecializedCacheEntry *cache = GET_CACHE();
int original_oparg = cache->adaptive.original_oparg;
if (cache->adaptive.counter == 0) {
_PyCallCache *cache = (_PyCallCache *)next_instr;
if (cache->counter == 0) {
next_instr--;
int is_meth = is_method(stack_pointer, original_oparg);
int nargs = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int nargs = oparg + is_meth;
PyObject *callable = PEEK(nargs + 1);
int err = _Py_Specialize_Call(
callable, next_instr, nargs,
call_shape.kwnames, cache);
int err = _Py_Specialize_Call(callable, next_instr, nargs,
call_shape.kwnames);
if (err < 0) {
goto error;
}
@@ -4726,23 +4713,20 @@ handle_eval_breaker:
}
else {
STAT_INC(CALL, deferred);
cache->adaptive.counter--;
oparg = original_oparg;
cache->counter--;
goto call_function;
}
}
TARGET(CALL_PY_EXACT_ARGS) {
assert(call_shape.kwnames == NULL);
SpecializedCacheEntry *caches = GET_CACHE();
int original_oparg = caches->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int argcount = original_oparg + is_meth;
_PyCallCache *cache = (_PyCallCache *)next_instr;
int is_meth = is_method(stack_pointer, oparg);
int argcount = oparg + is_meth;
PyObject *callable = PEEK(argcount + 1);
DEOPT_IF(!PyFunction_Check(callable), CALL);
_PyCallCache *cache1 = &caches[-1].call;
PyFunctionObject *func = (PyFunctionObject *)callable;
DEOPT_IF(func->func_version != cache1->func_version, CALL);
DEOPT_IF(func->func_version != read_u32(cache->func_version), CALL);
PyCodeObject *code = (PyCodeObject *)func->func_code;
DEOPT_IF(code->co_argcount != argcount, CALL);
STAT_INC(CALL, hit);
@@ -4760,6 +4744,7 @@ handle_eval_breaker:
}
STACK_SHRINK(2-is_meth);
_PyFrame_SetStackPointer(frame, stack_pointer);
frame->f_lasti += INLINE_CACHE_ENTRIES_CALL;
new_frame->previous = frame;
frame = cframe.current_frame = new_frame;
goto start_frame;
@@ -4767,18 +4752,16 @@ handle_eval_breaker:
TARGET(CALL_PY_WITH_DEFAULTS) {
assert(call_shape.kwnames == NULL);
SpecializedCacheEntry *caches = GET_CACHE();
int original_oparg = caches->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int argcount = original_oparg + is_meth;
_PyCallCache *cache = (_PyCallCache *)next_instr;
int is_meth = is_method(stack_pointer, oparg);
int argcount = oparg + is_meth;
PyObject *callable = PEEK(argcount + 1);
DEOPT_IF(!PyFunction_Check(callable), CALL);
_PyCallCache *cache1 = &caches[-1].call;
PyFunctionObject *func = (PyFunctionObject *)callable;
DEOPT_IF(func->func_version != cache1->func_version, CALL);
DEOPT_IF(func->func_version != read_u32(cache->func_version), CALL);
PyCodeObject *code = (PyCodeObject *)func->func_code;
DEOPT_IF(argcount > code->co_argcount, CALL);
int minargs = cache1->min_args;
int minargs = cache->min_args;
DEOPT_IF(argcount < minargs, CALL);
STAT_INC(CALL, hit);
_PyInterpreterFrame *new_frame = _PyFrame_Push(tstate, func);
@@ -4790,9 +4773,9 @@ handle_eval_breaker:
for (int i = 0; i < argcount; i++) {
new_frame->localsplus[i] = stack_pointer[i];
}
int def_offset = cache1->defaults_len - code->co_argcount;
for (int i = argcount; i < code->co_argcount; i++) {
PyObject *def = PyTuple_GET_ITEM(func->func_defaults, i + def_offset);
PyObject *def = PyTuple_GET_ITEM(func->func_defaults,
i - minargs);
Py_INCREF(def);
new_frame->localsplus[i] = def;
}
@@ -4801,6 +4784,7 @@ handle_eval_breaker:
}
STACK_SHRINK(2-is_meth);
_PyFrame_SetStackPointer(frame, stack_pointer);
frame->f_lasti += INLINE_CACHE_ENTRIES_CALL;
new_frame->previous = frame;
frame = cframe.current_frame = new_frame;
goto start_frame;
@@ -4809,13 +4793,13 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_TYPE_1) {
assert(call_shape.kwnames == NULL);
assert(cframe.use_tracing == 0);
assert(GET_CACHE()->adaptive.original_oparg == 1);
assert(oparg == 1);
DEOPT_IF(is_method(stack_pointer, 1), PRECALL);
PyObject *obj = TOP();
PyObject *callable = SECOND();
DEOPT_IF(callable != (PyObject *)&PyType_Type, PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
PyObject *res = Py_NewRef(Py_TYPE(obj));
Py_DECREF(callable);
Py_DECREF(obj);
@@ -4827,12 +4811,12 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_STR_1) {
assert(call_shape.kwnames == NULL);
assert(cframe.use_tracing == 0);
assert(GET_CACHE()->adaptive.original_oparg == 1);
assert(oparg == 1);
DEOPT_IF(is_method(stack_pointer, 1), PRECALL);
PyObject *callable = PEEK(2);
DEOPT_IF(callable != (PyObject *)&PyUnicode_Type, PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
PyObject *arg = TOP();
PyObject *res = PyObject_Str(arg);
Py_DECREF(arg);
@@ -4848,12 +4832,12 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_TUPLE_1) {
assert(call_shape.kwnames == NULL);
assert(GET_CACHE()->adaptive.original_oparg == 1);
assert(oparg == 1);
DEOPT_IF(is_method(stack_pointer, 1), PRECALL);
PyObject *callable = PEEK(2);
DEOPT_IF(callable != (PyObject *)&PyTuple_Type, PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
PyObject *arg = TOP();
PyObject *res = PySequence_Tuple(arg);
Py_DECREF(arg);
@@ -4868,16 +4852,15 @@ handle_eval_breaker:
}
TARGET(PRECALL_BUILTIN_CLASS) {
int original_oparg = GET_CACHE()->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int total_args = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int total_args = oparg + is_meth;
int kwnames_len = KWNAMES_LEN();
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(!PyType_Check(callable), PRECALL);
PyTypeObject *tp = (PyTypeObject *)callable;
DEOPT_IF(tp->tp_vectorcall == NULL, PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
STACK_SHRINK(total_args);
PyObject *res = tp->tp_vectorcall((PyObject *)tp, stack_pointer,
total_args-kwnames_len, call_shape.kwnames);
@@ -4900,16 +4883,14 @@ handle_eval_breaker:
assert(cframe.use_tracing == 0);
/* Builtin METH_O functions */
assert(call_shape.kwnames == NULL);
SpecializedCacheEntry *caches = GET_CACHE();
int original_oparg = caches->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int total_args = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int total_args = oparg + is_meth;
DEOPT_IF(total_args != 1, PRECALL);
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(!PyCFunction_CheckExact(callable), PRECALL);
DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_O, PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable);
// This is slower but CPython promises to check all non-vectorcall
// function calls.
@@ -4936,16 +4917,14 @@ handle_eval_breaker:
assert(cframe.use_tracing == 0);
/* Builtin METH_FASTCALL functions, without keywords */
assert(call_shape.kwnames == NULL);
SpecializedCacheEntry *caches = GET_CACHE();
int original_oparg = caches->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int total_args = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(!PyCFunction_CheckExact(callable), PRECALL);
DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_FASTCALL,
PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable);
STACK_SHRINK(total_args);
/* res = func(self, args, nargs) */
@@ -4977,16 +4956,14 @@ handle_eval_breaker:
TARGET(PRECALL_BUILTIN_FAST_WITH_KEYWORDS) {
assert(cframe.use_tracing == 0);
/* Builtin METH_FASTCALL | METH_KEYWORDS functions */
SpecializedCacheEntry *caches = GET_CACHE();
int original_oparg = caches->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int total_args = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(!PyCFunction_CheckExact(callable), PRECALL);
DEOPT_IF(PyCFunction_GET_FLAGS(callable) !=
(METH_FASTCALL | METH_KEYWORDS), PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
STACK_SHRINK(total_args);
/* res = func(self, args, nargs, kwnames) */
_PyCFunctionFastWithKeywords cfunc =
@@ -5019,16 +4996,14 @@ handle_eval_breaker:
assert(cframe.use_tracing == 0);
assert(call_shape.kwnames == NULL);
/* len(o) */
SpecializedCacheEntry *caches = GET_CACHE();
int original_oparg = caches->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int total_args = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int total_args = oparg + is_meth;
DEOPT_IF(total_args != 1, PRECALL);
_PyObjectCache *cache1 = &caches[-1].obj;
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(callable != cache1->obj, PRECALL);
next_instr++; // Skip following call
PyInterpreterState *interp = _PyInterpreterState_GET();
DEOPT_IF(callable != interp->callable_cache.len, PRECALL);
STAT_INC(PRECALL, hit);
SKIP_CALL();
PyObject *arg = TOP();
Py_ssize_t len_i = PyObject_Length(arg);
if (len_i < 0) {
@@ -5051,17 +5026,14 @@ handle_eval_breaker:
assert(cframe.use_tracing == 0);
assert(call_shape.kwnames == NULL);
/* isinstance(o, o2) */
SpecializedCacheEntry *caches = GET_CACHE();
int original_oparg = caches->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int total_args = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(total_args != 2, PRECALL);
_PyObjectCache *cache1 = &caches[-1].obj;
DEOPT_IF(callable != cache1->obj, PRECALL);
next_instr++; // Skip following call
PyInterpreterState *interp = _PyInterpreterState_GET();
DEOPT_IF(callable != interp->callable_cache.isinstance, PRECALL);
STAT_INC(PRECALL, hit);
SKIP_CALL();
PyObject *cls = POP();
PyObject *inst = TOP();
int retval = PyObject_IsInstance(inst, cls);
@@ -5086,16 +5058,14 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_LIST_APPEND) {
assert(cframe.use_tracing == 0);
assert(call_shape.kwnames == NULL);
assert(GET_CACHE()->adaptive.original_oparg == 1);
SpecializedCacheEntry *caches = GET_CACHE();
_PyObjectCache *cache1 = &caches[-1].obj;
assert(cache1->obj != NULL);
assert(oparg == 1);
PyObject *callable = PEEK(3);
DEOPT_IF(callable != cache1->obj, PRECALL);
PyInterpreterState *interp = _PyInterpreterState_GET();
DEOPT_IF(callable != interp->callable_cache.list_append, PRECALL);
PyObject *list = SECOND();
DEOPT_IF(!PyList_Check(list), PRECALL);
STAT_INC(PRECALL, hit);
next_instr++; // Skip following call
SKIP_CALL();
PyObject *arg = TOP();
int err = PyList_Append(list, arg);
if (err) {
@@ -5112,16 +5082,15 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_METHOD_DESCRIPTOR_O) {
assert(call_shape.kwnames == NULL);
int original_oparg = GET_CACHE()->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int total_args = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
DEOPT_IF(total_args != 2, PRECALL);
DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), PRECALL);
PyMethodDef *meth = ((PyMethodDescrObject *)callable)->d_method;
DEOPT_IF(meth->ml_flags != METH_O, PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
PyCFunction cfunc = meth->ml_meth;
// This is slower but CPython promises to check all non-vectorcall
// function calls.
@@ -5135,7 +5104,7 @@ handle_eval_breaker:
assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
Py_DECREF(self);
Py_DECREF(arg);
STACK_SHRINK(original_oparg+1);
STACK_SHRINK(oparg + 1);
SET_TOP(res);
Py_DECREF(callable);
if (res == NULL) {
@@ -5147,17 +5116,16 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_METHOD_DESCRIPTOR_NOARGS) {
assert(call_shape.kwnames == NULL);
int original_oparg = GET_CACHE()->adaptive.original_oparg;
assert(original_oparg == 0 || original_oparg == 1);
int is_meth = is_method(stack_pointer, original_oparg);
int total_args = original_oparg + is_meth;
assert(oparg == 0 || oparg == 1);
int is_meth = is_method(stack_pointer, oparg);
int total_args = oparg + is_meth;
DEOPT_IF(total_args != 1, PRECALL);
PyObject *callable = SECOND();
DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), PRECALL);
PyMethodDef *meth = ((PyMethodDescrObject *)callable)->d_method;
DEOPT_IF(meth->ml_flags != METH_NOARGS, PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
PyCFunction cfunc = meth->ml_meth;
// This is slower but CPython promises to check all non-vectorcall
// function calls.
@@ -5169,7 +5137,7 @@ handle_eval_breaker:
_Py_LeaveRecursiveCall(tstate);
assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
Py_DECREF(self);
STACK_SHRINK(original_oparg+1);
STACK_SHRINK(oparg + 1);
SET_TOP(res);
Py_DECREF(callable);
if (res == NULL) {
@@ -5181,16 +5149,15 @@ handle_eval_breaker:
TARGET(PRECALL_NO_KW_METHOD_DESCRIPTOR_FAST) {
assert(call_shape.kwnames == NULL);
int original_oparg = GET_CACHE()->adaptive.original_oparg;
int is_meth = is_method(stack_pointer, original_oparg);
int total_args = original_oparg + is_meth;
int is_meth = is_method(stack_pointer, oparg);
int total_args = oparg + is_meth;
PyObject *callable = PEEK(total_args + 1);
/* Builtin METH_FASTCALL methods, without keywords */
DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), PRECALL);
PyMethodDef *meth = ((PyMethodDescrObject *)callable)->d_method;
DEOPT_IF(meth->ml_flags != METH_FASTCALL, PRECALL);
next_instr++; // Skip following call
STAT_INC(PRECALL, hit);
SKIP_CALL();
_PyCFunctionFast cfunc = (_PyCFunctionFast)(void(*)(void))meth->ml_meth;
int nargs = total_args-1;
STACK_SHRINK(nargs);
@@ -5537,22 +5504,6 @@ handle_eval_breaker:
/* Specialization misses */
#define MISS_WITH_CACHE(opname) \
opname ## _miss: \
{ \
STAT_INC(opcode, miss); \
STAT_INC(opname, miss); \
_PyAdaptiveEntry *cache = &GET_CACHE()->adaptive; \
cache->counter--; \
if (cache->counter == 0) { \
next_instr[-1] = _Py_MAKECODEUNIT(opname ## _ADAPTIVE, _Py_OPARG(next_instr[-1])); \
STAT_INC(opname, deopt); \
cache_backoff(cache); \
} \
oparg = cache->original_oparg; \
JUMP_TO_INSTRUCTION(opname); \
}
#define MISS_WITH_INLINE_CACHE(opname) \
opname ## _miss: \
{ \
@@ -5588,8 +5539,8 @@ MISS_WITH_INLINE_CACHE(LOAD_ATTR)
MISS_WITH_INLINE_CACHE(STORE_ATTR)
MISS_WITH_INLINE_CACHE(LOAD_GLOBAL)
MISS_WITH_INLINE_CACHE(LOAD_METHOD)
MISS_WITH_CACHE(PRECALL)
MISS_WITH_CACHE(CALL)
MISS_WITH_INLINE_CACHE(PRECALL)
MISS_WITH_INLINE_CACHE(CALL)
MISS_WITH_INLINE_CACHE(BINARY_OP)
MISS_WITH_INLINE_CACHE(COMPARE_OP)
MISS_WITH_INLINE_CACHE(BINARY_SUBSCR)