GH-126599: Remove the PyOptimizer API (GH-129194)

Brandt Bucher 2025-01-28 16:10:51 -08:00 committed by GitHub
parent 5c930a26fb
commit 828b27680f
23 changed files with 345 additions and 435 deletions

Python/bytecodes.c

@@ -2782,13 +2782,26 @@ dummy_func(
JUMPBY(oparg);
}
tier1 op(_JUMP_BACKWARD, (the_counter/1 --)) {
assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
#ifdef _Py_TIER2
#if ENABLE_SPECIALIZATION
family(JUMP_BACKWARD, 1) = {
JUMP_BACKWARD_NO_JIT,
JUMP_BACKWARD_JIT,
};
tier1 op(_SPECIALIZE_JUMP_BACKWARD, (--)) {
#if ENABLE_SPECIALIZATION
if (this_instr->op.code == JUMP_BACKWARD) {
this_instr->op.code = tstate->interp->jit ? JUMP_BACKWARD_JIT : JUMP_BACKWARD_NO_JIT;
// Need to re-dispatch so the warmup counter isn't off by one:
next_instr = this_instr;
DISPATCH_SAME_OPARG();
}
#endif
}
tier1 op(_JIT, (--)) {
#ifdef _Py_TIER2
_Py_BackoffCounter counter = this_instr[1].counter;
if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD) {
if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD_JIT) {
_Py_CODEUNIT *start = this_instr;
/* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */
while (oparg > 255) {
@@ -2811,13 +2824,25 @@ dummy_func(
else {
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
}
#endif /* ENABLE_SPECIALIZATION */
#endif /* _Py_TIER2 */
#endif
}
macro(JUMP_BACKWARD) =
unused/1 +
_SPECIALIZE_JUMP_BACKWARD +
_CHECK_PERIODIC +
_JUMP_BACKWARD;
JUMP_BACKWARD_NO_INTERRUPT;
macro(JUMP_BACKWARD_NO_JIT) =
unused/1 +
_CHECK_PERIODIC +
JUMP_BACKWARD_NO_INTERRUPT;
macro(JUMP_BACKWARD_JIT) =
unused/1 +
_CHECK_PERIODIC +
JUMP_BACKWARD_NO_INTERRUPT +
_JIT;
pseudo(JUMP, (--)) = {
JUMP_FORWARD,
@@ -2906,6 +2931,7 @@ dummy_func(
* generator or coroutine, so we deliberately do not check it here.
* (see bpo-30039).
*/
assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
}
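
Taken together, the new JUMP_BACKWARD family works like an adaptive dispatch: the generic instruction rewrites itself once, and only the _JIT variant ever touches the warmup counter. A minimal standalone sketch of that behavior, assuming hypothetical names (Instr, interp_jit, a fixed counter reset) rather than CPython's real internals:

#include <stdio.h>
#include <stdbool.h>

enum { JUMP_BACKWARD, JUMP_BACKWARD_NO_JIT, JUMP_BACKWARD_JIT };

typedef struct {
    int opcode;
    int counter;  /* stands in for the adaptive backoff counter cache */
} Instr;

static bool interp_jit = true;  /* stands in for tstate->interp->jit */

static void execute(Instr *i) {
    if (i->opcode == JUMP_BACKWARD) {
        /* Specialize once, based on whether the JIT is enabled; the real
         * code then re-dispatches so the warmup counter isn't off by one. */
        i->opcode = interp_jit ? JUMP_BACKWARD_JIT : JUMP_BACKWARD_NO_JIT;
    }
    if (i->opcode == JUMP_BACKWARD_JIT && --i->counter <= 0) {
        printf("counter triggered: enter the tier-2 optimizer here\n");
        i->counter = 4;  /* stand-in for the exponential backoff reset */
    }
}

int main(void) {
    Instr loop = { JUMP_BACKWARD, 2 };
    for (int n = 0; n < 6; n++) {
        execute(&loop);
    }
    return 0;
}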

Python/generated_cases.c.h

@@ -180,6 +180,7 @@
TARGET(BINARY_OP_EXTEND) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 6;
INSTRUCTION_STATS(BINARY_OP_EXTEND);
static_assert(INLINE_CACHE_ENTRIES_BINARY_OP == 5, "incorrect cache size");
@@ -1087,6 +1088,7 @@
TARGET(CALL_ALLOC_AND_ENTER_INIT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_ALLOC_AND_ENTER_INIT);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -1185,6 +1187,7 @@
TARGET(CALL_BOUND_METHOD_EXACT_ARGS) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_BOUND_METHOD_EXACT_ARGS);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -1288,6 +1291,7 @@
TARGET(CALL_BOUND_METHOD_GENERAL) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_BOUND_METHOD_GENERAL);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -2113,6 +2117,7 @@
TARGET(CALL_KW_BOUND_METHOD) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_KW_BOUND_METHOD);
static_assert(INLINE_CACHE_ENTRIES_CALL_KW == 3, "incorrect cache size");
@@ -2313,6 +2318,7 @@
TARGET(CALL_KW_PY) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_KW_PY);
static_assert(INLINE_CACHE_ENTRIES_CALL_KW == 3, "incorrect cache size");
@@ -2890,6 +2896,7 @@
TARGET(CALL_PY_EXACT_ARGS) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_PY_EXACT_ARGS);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -2971,6 +2978,7 @@
TARGET(CALL_PY_GENERAL) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_PY_GENERAL);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -5160,10 +5168,24 @@
}
TARGET(JUMP_BACKWARD) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
frame->instr_ptr = next_instr;
next_instr += 2;
INSTRUCTION_STATS(JUMP_BACKWARD);
PREDICTED_JUMP_BACKWARD:;
_Py_CODEUNIT* const this_instr = next_instr - 2;
(void)this_instr;
/* Skip 1 cache entry */
// _SPECIALIZE_JUMP_BACKWARD
{
#if ENABLE_SPECIALIZATION
if (this_instr->op.code == JUMP_BACKWARD) {
this_instr->op.code = tstate->interp->jit ? JUMP_BACKWARD_JIT : JUMP_BACKWARD_NO_JIT;
// Need to re-dispatch so the warmup counter isn't off by one:
next_instr = this_instr;
DISPATCH_SAME_OPARG();
}
#endif
}
// _CHECK_PERIODIC
{
_Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
@@ -5175,16 +5197,52 @@
if (err != 0) goto error;
}
}
// _JUMP_BACKWARD
// _JUMP_BACKWARD_NO_INTERRUPT
{
uint16_t the_counter = read_u16(&this_instr[1].cache);
(void)the_counter;
/* This bytecode is used in the `yield from` or `await` loop.
* If there is an interrupt, we want it handled in the innermost
* generator or coroutine, so we deliberately do not check it here.
* (see bpo-30039).
*/
assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
}
DISPATCH();
}
TARGET(JUMP_BACKWARD_JIT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 2;
INSTRUCTION_STATS(JUMP_BACKWARD_JIT);
static_assert(1 == 1, "incorrect cache size");
/* Skip 1 cache entry */
// _CHECK_PERIODIC
{
_Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
QSBR_QUIESCENT_STATE(tstate);
if (_Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) & _PY_EVAL_EVENTS_MASK) {
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _Py_HandlePending(tstate);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err != 0) goto error;
}
}
// _JUMP_BACKWARD_NO_INTERRUPT
{
/* This bytecode is used in the `yield from` or `await` loop.
* If there is an interrupt, we want it handled in the innermost
* generator or coroutine, so we deliberately do not check it here.
* (see bpo-30039).
*/
assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
}
// _JIT
{
#ifdef _Py_TIER2
#if ENABLE_SPECIALIZATION
_Py_BackoffCounter counter = this_instr[1].counter;
if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD) {
if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD_JIT) {
_Py_CODEUNIT *start = this_instr;
/* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */
while (oparg > 255) {
@@ -5211,8 +5269,7 @@
else {
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
}
#endif /* ENABLE_SPECIALIZATION */
#endif /* _Py_TIER2 */
#endif
}
DISPATCH();
}
@@ -5226,10 +5283,41 @@
* generator or coroutine, so we deliberately do not check it here.
* (see bpo-30039).
*/
assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
DISPATCH();
}
TARGET(JUMP_BACKWARD_NO_JIT) {
frame->instr_ptr = next_instr;
next_instr += 2;
INSTRUCTION_STATS(JUMP_BACKWARD_NO_JIT);
static_assert(1 == 1, "incorrect cache size");
/* Skip 1 cache entry */
// _CHECK_PERIODIC
{
_Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
QSBR_QUIESCENT_STATE(tstate);
if (_Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) & _PY_EVAL_EVENTS_MASK) {
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = _Py_HandlePending(tstate);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (err != 0) goto error;
}
}
// _JUMP_BACKWARD_NO_INTERRUPT
{
/* This bytecode is used in the `yield from` or `await` loop.
* If there is an interrupt, we want it handled in the innermost
* generator or coroutine, so we deliberately do not check it here.
* (see bpo-30039).
*/
assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
}
DISPATCH();
}
TARGET(JUMP_FORWARD) {
frame->instr_ptr = next_instr;
next_instr += 1;
@@ -5369,6 +5457,7 @@
TARGET(LOAD_ATTR_CLASS) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_CLASS);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5407,6 +5496,7 @@
TARGET(LOAD_ATTR_CLASS_WITH_METACLASS_CHECK) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_CLASS_WITH_METACLASS_CHECK);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5451,6 +5541,7 @@
TARGET(LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5487,6 +5578,7 @@
TARGET(LOAD_ATTR_INSTANCE_VALUE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_INSTANCE_VALUE);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5540,6 +5632,7 @@
TARGET(LOAD_ATTR_METHOD_LAZY_DICT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_METHOD_LAZY_DICT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5583,6 +5676,7 @@
TARGET(LOAD_ATTR_METHOD_NO_DICT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_METHOD_NO_DICT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5619,6 +5713,7 @@
TARGET(LOAD_ATTR_METHOD_WITH_VALUES) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_METHOD_WITH_VALUES);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5669,6 +5764,7 @@
TARGET(LOAD_ATTR_MODULE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_MODULE);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5724,6 +5820,7 @@
TARGET(LOAD_ATTR_NONDESCRIPTOR_NO_DICT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_NONDESCRIPTOR_NO_DICT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5755,6 +5852,7 @@
TARGET(LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5799,6 +5897,7 @@
TARGET(LOAD_ATTR_PROPERTY) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_PROPERTY);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5864,6 +5963,7 @@
TARGET(LOAD_ATTR_SLOT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_SLOT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5909,6 +6009,7 @@
TARGET(LOAD_ATTR_WITH_HINT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_WITH_HINT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -6326,6 +6427,7 @@
TARGET(LOAD_GLOBAL_BUILTIN) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(LOAD_GLOBAL_BUILTIN);
static_assert(INLINE_CACHE_ENTRIES_LOAD_GLOBAL == 4, "incorrect cache size");
@@ -6380,6 +6482,7 @@
TARGET(LOAD_GLOBAL_MODULE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(LOAD_GLOBAL_MODULE);
static_assert(INLINE_CACHE_ENTRIES_LOAD_GLOBAL == 4, "incorrect cache size");
@@ -7557,6 +7660,7 @@
TARGET(STORE_ATTR_INSTANCE_VALUE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(STORE_ATTR_INSTANCE_VALUE);
static_assert(INLINE_CACHE_ENTRIES_STORE_ATTR == 4, "incorrect cache size");
@@ -7615,6 +7719,7 @@
TARGET(STORE_ATTR_SLOT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(STORE_ATTR_SLOT);
static_assert(INLINE_CACHE_ENTRIES_STORE_ATTR == 4, "incorrect cache size");
@@ -7652,6 +7757,7 @@
TARGET(STORE_ATTR_WITH_HINT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(STORE_ATTR_WITH_HINT);
static_assert(INLINE_CACHE_ENTRIES_STORE_ATTR == 4, "incorrect cache size");
@@ -8046,6 +8152,7 @@
TARGET(TO_BOOL_ALWAYS_TRUE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(TO_BOOL_ALWAYS_TRUE);
static_assert(INLINE_CACHE_ENTRIES_TO_BOOL == 3, "incorrect cache size");
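
Both new variants keep the _CHECK_PERIODIC poll of tstate->eval_breaker before jumping backward, so signals and pending calls are still serviced on loop back-edges. A rough sketch of that polling pattern using C11 atomics, with a made-up mask value standing in for _PY_EVAL_EVENTS_MASK:

#include <stdatomic.h>
#include <stdio.h>

#define EVAL_EVENTS_MASK 0xFFUL  /* hypothetical; the real mask is internal */

static _Atomic unsigned long eval_breaker;

static int handle_pending(void) {
    puts("handling pending events (signals, GC, async exceptions, ...)");
    return 0;
}

static int check_periodic(void) {
    /* Relaxed load is enough for a hint flag checked on every back-edge. */
    if (atomic_load_explicit(&eval_breaker, memory_order_relaxed)
            & EVAL_EVENTS_MASK) {
        return handle_pending();
    }
    return 0;
}

int main(void) {
    atomic_store(&eval_breaker, 0x1);
    return check_periodic();
}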

Python/opcode_targets.h

@@ -194,6 +194,8 @@ static void *opcode_targets[256] = {
&&TARGET_FOR_ITER_LIST,
&&TARGET_FOR_ITER_RANGE,
&&TARGET_FOR_ITER_TUPLE,
&&TARGET_JUMP_BACKWARD_JIT,
&&TARGET_JUMP_BACKWARD_NO_JIT,
&&TARGET_LOAD_ATTR_CLASS,
&&TARGET_LOAD_ATTR_CLASS_WITH_METACLASS_CHECK,
&&TARGET_LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN,
@@ -232,8 +234,6 @@ static void *opcode_targets[256] = {
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_INSTRUMENTED_END_FOR,
&&TARGET_INSTRUMENTED_POP_ITER,
&&TARGET_INSTRUMENTED_END_SEND,
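
The table above feeds the interpreter's computed-goto dispatch: each opcode indexes an array of label addresses, and the two freed _unknown_opcode slots now hold the new jump targets. A toy illustration of that dispatch style (it relies on the GCC/Clang labels-as-values extension, as the real table does; the opcodes and labels here are invented):

#include <stdio.h>

int main(void) {
    /* One label address per opcode; unused slots would hold a shared
     * "unknown opcode" label, as in the real table. */
    static void *targets[] = { &&op_stop, &&op_jump_backward_jit };
    unsigned char code[] = { 1, 0 };
    int pc = 0;
    goto *targets[code[pc]];
op_jump_backward_jit:
    puts("JUMP_BACKWARD_JIT: warm up the JIT, then jump backward");
    pc++;
    goto *targets[code[pc]];
op_stop:
    return 0;
}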

Python/optimizer.c

@@ -91,70 +91,13 @@ insert_executor(PyCodeObject *code, _Py_CODEUNIT *instr, int index, _PyExecutorO
instr->op.arg = index;
}
static int
never_optimize(
_PyOptimizerObject* self,
_PyInterpreterFrame *frame,
_Py_CODEUNIT *instr,
_PyExecutorObject **exec,
int Py_UNUSED(stack_entries),
bool Py_UNUSED(progress_needed))
{
// This may be called if the optimizer is reset
return 0;
}
PyTypeObject _PyDefaultOptimizer_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
.tp_name = "noop_optimizer",
.tp_basicsize = sizeof(_PyOptimizerObject),
.tp_itemsize = 0,
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
};
static _PyOptimizerObject _PyOptimizer_Default = {
PyObject_HEAD_INIT(&_PyDefaultOptimizer_Type)
.optimize = never_optimize,
};
_PyOptimizerObject *
_Py_GetOptimizer(void)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
if (interp->optimizer == &_PyOptimizer_Default) {
return NULL;
}
Py_INCREF(interp->optimizer);
return interp->optimizer;
}
static _PyExecutorObject *
make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies);
_PyOptimizerObject *
_Py_SetOptimizer(PyInterpreterState *interp, _PyOptimizerObject *optimizer)
{
if (optimizer == NULL) {
optimizer = &_PyOptimizer_Default;
}
_PyOptimizerObject *old = interp->optimizer;
if (old == NULL) {
old = &_PyOptimizer_Default;
}
Py_INCREF(optimizer);
interp->optimizer = optimizer;
return old;
}
int
_Py_SetTier2Optimizer(_PyOptimizerObject *optimizer)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
_PyOptimizerObject *old = _Py_SetOptimizer(interp, optimizer);
Py_XDECREF(old);
return old == NULL ? -1 : 0;
}
static int
uop_optimize(_PyInterpreterFrame *frame, _Py_CODEUNIT *instr,
_PyExecutorObject **exec_ptr, int curr_stackentries,
bool progress_needed);
/* Returns 1 if optimized, 0 if not optimized, and -1 for an error.
* If optimized, *executor_ptr contains a new reference to the executor
@@ -164,6 +107,7 @@ _PyOptimizer_Optimize(
_PyInterpreterFrame *frame, _Py_CODEUNIT *start,
_PyStackRef *stack_pointer, _PyExecutorObject **executor_ptr, int chain_depth)
{
assert(_PyInterpreterState_GET()->jit);
// The first executor in a chain and the MAX_CHAIN_DEPTH'th executor *must*
// make progress in order to avoid infinite loops or excessively-long
// side-exit chains. We can only insert the executor into the bytecode if
@@ -172,12 +116,10 @@ _PyOptimizer_Optimize(
bool progress_needed = chain_depth == 0;
PyCodeObject *code = _PyFrame_GetCode(frame);
assert(PyCode_Check(code));
PyInterpreterState *interp = _PyInterpreterState_GET();
if (progress_needed && !has_space_for_executor(code, start)) {
return 0;
}
_PyOptimizerObject *opt = interp->optimizer;
int err = opt->optimize(opt, frame, start, executor_ptr, (int)(stack_pointer - _PyFrame_Stackbase(frame)), progress_needed);
int err = uop_optimize(frame, start, executor_ptr, (int)(stack_pointer - _PyFrame_Stackbase(frame)), progress_needed);
if (err <= 0) {
return err;
}
@@ -684,6 +626,7 @@ translate_bytecode_to_trace(
}
case JUMP_BACKWARD:
case JUMP_BACKWARD_JIT:
ADD_TO_TRACE(_CHECK_PERIODIC, 0, 0, target);
_Py_FALLTHROUGH;
case JUMP_BACKWARD_NO_INTERRUPT:
@@ -1241,7 +1184,6 @@ int effective_trace_length(_PyUOpInstruction *buffer, int length)
static int
uop_optimize(
_PyOptimizerObject *self,
_PyInterpreterFrame *frame,
_Py_CODEUNIT *instr,
_PyExecutorObject **exec_ptr,
@@ -1299,31 +1241,6 @@ uop_optimize(
return 1;
}
static void
uop_opt_dealloc(PyObject *self) {
PyObject_Free(self);
}
PyTypeObject _PyUOpOptimizer_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
.tp_name = "uop_optimizer",
.tp_basicsize = sizeof(_PyOptimizerObject),
.tp_itemsize = 0,
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
.tp_dealloc = uop_opt_dealloc,
};
PyObject *
_PyOptimizer_NewUOpOptimizer(void)
{
_PyOptimizerObject *opt = PyObject_New(_PyOptimizerObject, &_PyUOpOptimizer_Type);
if (opt == NULL) {
return NULL;
}
opt->optimize = uop_optimize;
return (PyObject *)opt;
}
/*****************************************
* Executor management
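
The shape of this change, reduced to its essentials: a reference-counted optimizer object holding a function pointer is replaced by a per-interpreter boolean plus a direct call to uop_optimize. A simplified sketch under those assumptions (names are illustrative, not CPython's):

#include <stdbool.h>
#include <stdio.h>

/* Before: a heap-allocated object carrying a swappable strategy. */
typedef int (*optimize_fn)(void *frame, void *instr);
struct optimizer { optimize_fn optimize; };

/* After: one per-interpreter flag and one static function. */
static bool jit_enabled = true;  /* stands in for interp->jit */

static int uop_optimize(void *frame, void *instr) {
    (void)frame;
    (void)instr;
    puts("translating bytecode to a uop trace...");
    return 1;  /* 1 = optimized, 0 = not optimized, -1 = error */
}

static int optimize(void *frame, void *instr) {
    if (!jit_enabled) {
        return 0;  /* no optimizer object to consult anymore */
    }
    return uop_optimize(frame, instr);
}

int main(void) {
    return optimize(NULL, NULL) == 1 ? 0 : 1;
}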

Python/pylifecycle.c

@@ -1306,14 +1306,7 @@ init_interp_main(PyThreadState *tstate)
} else
#endif
{
PyObject *opt = _PyOptimizer_NewUOpOptimizer();
if (opt == NULL) {
return _PyStatus_ERR("can't initialize optimizer");
}
if (_Py_SetTier2Optimizer((_PyOptimizerObject *)opt)) {
return _PyStatus_ERR("can't install optimizer");
}
Py_DECREF(opt);
interp->jit = true;
}
}
}
@@ -1665,11 +1658,10 @@ finalize_modules(PyThreadState *tstate)
{
PyInterpreterState *interp = tstate->interp;
// Invalidate all executors and turn off JIT:
interp->jit = false;
#ifdef _Py_TIER2
// Invalidate all executors and turn off tier 2 optimizer
_Py_Executors_InvalidateAll(interp, 0);
_PyOptimizerObject *old = _Py_SetOptimizer(interp, NULL);
Py_XDECREF(old);
#endif
// Stop watching __builtin__ modifications
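
Startup and shutdown now bracket the JIT with a single flag rather than installing and uninstalling an optimizer object. A stripped-down model of that lifecycle, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct interp { bool jit; };

static void init_interp_main(struct interp *interp, bool want_jit) {
    /* Previously: allocate a _PyOptimizerObject, install it, and handle
     * allocation failure. Now the whole step is one assignment. */
    interp->jit = want_jit;
}

static void finalize_modules(struct interp *interp) {
    interp->jit = false;                 /* turn the JIT off first... */
    puts("invalidating all executors");  /* ...then drop compiled traces */
}

int main(void) {
    struct interp interp = { false };
    init_interp_main(&interp, true);
    finalize_modules(&interp);
    return 0;
}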

Python/pystate.c

@@ -655,11 +655,9 @@ init_interpreter(PyInterpreterState *interp,
}
interp->sys_profile_initialized = false;
interp->sys_trace_initialized = false;
#ifdef _Py_TIER2
(void)_Py_SetOptimizer(interp, NULL);
interp->jit = false;
interp->executor_list_head = NULL;
interp->trace_run_counter = JIT_CLEANUP_THRESHOLD;
#endif
if (interp != &runtime->_main_interpreter) {
/* Fix the self-referential, statically initialized fields. */
interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
@@ -829,12 +827,6 @@ interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
tstate->_status.cleared = 0;
}
#ifdef _Py_TIER2
_PyOptimizerObject *old = _Py_SetOptimizer(interp, NULL);
assert(old != NULL);
Py_DECREF(old);
#endif
/* It is possible that any of the objects below have a finalizer
that runs Python code or otherwise relies on a thread state
or even the interpreter state. For now we trust that isn't

Python/sysmodule.c

@@ -2265,9 +2265,7 @@ sys_activate_stack_trampoline_impl(PyObject *module, const char *backend)
{
#ifdef PY_HAVE_PERF_TRAMPOLINE
#ifdef _Py_JIT
_PyOptimizerObject* optimizer = _Py_GetOptimizer();
if (optimizer != NULL) {
Py_DECREF(optimizer);
if (_PyInterpreterState_GET()->jit) {
PyErr_SetString(PyExc_ValueError, "Cannot activate the perf trampoline if the JIT is active");
return NULL;
}
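
The guard itself reduces to a flag test; the old version had to take and immediately release a strong reference just to learn whether a JIT optimizer was installed. An illustrative sketch, not the real CPython function:

#include <stdbool.h>
#include <stdio.h>

static bool jit_active = true;  /* stands in for interp->jit */

static int activate_stack_trampoline(void) {
    if (jit_active) {
        /* was: obtain a strong reference to the optimizer object,
         * Py_DECREF it at once, and only then raise the error */
        fprintf(stderr, "Cannot activate the perf trampoline "
                        "if the JIT is active\n");
        return -1;
    }
    puts("trampoline activated");
    return 0;
}

int main(void) {
    return activate_stack_trampoline() == 0 ? 0 : 1;
}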