gh-116968: Reimplement Tier 2 counters (#117144)

Introduce a unified 16-bit backoff counter type (``_Py_BackoffCounter``),
shared between the Tier 1 adaptive specializer and the Tier 2 optimizer. The
API used for adaptive specialization counters is changed but the behavior is
(supposed to be) identical.

The behavior of the Tier 2 counters is changed:
- There are no longer dynamic thresholds (we never varied these).
- All counters now use the same exponential backoff.
- The counter for ``JUMP_BACKWARD`` starts counting down from 16.
- The ``temperature`` in side exits starts counting down from 64.
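
For orientation, here is a minimal, self-contained sketch of the counting scheme described above. The struct layout, the helper names (``toy_make``, ``toy_advance``, ``toy_triggers``, ``toy_restart``) and the backoff cap are illustrative assumptions, not the actual definitions in ``pycore_backoff.h``; the sketch only models the behaviour: count down to zero, trigger, then restart with a doubled waiting period::

    /* Illustrative model only -- field names, widths and the cap are
     * assumptions, not the real pycore_backoff.h definitions. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint16_t value;     /* executions remaining until the counter fires */
        uint16_t exponent;  /* log2 of the next waiting period */
    } toy_backoff_counter;

    static toy_backoff_counter
    toy_make(uint16_t exponent)
    {
        toy_backoff_counter c = { (uint16_t)((1 << exponent) - 1), exponent };
        return c;
    }

    static toy_backoff_counter
    toy_advance(toy_backoff_counter c)
    {
        if (c.value > 0) {
            c.value--;              /* pay one execution toward the next trigger */
        }
        return c;
    }

    static bool
    toy_triggers(toy_backoff_counter c)
    {
        return c.value == 0;        /* time to try specializing / optimizing */
    }

    static toy_backoff_counter
    toy_restart(toy_backoff_counter c)
    {
        /* Exponential backoff after a failed attempt: wait twice as long,
         * capped so the counter still fits comfortably in 16 bits. */
        if (c.exponent < 12) {
            c.exponent++;
        }
        return toy_make(c.exponent);
    }

    int
    main(void)
    {
        /* JUMP_BACKWARD-style counter: first attempt after ~16 iterations. */
        toy_backoff_counter c = toy_make(4);
        for (int iteration = 1; iteration <= 300; iteration++) {
            if (toy_triggers(c)) {
                printf("optimization attempt at iteration %d\n", iteration);
                c = toy_restart(c);  /* pretend the attempt failed; back off */
            }
            else {
                c = toy_advance(c);
            }
        }
        return 0;
    }

Compiled and run, this prints attempts at roughly iterations 16, 48, 112, 240, ...: the gap doubles after every failed attempt, which is the exponential backoff that the Tier 1 and Tier 2 counters in the diff below share.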
Guido van Rossum authored on 2024-04-04 08:03:27 -07:00, committed by GitHub
parent 63bbe77d9b
commit 060a96f1a9
19 changed files with 313 additions and 235 deletions

@@ -8,6 +8,7 @@
#include "Python.h"
#include "pycore_abstract.h" // _PyIndex_Check()
#include "pycore_backoff.h"
#include "pycore_cell.h" // PyCell_GetRef()
#include "pycore_code.h"
#include "pycore_emscripten_signal.h" // _Py_CHECK_EMSCRIPTEN_SIGNALS
@@ -326,13 +327,13 @@ dummy_func(
specializing op(_SPECIALIZE_TO_BOOL, (counter/1, value -- value)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_ToBool(value, next_instr);
DISPATCH_SAME_OPARG();
}
STAT_INC(TO_BOOL, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -551,13 +552,13 @@ dummy_func(
specializing op(_SPECIALIZE_BINARY_SUBSCR, (counter/1, container, sub -- container, sub)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_BinarySubscr(container, sub, next_instr);
DISPATCH_SAME_OPARG();
}
STAT_INC(BINARY_SUBSCR, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -698,13 +699,13 @@ dummy_func(
specializing op(_SPECIALIZE_STORE_SUBSCR, (counter/1, container, sub -- container, sub)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_StoreSubscr(container, sub, next_instr);
DISPATCH_SAME_OPARG();
}
STAT_INC(STORE_SUBSCR, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -982,13 +983,13 @@ dummy_func(
specializing op(_SPECIALIZE_SEND, (counter/1, receiver, unused -- receiver, unused)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_Send(receiver, next_instr);
DISPATCH_SAME_OPARG();
}
STAT_INC(SEND, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -1211,13 +1212,13 @@ dummy_func(
specializing op(_SPECIALIZE_UNPACK_SEQUENCE, (counter/1, seq -- seq)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_UnpackSequence(seq, next_instr, oparg);
DISPATCH_SAME_OPARG();
}
STAT_INC(UNPACK_SEQUENCE, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
(void)seq;
(void)counter;
@@ -1280,14 +1281,14 @@ dummy_func(
specializing op(_SPECIALIZE_STORE_ATTR, (counter/1, owner -- owner)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg);
next_instr = this_instr;
_Py_Specialize_StoreAttr(owner, next_instr, name);
DISPATCH_SAME_OPARG();
}
STAT_INC(STORE_ATTR, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -1398,14 +1399,14 @@ dummy_func(
specializing op(_SPECIALIZE_LOAD_GLOBAL, (counter/1 -- )) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
next_instr = this_instr;
_Py_Specialize_LoadGlobal(GLOBALS(), BUILTINS(), next_instr, name);
DISPATCH_SAME_OPARG();
}
STAT_INC(LOAD_GLOBAL, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -1711,7 +1712,7 @@ dummy_func(
inst(INSTRUMENTED_LOAD_SUPER_ATTR, (unused/1, unused, unused, unused -- unused, unused if (oparg & 1))) {
// cancel out the decrement that will happen in LOAD_SUPER_ATTR; we
// don't want to specialize instrumented instructions
- INCREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ PAUSE_ADAPTIVE_COUNTER(this_instr[1].counter);
GO_TO_INSTRUCTION(LOAD_SUPER_ATTR);
}
@@ -1723,13 +1724,13 @@ dummy_func(
specializing op(_SPECIALIZE_LOAD_SUPER_ATTR, (counter/1, global_super, class, unused -- global_super, class, unused)) {
#if ENABLE_SPECIALIZATION
int load_method = oparg & 1;
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_LoadSuperAttr(global_super, class, next_instr, load_method);
DISPATCH_SAME_OPARG();
}
STAT_INC(LOAD_SUPER_ATTR, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -1836,14 +1837,14 @@ dummy_func(
specializing op(_SPECIALIZE_LOAD_ATTR, (counter/1, owner -- owner)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg>>1);
next_instr = this_instr;
_Py_Specialize_LoadAttr(owner, next_instr, name);
DISPATCH_SAME_OPARG();
}
STAT_INC(LOAD_ATTR, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -2157,13 +2158,13 @@ dummy_func(
specializing op(_SPECIALIZE_COMPARE_OP, (counter/1, left, right -- left, right)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_CompareOp(left, right, next_instr, oparg);
DISPATCH_SAME_OPARG();
}
STAT_INC(COMPARE_OP, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -2254,13 +2255,13 @@ dummy_func(
specializing op(_SPECIALIZE_CONTAINS_OP, (counter/1, left, right -- left, right)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_ContainsOp(right, next_instr);
DISPATCH_SAME_OPARG();
}
STAT_INC(CONTAINS_OP, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -2340,16 +2341,8 @@ dummy_func(
assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
#if ENABLE_SPECIALIZATION
- uint16_t counter = this_instr[1].cache;
- this_instr[1].cache = counter + (1 << OPTIMIZER_BITS_IN_COUNTER);
- /* We are using unsigned values, but we really want signed values, so
-  * do the 2s complement adjustment manually */
- uint32_t offset_counter = counter ^ (1 << 15);
- uint32_t threshold = tstate->interp->optimizer_backedge_threshold;
- assert((threshold & OPTIMIZER_BITS_MASK) == 0);
- // Use '>=' not '>' so that the optimizer/backoff bits do not effect the result.
- // Double-check that the opcode isn't instrumented or something:
- if (offset_counter >= threshold && this_instr->op.code == JUMP_BACKWARD) {
+ _Py_BackoffCounter counter = this_instr[1].counter;
+ if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD) {
_Py_CODEUNIT *start = this_instr;
/* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */
while (oparg > 255) {
@@ -2365,17 +2358,12 @@ dummy_func(
GOTO_TIER_TWO(executor);
}
else {
- int backoff = this_instr[1].cache & OPTIMIZER_BITS_MASK;
- backoff++;
- if (backoff < MIN_TIER2_BACKOFF) {
-     backoff = MIN_TIER2_BACKOFF;
- }
- else if (backoff > MAX_TIER2_BACKOFF) {
-     backoff = MAX_TIER2_BACKOFF;
- }
- this_instr[1].cache = ((UINT16_MAX << OPTIMIZER_BITS_IN_COUNTER) << backoff) | backoff;
+ this_instr[1].counter = restart_backoff_counter(counter);
}
}
+ else {
+     ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
+ }
#endif /* ENABLE_SPECIALIZATION */
}
@@ -2535,13 +2523,13 @@ dummy_func(
specializing op(_SPECIALIZE_FOR_ITER, (counter/1, iter -- iter)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_ForIter(iter, next_instr, oparg);
DISPATCH_SAME_OPARG();
}
STAT_INC(FOR_ITER, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -3001,7 +2989,7 @@ dummy_func(
tstate, PY_MONITORING_EVENT_CALL,
frame, this_instr, function, arg);
ERROR_IF(err, error);
- INCREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ PAUSE_ADAPTIVE_COUNTER(this_instr[1].counter);
GO_TO_INSTRUCTION(CALL);
}
@@ -3030,13 +3018,13 @@ dummy_func(
specializing op(_SPECIALIZE_CALL, (counter/1, callable, self_or_null, args[oparg] -- callable, self_or_null, args[oparg])) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_Call(callable, next_instr, oparg + (self_or_null != NULL));
DISPATCH_SAME_OPARG();
}
STAT_INC(CALL, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
}
@@ -3933,13 +3921,13 @@ dummy_func(
specializing op(_SPECIALIZE_BINARY_OP, (counter/1, lhs, rhs -- lhs, rhs)) {
#if ENABLE_SPECIALIZATION
- if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
+ if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_BinaryOp(lhs, rhs, next_instr, oparg, LOCALS_ARRAY);
DISPATCH_SAME_OPARG();
}
STAT_INC(BINARY_OP, deferred);
- DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
assert(NB_ADD <= oparg);
assert(oparg <= NB_INPLACE_XOR);
@@ -3965,7 +3953,7 @@ dummy_func(
ERROR_IF(next_opcode < 0, error);
next_instr = this_instr;
if (_PyOpcode_Caches[next_opcode]) {
- INCREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
+ PAUSE_ADAPTIVE_COUNTER(next_instr[1].counter);
}
assert(next_opcode > 0 && next_opcode < 256);
opcode = next_opcode;
@@ -4157,21 +4145,22 @@ dummy_func(
tier2 op(_COLD_EXIT, (--)) {
_PyExecutorObject *previous = (_PyExecutorObject *)tstate->previous_executor;
_PyExitData *exit = &previous->exits[oparg];
- exit->temperature++;
PyCodeObject *code = _PyFrame_GetCode(frame);
_Py_CODEUNIT *target = _PyCode_CODE(code) + exit->target;
- if (exit->temperature < (int32_t)tstate->interp->optimizer_side_threshold) {
+ _Py_BackoffCounter temperature = exit->temperature;
+ if (!backoff_counter_triggers(temperature)) {
+     exit->temperature = advance_backoff_counter(temperature);
GOTO_TIER_ONE(target);
}
_PyExecutorObject *executor;
if (target->op.code == ENTER_EXECUTOR) {
executor = code->co_executors->executors[target->op.arg];
Py_INCREF(executor);
- } else {
+ }
+ else {
int optimized = _PyOptimizer_Optimize(frame, target, stack_pointer, &executor);
if (optimized <= 0) {
- int32_t new_temp = -1 * tstate->interp->optimizer_side_threshold;
- exit->temperature = (new_temp < INT16_MIN) ? INT16_MIN : new_temp;
+ exit->temperature = restart_backoff_counter(temperature);
if (optimized < 0) {
Py_DECREF(previous);
tstate->previous_executor = Py_None;
@@ -4181,7 +4170,7 @@ dummy_func(
}
}
/* We need two references. One to store in exit->executor and
- * one to keep the executor alive when executing. */
+ * one to keep the executor alive when executing. */
Py_INCREF(executor);
exit->executor = executor;
GOTO_TIER_TWO(executor);