GH-98686: Get rid of "adaptive" and "quick" instructions (GH-99182)
parent 6e3cc72afe
commit c7f5708714

18 changed files with 562 additions and 758 deletions
Include/internal/pycore_code.h

@@ -91,8 +91,6 @@ typedef struct {
 
 #define INLINE_CACHE_ENTRIES_FOR_ITER CACHE_ENTRIES(_PyForIterCache)
 
-extern uint8_t _PyOpcode_Adaptive[256];
-
 // Borrowed references to common callables:
 struct callable_cache {
     PyObject *isinstance;
@@ -219,11 +217,14 @@ extern int _Py_Specialize_LoadAttr(PyObject *owner, _Py_CODEUNIT *instr,
                                    PyObject *name);
 extern int _Py_Specialize_StoreAttr(PyObject *owner, _Py_CODEUNIT *instr,
                                     PyObject *name);
-extern int _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins, _Py_CODEUNIT *instr, PyObject *name);
-extern int _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container, _Py_CODEUNIT *instr);
-extern int _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub, _Py_CODEUNIT *instr);
-extern int _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr,
-                               int nargs, PyObject *kwnames);
+extern void _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins,
+                                      _Py_CODEUNIT *instr, PyObject *name);
+extern void _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container,
+                                        _Py_CODEUNIT *instr);
+extern void _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub,
+                                       _Py_CODEUNIT *instr);
+extern void _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr,
+                                int nargs, PyObject *kwnames);
 extern void _Py_Specialize_BinaryOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr,
                                     int oparg, PyObject **locals);
 extern void _Py_Specialize_CompareOp(PyObject *lhs, PyObject *rhs,
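With the adaptive and quick instructions gone, the specializing functions above change from int to void: they never report failure to the caller, because the instruction is always left in a runnable state (either specialized, or generic with a freshly reset counter). A minimal runnable sketch of the resulting call-site shape, using hypothetical stand-ins rather than CPython's actual interpreter internals:

/* Editor's sketch (not part of the commit): the shape of a specializer
 * call site before and after the int -> void change, with hypothetical
 * stand-ins for CPython internals. */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t _Py_CODEUNIT;      /* stand-in for CPython's code unit */

/* Before: the specializer returned 0 on success, -1 on failure, and the
 * caller had to branch on the result. */
static int
specialize_old(_Py_CODEUNIT *instr)
{
    *instr = 1;                     /* pretend the instruction was rewritten */
    return 0;
}

/* After: specialization can no longer fail from the caller's point of
 * view; the instruction is always left runnable, so the function
 * returns void. */
static void
specialize_new(_Py_CODEUNIT *instr)
{
    *instr = 1;
}

int
main(void)
{
    _Py_CODEUNIT code[1] = {0};

    if (specialize_old(code) < 0) { /* old call-site shape: error branch */
        return 1;
    }

    specialize_new(code);           /* new call-site shape: just re-dispatch */
    printf("next opcode: %u\n", (unsigned)code[0]);
    return 0;
}

The practical effect is that callers in the interpreter loop can drop their error branch and simply re-execute the (possibly rewritten) instruction.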
@@ -377,8 +378,22 @@ write_location_entry_start(uint8_t *ptr, int code, int length)
 
 /* With a 16-bit counter, we have 12 bits for the counter value, and 4 bits for the backoff */
 #define ADAPTIVE_BACKOFF_BITS 4
-/* The initial counter value is 1 == 2**ADAPTIVE_BACKOFF_START - 1 */
-#define ADAPTIVE_BACKOFF_START 1
+
+// A value of 1 means that we attempt to specialize the *second* time each
+// instruction is executed. Executing twice is a much better indicator of
+// "hotness" than executing once, but additional warmup delays only prevent
+// specialization. Most types stabilize by the second execution, too:
+#define ADAPTIVE_WARMUP_VALUE 1
+#define ADAPTIVE_WARMUP_BACKOFF 1
+
+// A value of 52 means that we attempt to re-specialize after 53 misses (a prime
+// number, useful for avoiding artifacts if every nth value is a different type
+// or something). Setting the backoff to 0 means that the counter is reset to
+// the same state as a warming-up instruction (value == 1, backoff == 1) after
+// deoptimization. This isn't strictly necessary, but it is a bit easier to
+// reason about when thinking about the opcode transitions as a state machine:
+#define ADAPTIVE_COOLDOWN_VALUE 52
+#define ADAPTIVE_COOLDOWN_BACKOFF 0
+
 #define MAX_BACKOFF_VALUE (16 - ADAPTIVE_BACKOFF_BITS)
 
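The comments in this hunk describe the counter layout: of the 16-bit counter, 12 bits hold the counter value and 4 bits hold the backoff. A small self-contained sketch of that packing, reusing the constants introduced above; counter_bits() is a stand-in that mirrors the adaptive_counter_bits() helper visible in the next hunk:

/* Editor's sketch (not part of the commit): how the 12-bit value / 4-bit
 * backoff packing described above behaves. */
#include <stdint.h>
#include <stdio.h>

#define ADAPTIVE_BACKOFF_BITS 4
#define ADAPTIVE_WARMUP_VALUE 1
#define ADAPTIVE_WARMUP_BACKOFF 1
#define ADAPTIVE_COOLDOWN_VALUE 52
#define ADAPTIVE_COOLDOWN_BACKOFF 0

static uint16_t
counter_bits(int value, int backoff)
{
    /* high 12 bits: counter value; low 4 bits: backoff exponent */
    return (uint16_t)((value << ADAPTIVE_BACKOFF_BITS) |
                      (backoff & ((1 << ADAPTIVE_BACKOFF_BITS) - 1)));
}

int
main(void)
{
    uint16_t warmup = counter_bits(ADAPTIVE_WARMUP_VALUE,
                                   ADAPTIVE_WARMUP_BACKOFF);
    uint16_t cooldown = counter_bits(ADAPTIVE_COOLDOWN_VALUE,
                                     ADAPTIVE_COOLDOWN_BACKOFF);
    /* value == 1: the counter reaches zero on the second execution;
     * value == 52: re-specialization is attempted after 53 misses. */
    printf("warmup:   value=%u backoff=%u\n",
           (unsigned)(warmup >> ADAPTIVE_BACKOFF_BITS),
           (unsigned)(warmup & 0xF));
    printf("cooldown: value=%u backoff=%u\n",
           (unsigned)(cooldown >> ADAPTIVE_BACKOFF_BITS),
           (unsigned)(cooldown & 0xF));
    return 0;
}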
@@ -390,9 +405,15 @@ adaptive_counter_bits(int value, int backoff) {
 }
 
 static inline uint16_t
-adaptive_counter_start(void) {
-    unsigned int value = (1 << ADAPTIVE_BACKOFF_START) - 1;
-    return adaptive_counter_bits(value, ADAPTIVE_BACKOFF_START);
+adaptive_counter_warmup(void) {
+    return adaptive_counter_bits(ADAPTIVE_WARMUP_VALUE,
+                                 ADAPTIVE_WARMUP_BACKOFF);
+}
+
+static inline uint16_t
+adaptive_counter_cooldown(void) {
+    return adaptive_counter_bits(ADAPTIVE_COOLDOWN_VALUE,
+                                 ADAPTIVE_COOLDOWN_BACKOFF);
 }
 
 static inline uint16_t
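This hunk replaces the single adaptive_counter_start() constructor with separate warmup and cooldown variants built on the new constants. A runnable sketch of how an exponential-backoff step could evolve such a counter after repeated specialization misses; counter_backoff() here is an assumed model, not necessarily the helper that the hunk's trailing "static inline uint16_t" context line opens:

/* Editor's sketch (not part of the commit): one plausible exponential
 * backoff step for this counter layout. */
#include <stdint.h>
#include <stdio.h>

#define ADAPTIVE_BACKOFF_BITS 4
#define MAX_BACKOFF_VALUE (16 - ADAPTIVE_BACKOFF_BITS)

static uint16_t
counter_bits(int value, int backoff)
{
    return (uint16_t)((value << ADAPTIVE_BACKOFF_BITS) |
                      (backoff & ((1 << ADAPTIVE_BACKOFF_BITS) - 1)));
}

/* Bump the 4-bit backoff exponent (capped at 12) and reload the value
 * with 2**backoff - 1, so each failed specialization attempt roughly
 * doubles the wait before the next one. */
static uint16_t
counter_backoff(uint16_t counter)
{
    unsigned int backoff = counter & ((1 << ADAPTIVE_BACKOFF_BITS) - 1);
    backoff++;
    if (backoff > MAX_BACKOFF_VALUE) {
        backoff = MAX_BACKOFF_VALUE;
    }
    unsigned int value = (1U << backoff) - 1;
    return counter_bits((int)value, (int)backoff);
}

int
main(void)
{
    uint16_t c = counter_bits(1, 1);    /* warmup state: value=1, backoff=1 */
    for (int miss = 1; miss <= 12; miss++) {
        c = counter_backoff(c);
        printf("after miss %2d: value=%4u backoff=%2u\n", miss,
               (unsigned)(c >> ADAPTIVE_BACKOFF_BITS),
               (unsigned)(c & 0xF));
    }
    return 0;                           /* value saturates at 4095 (12 bits) */
}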