bpo-44187: Quickening infrastructure (GH-26264)

* Add co_firstinstr field to code object.

* Implement barebones quickening.

* Use non-quickened bytecode when tracing.

* Add NEWS item

* Add new file to Windows build.

* Don't specialize instructions with EXTENDED_ARG.
Author: Mark Shannon, 2021-06-07 18:38:06 +01:00, committed by GitHub
parent 89e50ab36f
commit 001eb520b5
12 changed files with 416 additions and 12 deletions

Python/ceval.c

@@ -1343,6 +1343,14 @@ eval_frame_handle_pending(PyThreadState *tstate)
#define JUMPTO(x) (next_instr = first_instr + (x))
#define JUMPBY(x) (next_instr += (x))
/* Get opcode and oparg from original instructions, not quickened form. */
#define TRACING_NEXTOPARG() do { \
_Py_CODEUNIT word = ((_Py_CODEUNIT *)PyBytes_AS_STRING(co->co_code))[INSTR_OFFSET()]; \
opcode = _Py_OPCODE(word); \
oparg = _Py_OPARG(word); \
next_instr++; \
} while (0)
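/* For contrast, the regular NEXTOPARG() used on the non-tracing path is
 * defined nearby in this file (not shown here); a plausible form is:
 *
 *     _Py_CODEUNIT word = *next_instr;
 *     opcode = _Py_OPCODE(word);
 *     oparg = _Py_OPARG(word);
 *     next_instr++;
 *
 * It reads through next_instr/first_instr, which after quickening point at the
 * quickened copy, whereas TRACING_NEXTOPARG() always reads co->co_code. */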
/* OpCode prediction macros
Some opcodes tend to come in pairs thus making it possible to
predict the second opcode when the first is run. For example,
@@ -1644,15 +1652,23 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, PyFrameObject *f, int throwflag)
if (PyDTrace_FUNCTION_ENTRY_ENABLED())
dtrace_function_entry(f);
/* Increment the warmup counter and quicken if warm enough
* _Py_Quicken is idempotent so we don't worry about overflow */
if (!PyCodeObject_IsWarmedUp(co)) {
PyCodeObject_IncrementWarmup(co);
if (PyCodeObject_IsWarmedUp(co)) {
if (_Py_Quicken(co)) {
goto exit_eval_frame;
}
}
}
names = co->co_names;
consts = co->co_consts;
fastlocals = f->f_localsptr;
first_instr = co->co_firstinstr;
freevars = f->f_localsptr + co->co_nlocals;
assert(PyBytes_Check(co->co_code));
assert(PyBytes_GET_SIZE(co->co_code) <= INT_MAX);
assert(PyBytes_GET_SIZE(co->co_code) % sizeof(_Py_CODEUNIT) == 0);
assert(_Py_IS_ALIGNED(PyBytes_AS_STRING(co->co_code), sizeof(_Py_CODEUNIT)));
first_instr = (_Py_CODEUNIT *) PyBytes_AS_STRING(co->co_code);
/*
f->f_lasti refers to the index of the last instruction,
unless it's -1 in which case next_instr should be first_instr.
@@ -1757,7 +1773,7 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, PyFrameObject *f, int throwflag)
tracing_dispatch:
f->f_lasti = INSTR_OFFSET();
NEXTOPARG();
TRACING_NEXTOPARG();
if (PyDTrace_LINE_ENABLED())
maybe_dtrace_line(f, &trace_info);
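
The warm-up helpers used above (PyCodeObject_IncrementWarmup, PyCodeObject_IsWarmedUp) are not part of this hunk; they live in the internal headers. A minimal sketch of the intended behaviour, assuming a small signed co_warmup counter that starts negative and reaches zero once the code object is warm:

#define QUICKENING_WARMUP_DELAY 8   /* assumed number of warm-up runs */
#define QUICKENING_INITIAL_WARMUP_VALUE (-QUICKENING_WARMUP_DELAY)
#define QUICKENING_WARMUP_COLDEST 1 /* marks a code object we chose not to quicken */

static inline void
PyCodeObject_IncrementWarmup(PyCodeObject *co) {
    co->co_warmup++;
}

static inline int
PyCodeObject_IsWarmedUp(PyCodeObject *co) {
    return (co->co_warmup == 0);
}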

Python/clinic/sysmodule.c.h

@@ -710,6 +710,33 @@ exit:
#endif /* defined(Py_REF_DEBUG) */
PyDoc_STRVAR(sys__getquickenedcount__doc__,
"_getquickenedcount($module, /)\n"
"--\n"
"\n");
#define SYS__GETQUICKENEDCOUNT_METHODDEF \
{"_getquickenedcount", (PyCFunction)sys__getquickenedcount, METH_NOARGS, sys__getquickenedcount__doc__},
static Py_ssize_t
sys__getquickenedcount_impl(PyObject *module);
static PyObject *
sys__getquickenedcount(PyObject *module, PyObject *Py_UNUSED(ignored))
{
PyObject *return_value = NULL;
Py_ssize_t _return_value;
_return_value = sys__getquickenedcount_impl(module);
if ((_return_value == -1) && PyErr_Occurred()) {
goto exit;
}
return_value = PyLong_FromSsize_t(_return_value);
exit:
return return_value;
}
PyDoc_STRVAR(sys_getallocatedblocks__doc__,
"getallocatedblocks($module, /)\n"
"--\n"
@@ -983,4 +1010,4 @@ sys__deactivate_opcache(PyObject *module, PyObject *Py_UNUSED(ignored))
#ifndef SYS_GETANDROIDAPILEVEL_METHODDEF
#define SYS_GETANDROIDAPILEVEL_METHODDEF
#endif /* !defined(SYS_GETANDROIDAPILEVEL_METHODDEF) */
/*[clinic end generated code: output=68c62b9ca317a0c8 input=a9049054013a1b77]*/
/*[clinic end generated code: output=e77bf636a177c5c3 input=a9049054013a1b77]*/

Python/specialize.c (new file, 197 lines)

@@ -0,0 +1,197 @@
#include "Python.h"
#include "pycore_code.h"
#include "opcode.h"
/* We lay out the quickened data as a bi-directional array:
* Instructions upwards, cache entries downwards.
* first_instr is aligned to a SpecializedCacheEntry.
* The nth instruction is located at first_instr[n]
* The nth cache is located at ((SpecializedCacheEntry *)first_instr)[-1-n]
* The first (index 0) cache entry is reserved for the count, to enable finding
* the first instruction from the base pointer.
* The cache_count argument must include space for the count.
* We use the SpecializedCacheOrInstruction union to refer to the data
* to avoid type punning.
Layout of quickened data, each line 8 bytes for M cache entries and N instructions:
<cache_count> <---- co->co_quickened
<cache M-1>
<cache M-2>
...
<cache 0>
<instr 0> <instr 1> <instr 2> <instr 3> <--- co->co_firstinstr
<instr 4> <instr 5> <instr 6> <instr 7>
...
<instr N-1>
*/
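/* Illustrative sketch, not part of this commit: the cache-indexing rule above
 * is what _GetSpecializedCacheEntry() (used in optimize() below and defined in
 * pycore_code.h) has to implement. A form consistent with that rule: */
static inline SpecializedCacheEntry *
get_cache_entry_sketch(_Py_CODEUNIT *first_instr, Py_ssize_t n)
{
    /* Cache entries grow downwards from first_instr, so entry n sits
     * n+1 SpecializedCacheEntry slots before the first instruction. */
    return &((SpecializedCacheEntry *)first_instr)[-1 - n];
}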
Py_ssize_t _Py_QuickenedCount = 0;
static SpecializedCacheOrInstruction *
allocate(int cache_count, int instruction_count)
{
assert(sizeof(SpecializedCacheOrInstruction) == 2*sizeof(int32_t));
assert(sizeof(SpecializedCacheEntry) == 2*sizeof(int32_t));
assert(cache_count > 0);
assert(instruction_count > 0);
int count = cache_count + (instruction_count + INSTRUCTIONS_PER_ENTRY -1)/INSTRUCTIONS_PER_ENTRY;
SpecializedCacheOrInstruction *array = (SpecializedCacheOrInstruction *)
PyMem_Malloc(sizeof(SpecializedCacheOrInstruction) * count);
if (array == NULL) {
PyErr_NoMemory();
return NULL;
}
_Py_QuickenedCount++;
array[0].entry.zero.cache_count = cache_count;
return array;
}
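/* Worked example (illustrative, assuming 2-byte code units and the 8-byte
 * entry size asserted above): INSTRUCTIONS_PER_ENTRY is 4, so a code object
 * with 10 instructions and a cache_count of 5 (count entry included) needs
 * 5 + (10 + 3)/4 = 8 SpecializedCacheOrInstruction slots, i.e. 64 bytes. */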
static int
get_cache_count(SpecializedCacheOrInstruction *quickened) {
return quickened[0].entry.zero.cache_count;
}
/* Map from opcode to adaptive opcode.
Values of zero are ignored. */
static uint8_t adaptive_opcodes[256] = { 0 };
/* The number of cache entries required for a "family" of instructions. */
static uint8_t cache_requirements[256] = { 0 };
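/* Both tables are intentionally all zeros in this barebones commit, so nothing
 * is specialized yet. A later change would register an instruction family
 * roughly like this (the adaptive opcode name and entry count are hypothetical):
 *
 *     adaptive_opcodes[LOAD_ATTR] = LOAD_ATTR_ADAPTIVE;
 *     cache_requirements[LOAD_ATTR] = 2;
 */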
/* Return the oparg for the cache_offset and instruction index.
*
* If no cache is needed then return the original oparg.
* If a cache is needed, but cannot be accessed because
* oparg would be too large, then return -1.
*
* Also updates the cache_offset, as it may need to be incremented by
* more than the cache requirements, if many instructions do not need caches.
*
* See pycore_code.h for details of how the cache offset,
* instruction index and oparg are related */
static int
oparg_from_instruction_and_update_offset(int index, int opcode, int original_oparg, int *cache_offset) {
/* The instruction pointer in the interpreter points to the next
* instruction, so we compute the offset using nexti (index + 1) */
int nexti = index + 1;
uint8_t need = cache_requirements[opcode];
if (need == 0) {
return original_oparg;
}
assert(adaptive_opcodes[opcode] != 0);
int oparg = oparg_from_offset_and_nexti(*cache_offset, nexti);
assert(*cache_offset == offset_from_oparg_and_nexti(oparg, nexti));
/* Some cache space is wasted here as the minimum possible offset is (nexti>>1) */
if (oparg < 0) {
oparg = 0;
*cache_offset = offset_from_oparg_and_nexti(oparg, nexti);
}
else if (oparg > 255) {
return -1;
}
*cache_offset += need;
return oparg;
}
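/* The two helpers asserted above (oparg_from_offset_and_nexti and
 * offset_from_oparg_and_nexti) are defined in pycore_code.h. A sketch that is
 * consistent with those assertions and with the "minimum possible offset is
 * (nexti>>1)" note follows; treat the exact formula as an assumption. */
static inline int
offset_from_oparg_and_nexti_sketch(int oparg, int nexti)
{
    return (nexti >> 1) + oparg;
}

static inline int
oparg_from_offset_and_nexti_sketch(int offset, int nexti)
{
    return offset - (nexti >> 1);
}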
static int
entries_needed(_Py_CODEUNIT *code, int len)
{
int cache_offset = 0;
int previous_opcode = -1;
for (int i = 0; i < len; i++) {
uint8_t opcode = _Py_OPCODE(code[i]);
if (previous_opcode != EXTENDED_ARG) {
oparg_from_instruction_and_update_offset(i, opcode, 0, &cache_offset);
}
previous_opcode = opcode;
}
return cache_offset + 1; // One extra for the count entry
}
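/* Example (illustrative): with adaptive_opcodes and cache_requirements still
 * all zeros, every call above returns the original oparg without touching
 * cache_offset, so entries_needed() is always 1 -- just the count entry. */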
static inline _Py_CODEUNIT *
first_instruction(SpecializedCacheOrInstruction *quickened)
{
return &quickened[get_cache_count(quickened)].code[0];
}
/** Insert adaptive instructions and superinstructions.
*
 * Skip instructions preceded by EXTENDED_ARG for adaptive
* instructions as those are both very rare and tricky
* to handle.
*/
static void
optimize(SpecializedCacheOrInstruction *quickened, int len)
{
_Py_CODEUNIT *instructions = first_instruction(quickened);
int cache_offset = 0;
int previous_opcode = -1;
for(int i = 0; i < len; i++) {
int opcode = _Py_OPCODE(instructions[i]);
int oparg = _Py_OPARG(instructions[i]);
uint8_t adaptive_opcode = adaptive_opcodes[opcode];
if (adaptive_opcode && previous_opcode != EXTENDED_ARG) {
int new_oparg = oparg_from_instruction_and_update_offset(
i, opcode, oparg, &cache_offset
);
if (new_oparg < 0) {
/* Not possible to allocate a cache for this instruction */
previous_opcode = opcode;
continue;
}
instructions[i] = _Py_MAKECODEUNIT(adaptive_opcode, new_oparg);
previous_opcode = adaptive_opcode;
int entries_needed = cache_requirements[opcode];
if (entries_needed) {
/* Initialize the adaptive cache entry */
int cache0_offset = cache_offset-entries_needed;
SpecializedCacheEntry *cache =
_GetSpecializedCacheEntry(instructions, cache0_offset);
cache->adaptive.original_oparg = oparg;
cache->adaptive.counter = 0;
}
}
else {
/* Super instructions don't use the cache,
* so no need to update the offset. */
switch (opcode) {
/* Insert superinstructions here
E.g.
case LOAD_FAST:
if (previous_opcode == LOAD_FAST)
instructions[i-1] = _Py_MAKECODEUNIT(LOAD_FAST__LOAD_FAST, oparg);
*/
}
previous_opcode = opcode;
}
}
assert(cache_offset+1 == get_cache_count(quickened));
}
int
_Py_Quicken(PyCodeObject *code) {
if (code->co_quickened) {
return 0;
}
Py_ssize_t size = PyBytes_GET_SIZE(code->co_code);
int instr_count = (int)(size/sizeof(_Py_CODEUNIT));
if (instr_count > MAX_SIZE_TO_QUICKEN) {
code->co_warmup = QUICKENING_WARMUP_COLDEST;
return 0;
}
int entry_count = entries_needed(code->co_firstinstr, instr_count);
SpecializedCacheOrInstruction *quickened = allocate(entry_count, instr_count);
if (quickened == NULL) {
return -1;
}
_Py_CODEUNIT *new_instructions = first_instruction(quickened);
memcpy(new_instructions, code->co_firstinstr, size);
optimize(quickened, instr_count);
code->co_quickened = quickened;
code->co_firstinstr = new_instructions;
return 0;
}
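/* Summary of the lifecycle wired up in this commit: the eval loop in ceval.c
 * warms a code object up and then calls _Py_Quicken() once. On success,
 * co_firstinstr points into the co_quickened buffer (so the interpreter runs
 * the quickened copy), while co_code is left untouched and is what
 * TRACING_NEXTOPARG() keeps reading when tracing. */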

Python/sysmodule.c

@@ -18,6 +18,7 @@ Data members:
#include "pycore_ceval.h" // _Py_RecursionLimitLowerWaterMark()
#include "pycore_initconfig.h" // _PyStatus_EXCEPTION()
#include "pycore_object.h" // _PyObject_IS_GC()
#include "pycore_code.h" // _Py_QuickenedCount
#include "pycore_pathconfig.h" // _PyPathConfig_ComputeSysPath0()
#include "pycore_pyerrors.h" // _PyErr_Fetch()
#include "pycore_pylifecycle.h" // _PyErr_WriteUnraisableDefaultHook()
@@ -1763,8 +1764,20 @@ sys_gettotalrefcount_impl(PyObject *module)
{
return _Py_GetRefTotal();
}
#endif /* Py_REF_DEBUG */
/*[clinic input]
sys._getquickenedcount -> Py_ssize_t
[clinic start generated code]*/
static Py_ssize_t
sys__getquickenedcount_impl(PyObject *module)
/*[clinic end generated code: output=1ab259e7f91248a2 input=249d448159eca912]*/
{
return _Py_QuickenedCount;
}
/*[clinic input]
sys.getallocatedblocks -> Py_ssize_t
@@ -1995,6 +2008,7 @@ static PyMethodDef sys_methods[] = {
#endif
SYS_GETFILESYSTEMENCODING_METHODDEF
SYS_GETFILESYSTEMENCODEERRORS_METHODDEF
SYS__GETQUICKENEDCOUNT_METHODDEF
#ifdef Py_TRACE_REFS
{"getobjects", _Py_GetObjects, METH_VARARGS},
#endif