bpo-30860: Consolidate stateful runtime globals. (#2594)

* group the (stateful) runtime globals into various topical structs
* consolidate the topical structs under a single top-level _PyRuntimeState struct
* add a check-c-globals.py script that helps identify runtime globals

Other globals are excluded (see globals.txt and check-c-globals.py).
Eric Snow 2017-09-05 18:26:16 -07:00 committed by GitHub
parent 501b324d3a
commit 76d5abc868
40 changed files with 2727 additions and 1327 deletions

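The excerpt below (from the interpreter's eval loop) shows the mechanical pattern of the change: each file-static global such as eval_breaker, gil_drop_request, or the pendingcalls queue becomes a field of _PyRuntime.ceval. A minimal sketch of how that state might be grouped, with field names inferred from the accesses in the diff (the actual struct definitions live in CPython's internal headers and may differ):

/* Sketch only: field names taken from the diff below; assumes CPython's
   internal types (_Py_atomic_int, PyThread_type_lock, the GIL state
   struct) are available from the usual internal headers. */
#define NPENDINGCALLS 32

struct _pending_calls {
    unsigned long main_thread;      /* only this thread runs the calls */
    PyThread_type_lock lock;        /* guards the queue (threaded build) */
    _Py_atomic_int calls_to_do;     /* "work is queued" flag */
    int async_exc;                  /* guarded by the GIL */
    struct {
        int (*func)(void *);
        void *arg;
    } calls[NPENDINGCALLS];         /* fixed-size ring buffer */
    int first;                      /* oldest queued call */
    int last;                       /* next free slot */
};

struct _ceval_runtime_state {
    int recursion_limit;
    int check_recursion_limit;
    int tracing_possible;           /* threads with a trace function set */
    _Py_atomic_int eval_breaker;    /* consolidated "leave the fast path" flag */
    _Py_atomic_int gil_drop_request;
    struct _pending_calls pending;
    struct _gil_runtime_state gil;  /* set up by _gil_initialize() */
};

/* Top-level runtime state; the other topical structs this commit adds
   (interpreters, warnings, gc, ...) are omitted from this sketch. */
typedef struct pyruntimestate {
    struct _ceval_runtime_state ceval;
} _PyRuntimeState;

PyAPI_DATA(_PyRuntimeState) _PyRuntime;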

@@ -36,7 +36,8 @@ extern int _PyObject_GetMethod(PyObject *, PyObject *, PyObject **);
typedef PyObject *(*callproc)(PyObject *, PyObject *, PyObject *);
/* Forward declarations */
Py_LOCAL_INLINE(PyObject *) call_function(PyObject ***, Py_ssize_t, PyObject *);
Py_LOCAL_INLINE(PyObject *) call_function(PyObject ***, Py_ssize_t,
PyObject *);
static PyObject * do_call_core(PyObject *, PyObject *, PyObject *);
#ifdef LLTRACE
@@ -52,13 +53,15 @@ static int call_trace_protected(Py_tracefunc, PyObject *,
static void call_exc_trace(Py_tracefunc, PyObject *,
PyThreadState *, PyFrameObject *);
static int maybe_call_line_trace(Py_tracefunc, PyObject *,
PyThreadState *, PyFrameObject *, int *, int *, int *);
PyThreadState *, PyFrameObject *,
int *, int *, int *);
static void maybe_dtrace_line(PyFrameObject *, int *, int *, int *);
static void dtrace_function_entry(PyFrameObject *);
static void dtrace_function_return(PyFrameObject *);
static PyObject * cmp_outcome(int, PyObject *, PyObject *);
static PyObject * import_name(PyFrameObject *, PyObject *, PyObject *, PyObject *);
static PyObject * import_name(PyFrameObject *, PyObject *, PyObject *,
PyObject *);
static PyObject * import_from(PyObject *, PyObject *);
static int import_all_from(PyObject *, PyObject *);
static void format_exc_check_arg(PyObject *, const char *, PyObject *);
@@ -88,7 +91,7 @@ static long dxp[256];
#endif
#ifdef WITH_THREAD
#define GIL_REQUEST _Py_atomic_load_relaxed(&gil_drop_request)
#define GIL_REQUEST _Py_atomic_load_relaxed(&_PyRuntime.ceval.gil_drop_request)
#else
#define GIL_REQUEST 0
#endif
@@ -98,22 +101,22 @@ static long dxp[256];
the GIL eventually anyway. */
#define COMPUTE_EVAL_BREAKER() \
_Py_atomic_store_relaxed( \
&eval_breaker, \
&_PyRuntime.ceval.eval_breaker, \
GIL_REQUEST | \
_Py_atomic_load_relaxed(&pendingcalls_to_do) | \
pending_async_exc)
_Py_atomic_load_relaxed(&_PyRuntime.ceval.pending.calls_to_do) | \
_PyRuntime.ceval.pending.async_exc)
#ifdef WITH_THREAD
#define SET_GIL_DROP_REQUEST() \
do { \
_Py_atomic_store_relaxed(&gil_drop_request, 1); \
_Py_atomic_store_relaxed(&eval_breaker, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
} while (0)
#define RESET_GIL_DROP_REQUEST() \
do { \
_Py_atomic_store_relaxed(&gil_drop_request, 0); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 0); \
COMPUTE_EVAL_BREAKER(); \
} while (0)
@@ -122,47 +125,35 @@ static long dxp[256];
/* Pending calls are only modified under pending_lock */
#define SIGNAL_PENDING_CALLS() \
do { \
_Py_atomic_store_relaxed(&pendingcalls_to_do, 1); \
_Py_atomic_store_relaxed(&eval_breaker, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
} while (0)
#define UNSIGNAL_PENDING_CALLS() \
do { \
_Py_atomic_store_relaxed(&pendingcalls_to_do, 0); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 0); \
COMPUTE_EVAL_BREAKER(); \
} while (0)
#define SIGNAL_ASYNC_EXC() \
do { \
pending_async_exc = 1; \
_Py_atomic_store_relaxed(&eval_breaker, 1); \
_PyRuntime.ceval.pending.async_exc = 1; \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
} while (0)
#define UNSIGNAL_ASYNC_EXC() \
do { pending_async_exc = 0; COMPUTE_EVAL_BREAKER(); } while (0)
do { \
_PyRuntime.ceval.pending.async_exc = 0; \
COMPUTE_EVAL_BREAKER(); \
} while (0)
/* This single variable consolidates all requests to break out of the fast path
in the eval loop. */
static _Py_atomic_int eval_breaker = {0};
/* Request for running pending calls. */
static _Py_atomic_int pendingcalls_to_do = {0};
/* Request for looking at the `async_exc` field of the current thread state.
Guarded by the GIL. */
static int pending_async_exc = 0;
#ifdef WITH_THREAD
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#include "pythread.h"
static PyThread_type_lock pending_lock = 0; /* for pending calls */
static unsigned long main_thread = 0;
/* Request for dropping the GIL */
static _Py_atomic_int gil_drop_request = {0};
#include "ceval_gil.h"
int
@@ -178,9 +169,9 @@ PyEval_InitThreads(void)
return;
create_gil();
take_gil(PyThreadState_GET());
main_thread = PyThread_get_thread_ident();
if (!pending_lock)
pending_lock = PyThread_allocate_lock();
_PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
if (!_PyRuntime.ceval.pending.lock)
_PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
}
void
@@ -248,9 +239,9 @@ PyEval_ReInitThreads(void)
if (!gil_created())
return;
recreate_gil();
pending_lock = PyThread_allocate_lock();
_PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
take_gil(current_tstate);
main_thread = PyThread_get_thread_ident();
_PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
/* Destroy all threads except the current one */
_PyThreadState_DeleteExcept(current_tstate);
@@ -294,7 +285,7 @@ PyEval_RestoreThread(PyThreadState *tstate)
int err = errno;
take_gil(tstate);
/* _Py_Finalizing is protected by the GIL */
if (_Py_Finalizing && tstate != _Py_Finalizing) {
if (_Py_IS_FINALIZING() && !_Py_CURRENTLY_FINALIZING(tstate)) {
drop_gil(tstate);
PyThread_exit_thread();
assert(0); /* unreachable */
@@ -346,19 +337,11 @@ _PyEval_SignalReceived(void)
callback.
*/
#define NPENDINGCALLS 32
static struct {
int (*func)(void *);
void *arg;
} pendingcalls[NPENDINGCALLS];
static int pendingfirst = 0;
static int pendinglast = 0;
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
int i, j, result=0;
PyThread_type_lock lock = pending_lock;
PyThread_type_lock lock = _PyRuntime.ceval.pending.lock;
/* try a few times for the lock. Since this mechanism is used
* for signal handling (on the main thread), there is a (slim)
@@ -380,14 +363,14 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
return -1;
}
i = pendinglast;
i = _PyRuntime.ceval.pending.last;
j = (i + 1) % NPENDINGCALLS;
if (j == pendingfirst) {
if (j == _PyRuntime.ceval.pending.first) {
result = -1; /* Queue full */
} else {
pendingcalls[i].func = func;
pendingcalls[i].arg = arg;
pendinglast = j;
_PyRuntime.ceval.pending.calls[i].func = func;
_PyRuntime.ceval.pending.calls[i].arg = arg;
_PyRuntime.ceval.pending.last = j;
}
/* signal main loop */
SIGNAL_PENDING_CALLS();
@@ -405,16 +388,19 @@ Py_MakePendingCalls(void)
assert(PyGILState_Check());
if (!pending_lock) {
if (!_PyRuntime.ceval.pending.lock) {
/* initial allocation of the lock */
pending_lock = PyThread_allocate_lock();
if (pending_lock == NULL)
_PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
if (_PyRuntime.ceval.pending.lock == NULL)
return -1;
}
/* only service pending calls on main thread */
if (main_thread && PyThread_get_thread_ident() != main_thread)
if (_PyRuntime.ceval.pending.main_thread &&
PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
{
return 0;
}
/* don't perform recursive pending calls */
if (busy)
return 0;
@@ -436,16 +422,16 @@ Py_MakePendingCalls(void)
void *arg = NULL;
/* pop one item off the queue while holding the lock */
PyThread_acquire_lock(pending_lock, WAIT_LOCK);
j = pendingfirst;
if (j == pendinglast) {
PyThread_acquire_lock(_PyRuntime.ceval.pending.lock, WAIT_LOCK);
j = _PyRuntime.ceval.pending.first;
if (j == _PyRuntime.ceval.pending.last) {
func = NULL; /* Queue empty */
} else {
func = pendingcalls[j].func;
arg = pendingcalls[j].arg;
pendingfirst = (j + 1) % NPENDINGCALLS;
func = _PyRuntime.ceval.pending.calls[j].func;
arg = _PyRuntime.ceval.pending.calls[j].arg;
_PyRuntime.ceval.pending.first = (j + 1) % NPENDINGCALLS;
}
PyThread_release_lock(pending_lock);
PyThread_release_lock(_PyRuntime.ceval.pending.lock);
/* having released the lock, perform the callback */
if (func == NULL)
break;
@@ -489,14 +475,6 @@ error:
The two threads could theoretically wiggle around the "busy" variable.
*/
#define NPENDINGCALLS 32
static struct {
int (*func)(void *);
void *arg;
} pendingcalls[NPENDINGCALLS];
static volatile int pendingfirst = 0;
static volatile int pendinglast = 0;
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
@@ -506,15 +484,15 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
if (busy)
return -1;
busy = 1;
i = pendinglast;
i = _PyRuntime.ceval.pending.last;
j = (i + 1) % NPENDINGCALLS;
if (j == pendingfirst) {
if (j == _PyRuntime.ceval.pending.first) {
busy = 0;
return -1; /* Queue full */
}
pendingcalls[i].func = func;
pendingcalls[i].arg = arg;
pendinglast = j;
_PyRuntime.ceval.pending.calls[i].func = func;
_PyRuntime.ceval.pending.calls[i].arg = arg;
_PyRuntime.ceval.pending.last = j;
SIGNAL_PENDING_CALLS();
busy = 0;
@@ -543,12 +521,12 @@ Py_MakePendingCalls(void)
int i;
int (*func)(void *);
void *arg;
i = pendingfirst;
if (i == pendinglast)
i = _PyRuntime.ceval.pending.first;
if (i == _PyRuntime.ceval.pending.last)
break; /* Queue empty */
func = pendingcalls[i].func;
arg = pendingcalls[i].arg;
pendingfirst = (i + 1) % NPENDINGCALLS;
func = _PyRuntime.ceval.pending.calls[i].func;
arg = _PyRuntime.ceval.pending.calls[i].arg;
_PyRuntime.ceval.pending.first = (i + 1) % NPENDINGCALLS;
if (func(arg) < 0) {
goto error;
}
@@ -570,20 +548,32 @@ error:
#ifndef Py_DEFAULT_RECURSION_LIMIT
#define Py_DEFAULT_RECURSION_LIMIT 1000
#endif
static int recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
void
_PyEval_Initialize(struct _ceval_runtime_state *state)
{
state->recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
state->check_recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
_gil_initialize(&state->gil);
}
int
_PyEval_CheckRecursionLimit(void)
{
return _PyRuntime.ceval.check_recursion_limit;
}
int
Py_GetRecursionLimit(void)
{
return recursion_limit;
return _PyRuntime.ceval.recursion_limit;
}
void
Py_SetRecursionLimit(int new_limit)
{
recursion_limit = new_limit;
_Py_CheckRecursionLimit = recursion_limit;
_PyRuntime.ceval.recursion_limit = new_limit;
_PyRuntime.ceval.check_recursion_limit = _PyRuntime.ceval.recursion_limit;
}
/* the macro Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
@@ -595,6 +585,7 @@ int
_Py_CheckRecursiveCall(const char *where)
{
PyThreadState *tstate = PyThreadState_GET();
int recursion_limit = _PyRuntime.ceval.recursion_limit;
#ifdef USE_STACKCHECK
if (PyOS_CheckStack()) {
@@ -603,7 +594,7 @@ _Py_CheckRecursiveCall(const char *where)
return -1;
}
#endif
_Py_CheckRecursionLimit = recursion_limit;
_PyRuntime.ceval.check_recursion_limit = recursion_limit;
if (tstate->recursion_critical)
/* Somebody asked that we don't check for recursion. */
return 0;
@@ -642,13 +633,7 @@ static void restore_and_clear_exc_state(PyThreadState *, PyFrameObject *);
static int do_raise(PyObject *, PyObject *);
static int unpack_iterable(PyObject *, int, int, PyObject **);
/* Records whether tracing is on for any thread. Counts the number of
threads for which tstate->c_tracefunc is non-NULL, so if the value
is 0, we know we don't have to check this thread's c_tracefunc.
This speeds up the if statement in PyEval_EvalFrameEx() after
fast_next_opcode*/
static int _Py_TracingPossible = 0;
#define _Py_TracingPossible _PyRuntime.ceval.tracing_possible
PyObject *
@@ -779,7 +764,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
#define DISPATCH() \
{ \
if (!_Py_atomic_load_relaxed(&eval_breaker)) { \
if (!_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) { \
FAST_DISPATCH(); \
} \
continue; \
@@ -827,7 +812,8 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
/* Code access macros */
/* The integer overflow is checked by an assertion below. */
#define INSTR_OFFSET() (sizeof(_Py_CODEUNIT) * (int)(next_instr - first_instr))
#define INSTR_OFFSET() \
(sizeof(_Py_CODEUNIT) * (int)(next_instr - first_instr))
#define NEXTOPARG() do { \
_Py_CODEUNIT word = *next_instr; \
opcode = _Py_OPCODE(word); \
@@ -1080,7 +1066,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
async I/O handler); see Py_AddPendingCall() and
Py_MakePendingCalls() above. */
if (_Py_atomic_load_relaxed(&eval_breaker)) {
if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) {
if (_Py_OPCODE(*next_instr) == SETUP_FINALLY ||
_Py_OPCODE(*next_instr) == YIELD_FROM) {
/* Two cases where we skip running signal handlers and other
@@ -1097,12 +1083,16 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
*/
goto fast_next_opcode;
}
if (_Py_atomic_load_relaxed(&pendingcalls_to_do)) {
if (_Py_atomic_load_relaxed(
&_PyRuntime.ceval.pending.calls_to_do))
{
if (Py_MakePendingCalls() < 0)
goto error;
}
#ifdef WITH_THREAD
if (_Py_atomic_load_relaxed(&gil_drop_request)) {
if (_Py_atomic_load_relaxed(
&_PyRuntime.ceval.gil_drop_request))
{
/* Give another thread a chance */
if (PyThreadState_Swap(NULL) != tstate)
Py_FatalError("ceval: tstate mix-up");
@@ -1113,7 +1103,9 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
take_gil(tstate);
/* Check if we should make a quick exit. */
if (_Py_Finalizing && _Py_Finalizing != tstate) {
if (_Py_IS_FINALIZING() &&
!_Py_CURRENTLY_FINALIZING(tstate))
{
drop_gil(tstate);
PyThread_exit_thread();
}
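For reference, the pending-calls machinery the diff relocates is a small fixed-size ring buffer: one slot is always left empty so that first == last means "empty". A standalone sketch of that enqueue/dequeue logic (simplified: no locking, no signal safety; names are illustrative — CPython's actual entry points are Py_AddPendingCall() and Py_MakePendingCalls(), shown above):

/* Sketch of the pending-call ring buffer, compilable on its own. */
#include <stdio.h>

#define NPENDINGCALLS 32

static struct {
    int (*func)(void *);
    void *arg;
} calls[NPENDINGCALLS];
static int first = 0;   /* index of the oldest queued call */
static int last = 0;    /* index of the next free slot */

/* Enqueue a callback; returns -1 when the queue is full. */
static int add_pending(int (*func)(void *), void *arg)
{
    int i = last;
    int j = (i + 1) % NPENDINGCALLS;
    if (j == first)
        return -1;                   /* queue full */
    calls[i].func = func;
    calls[i].arg = arg;
    last = j;
    return 0;
}

/* Run queued callbacks until the queue is empty or one of them fails. */
static int make_pending_calls(void)
{
    while (first != last) {
        int i = first;
        int (*func)(void *) = calls[i].func;
        void *arg = calls[i].arg;
        first = (i + 1) % NPENDINGCALLS;
        if (func(arg) < 0)
            return -1;
    }
    return 0;
}

static int say_hi(void *arg)
{
    printf("pending call: %s\n", (const char *)arg);
    return 0;
}

int main(void)
{
    add_pending(say_hi, (void *)"hello");
    return make_pending_calls();
}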