Revert "bpo-30860: Consolidate stateful runtime globals." (#3379)

Windows buildbots started failing due to include-related errors.
Author: Eric Snow, 2017-09-05 21:43:08 -07:00 (committed via GitHub)
parent 833860615b
commit 05351c1bd8
40 changed files with 1327 additions and 2726 deletions

Python/ceval.c

@@ -36,8 +36,7 @@ extern int _PyObject_GetMethod(PyObject *, PyObject *, PyObject **);
typedef PyObject *(*callproc)(PyObject *, PyObject *, PyObject *);
/* Forward declarations */
Py_LOCAL_INLINE(PyObject *) call_function(PyObject ***, Py_ssize_t,
PyObject *);
Py_LOCAL_INLINE(PyObject *) call_function(PyObject ***, Py_ssize_t, PyObject *);
static PyObject * do_call_core(PyObject *, PyObject *, PyObject *);
#ifdef LLTRACE
@@ -53,15 +52,13 @@ static int call_trace_protected(Py_tracefunc, PyObject *,
static void call_exc_trace(Py_tracefunc, PyObject *,
PyThreadState *, PyFrameObject *);
static int maybe_call_line_trace(Py_tracefunc, PyObject *,
PyThreadState *, PyFrameObject *,
int *, int *, int *);
PyThreadState *, PyFrameObject *, int *, int *, int *);
static void maybe_dtrace_line(PyFrameObject *, int *, int *, int *);
static void dtrace_function_entry(PyFrameObject *);
static void dtrace_function_return(PyFrameObject *);
static PyObject * cmp_outcome(int, PyObject *, PyObject *);
static PyObject * import_name(PyFrameObject *, PyObject *, PyObject *,
PyObject *);
static PyObject * import_name(PyFrameObject *, PyObject *, PyObject *, PyObject *);
static PyObject * import_from(PyObject *, PyObject *);
static int import_all_from(PyObject *, PyObject *);
static void format_exc_check_arg(PyObject *, const char *, PyObject *);
@@ -91,7 +88,7 @@ static long dxp[256];
#endif
#ifdef WITH_THREAD
#define GIL_REQUEST _Py_atomic_load_relaxed(&_PyRuntime.ceval.gil_drop_request)
#define GIL_REQUEST _Py_atomic_load_relaxed(&gil_drop_request)
#else
#define GIL_REQUEST 0
#endif
@@ -101,22 +98,22 @@ static long dxp[256];
the GIL eventually anyway. */
#define COMPUTE_EVAL_BREAKER() \
_Py_atomic_store_relaxed( \
&_PyRuntime.ceval.eval_breaker, \
&eval_breaker, \
GIL_REQUEST | \
_Py_atomic_load_relaxed(&_PyRuntime.ceval.pending.calls_to_do) | \
_PyRuntime.ceval.pending.async_exc)
_Py_atomic_load_relaxed(&pendingcalls_to_do) | \
pending_async_exc)
#ifdef WITH_THREAD
#define SET_GIL_DROP_REQUEST() \
do { \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
_Py_atomic_store_relaxed(&gil_drop_request, 1); \
_Py_atomic_store_relaxed(&eval_breaker, 1); \
} while (0)
#define RESET_GIL_DROP_REQUEST() \
do { \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 0); \
_Py_atomic_store_relaxed(&gil_drop_request, 0); \
COMPUTE_EVAL_BREAKER(); \
} while (0)
@@ -125,35 +122,47 @@ static long dxp[256];
/* Pending calls are only modified under pending_lock */
#define SIGNAL_PENDING_CALLS() \
do { \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
_Py_atomic_store_relaxed(&pendingcalls_to_do, 1); \
_Py_atomic_store_relaxed(&eval_breaker, 1); \
} while (0)
#define UNSIGNAL_PENDING_CALLS() \
do { \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 0); \
_Py_atomic_store_relaxed(&pendingcalls_to_do, 0); \
COMPUTE_EVAL_BREAKER(); \
} while (0)
#define SIGNAL_ASYNC_EXC() \
do { \
_PyRuntime.ceval.pending.async_exc = 1; \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
pending_async_exc = 1; \
_Py_atomic_store_relaxed(&eval_breaker, 1); \
} while (0)
#define UNSIGNAL_ASYNC_EXC() \
do { \
_PyRuntime.ceval.pending.async_exc = 0; \
COMPUTE_EVAL_BREAKER(); \
} while (0)
do { pending_async_exc = 0; COMPUTE_EVAL_BREAKER(); } while (0)
/* This single variable consolidates all requests to break out of the fast path
in the eval loop. */
static _Py_atomic_int eval_breaker = {0};
/* Request for running pending calls. */
static _Py_atomic_int pendingcalls_to_do = {0};
/* Request for looking at the `async_exc` field of the current thread state.
Guarded by the GIL. */
static int pending_async_exc = 0;
#ifdef WITH_THREAD
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#include "pythread.h"
static PyThread_type_lock pending_lock = 0; /* for pending calls */
static unsigned long main_thread = 0;
/* Request for dropping the GIL */
static _Py_atomic_int gil_drop_request = {0};
#include "ceval_gil.h"
int
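
For context: the flags reintroduced above (gil_drop_request, pendingcalls_to_do, pending_async_exc) are folded into the single eval_breaker word by COMPUTE_EVAL_BREAKER(), so the eval loop only has to poll one atomic per instruction. A minimal standalone sketch of that folding, using C11 atomics and made-up demo_* names rather than CPython's _Py_atomic API:

#include <stdatomic.h>

/* Illustrative stand-ins for the three request flags and the combined gate. */
static atomic_int demo_gil_drop_request = 0;
static atomic_int demo_pendingcalls_to_do = 0;
static int demo_pending_async_exc = 0;        /* guarded by the GIL */
static atomic_int demo_eval_breaker = 0;

/* Recompute the combined flag, as COMPUTE_EVAL_BREAKER() does: whenever any
   of the three requests changes, the result is stored in one word that the
   fast path can test alone. */
static void demo_compute_eval_breaker(void)
{
    int breaker =
        atomic_load_explicit(&demo_gil_drop_request, memory_order_relaxed) |
        atomic_load_explicit(&demo_pendingcalls_to_do, memory_order_relaxed) |
        demo_pending_async_exc;
    atomic_store_explicit(&demo_eval_breaker, breaker, memory_order_relaxed);
}
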
@@ -169,9 +178,9 @@ PyEval_InitThreads(void)
return;
create_gil();
take_gil(PyThreadState_GET());
_PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
if (!_PyRuntime.ceval.pending.lock)
_PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
main_thread = PyThread_get_thread_ident();
if (!pending_lock)
pending_lock = PyThread_allocate_lock();
}
void
@@ -239,9 +248,9 @@ PyEval_ReInitThreads(void)
if (!gil_created())
return;
recreate_gil();
_PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
pending_lock = PyThread_allocate_lock();
take_gil(current_tstate);
_PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
main_thread = PyThread_get_thread_ident();
/* Destroy all threads except the current one */
_PyThreadState_DeleteExcept(current_tstate);
@@ -285,7 +294,7 @@ PyEval_RestoreThread(PyThreadState *tstate)
int err = errno;
take_gil(tstate);
/* _Py_Finalizing is protected by the GIL */
if (_Py_IS_FINALIZING() && !_Py_CURRENTLY_FINALIZING(tstate)) {
if (_Py_Finalizing && tstate != _Py_Finalizing) {
drop_gil(tstate);
PyThread_exit_thread();
assert(0); /* unreachable */
@@ -337,11 +346,19 @@ _PyEval_SignalReceived(void)
callback.
*/
#define NPENDINGCALLS 32
static struct {
int (*func)(void *);
void *arg;
} pendingcalls[NPENDINGCALLS];
static int pendingfirst = 0;
static int pendinglast = 0;
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
int i, j, result=0;
PyThread_type_lock lock = _PyRuntime.ceval.pending.lock;
PyThread_type_lock lock = pending_lock;
/* try a few times for the lock. Since this mechanism is used
* for signal handling (on the main thread), there is a (slim)
@@ -363,14 +380,14 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
return -1;
}
i = _PyRuntime.ceval.pending.last;
i = pendinglast;
j = (i + 1) % NPENDINGCALLS;
if (j == _PyRuntime.ceval.pending.first) {
if (j == pendingfirst) {
result = -1; /* Queue full */
} else {
_PyRuntime.ceval.pending.calls[i].func = func;
_PyRuntime.ceval.pending.calls[i].arg = arg;
_PyRuntime.ceval.pending.last = j;
pendingcalls[i].func = func;
pendingcalls[i].arg = arg;
pendinglast = j;
}
/* signal main loop */
SIGNAL_PENDING_CALLS();
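
Py_AddPendingCall() is a public API: it may be called from a worker thread or a signal handler and schedules a callback to run later in the main thread, with the GIL held, when Py_MakePendingCalls() drains the queue. A hedged usage sketch (the callback and its payload are invented for illustration):

#include <Python.h>

/* Hypothetical callback: runs later in the main thread with the GIL held. */
static int notify_done(void *arg)
{
    const char *msg = (const char *)arg;
    PySys_WriteStdout("background task finished: %s\n", msg);
    return 0;                 /* 0 = success, -1 = raise the pending exception */
}

/* Called from a worker thread that does not hold the GIL. */
static void worker_finished(void)
{
    if (Py_AddPendingCall(notify_done, (void *)"demo") < 0) {
        /* The fixed-size queue was full; callers are expected to retry
           or drop the request. */
    }
}
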
@@ -388,19 +405,16 @@ Py_MakePendingCalls(void)
assert(PyGILState_Check());
if (!_PyRuntime.ceval.pending.lock) {
if (!pending_lock) {
/* initial allocation of the lock */
_PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
if (_PyRuntime.ceval.pending.lock == NULL)
pending_lock = PyThread_allocate_lock();
if (pending_lock == NULL)
return -1;
}
/* only service pending calls on main thread */
if (_PyRuntime.ceval.pending.main_thread &&
PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
{
if (main_thread && PyThread_get_thread_ident() != main_thread)
return 0;
}
/* don't perform recursive pending calls */
if (busy)
return 0;
@@ -422,16 +436,16 @@ Py_MakePendingCalls(void)
void *arg = NULL;
/* pop one item off the queue while holding the lock */
PyThread_acquire_lock(_PyRuntime.ceval.pending.lock, WAIT_LOCK);
j = _PyRuntime.ceval.pending.first;
if (j == _PyRuntime.ceval.pending.last) {
PyThread_acquire_lock(pending_lock, WAIT_LOCK);
j = pendingfirst;
if (j == pendinglast) {
func = NULL; /* Queue empty */
} else {
func = _PyRuntime.ceval.pending.calls[j].func;
arg = _PyRuntime.ceval.pending.calls[j].arg;
_PyRuntime.ceval.pending.first = (j + 1) % NPENDINGCALLS;
func = pendingcalls[j].func;
arg = pendingcalls[j].arg;
pendingfirst = (j + 1) % NPENDINGCALLS;
}
PyThread_release_lock(_PyRuntime.ceval.pending.lock);
PyThread_release_lock(pending_lock);
/* having released the lock, perform the callback */
if (func == NULL)
break;
@@ -475,6 +489,14 @@ error:
The two threads could theoretically wiggle around the "busy" variable.
*/
#define NPENDINGCALLS 32
static struct {
int (*func)(void *);
void *arg;
} pendingcalls[NPENDINGCALLS];
static volatile int pendingfirst = 0;
static volatile int pendinglast = 0;
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
@@ -484,15 +506,15 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
if (busy)
return -1;
busy = 1;
i = _PyRuntime.ceval.pending.last;
i = pendinglast;
j = (i + 1) % NPENDINGCALLS;
if (j == _PyRuntime.ceval.pending.first) {
if (j == pendingfirst) {
busy = 0;
return -1; /* Queue full */
}
_PyRuntime.ceval.pending.calls[i].func = func;
_PyRuntime.ceval.pending.calls[i].arg = arg;
_PyRuntime.ceval.pending.last = j;
pendingcalls[i].func = func;
pendingcalls[i].arg = arg;
pendinglast = j;
SIGNAL_PENDING_CALLS();
busy = 0;
@@ -521,12 +543,12 @@ Py_MakePendingCalls(void)
int i;
int (*func)(void *);
void *arg;
i = _PyRuntime.ceval.pending.first;
if (i == _PyRuntime.ceval.pending.last)
i = pendingfirst;
if (i == pendinglast)
break; /* Queue empty */
func = _PyRuntime.ceval.pending.calls[i].func;
arg = _PyRuntime.ceval.pending.calls[i].arg;
_PyRuntime.ceval.pending.first = (i + 1) % NPENDINGCALLS;
func = pendingcalls[i].func;
arg = pendingcalls[i].arg;
pendingfirst = (i + 1) % NPENDINGCALLS;
if (func(arg) < 0) {
goto error;
}
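
Both the threaded and non-threaded variants restored above use the same fixed-size ring buffer: pendinglast is the producer index, pendingfirst the consumer index, and one slot is left unused so that "full" ((last + 1) % NPENDINGCALLS == first) and "empty" (first == last) are distinguishable. A self-contained sketch of that indexing scheme, with generic names rather than CPython's:

#define NSLOTS 32

typedef struct { int (*func)(void *); void *arg; } slot_t;

static slot_t queue[NSLOTS];
static int first = 0;   /* next slot to pop  */
static int last = 0;    /* next slot to fill */

/* Returns 0 on success, -1 if the queue is full (at most NSLOTS-1 entries). */
static int ring_push(int (*func)(void *), void *arg)
{
    int j = (last + 1) % NSLOTS;
    if (j == first)
        return -1;
    queue[last].func = func;
    queue[last].arg = arg;
    last = j;
    return 0;
}

/* Returns 0 on success, -1 if the queue is empty. */
static int ring_pop(slot_t *out)
{
    if (first == last)
        return -1;
    *out = queue[first];
    first = (first + 1) % NSLOTS;
    return 0;
}
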
@@ -548,32 +570,20 @@ error:
#ifndef Py_DEFAULT_RECURSION_LIMIT
#define Py_DEFAULT_RECURSION_LIMIT 1000
#endif
void
_PyEval_Initialize(struct _ceval_runtime_state *state)
{
state->recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
state->check_recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
_gil_initialize(&state->gil);
}
int
_PyEval_CheckRecursionLimit(void)
{
return _PyRuntime.ceval.check_recursion_limit;
}
static int recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
int
Py_GetRecursionLimit(void)
{
return _PyRuntime.ceval.recursion_limit;
return recursion_limit;
}
void
Py_SetRecursionLimit(int new_limit)
{
_PyRuntime.ceval.recursion_limit = new_limit;
_PyRuntime.ceval.check_recursion_limit = _PyRuntime.ceval.recursion_limit;
recursion_limit = new_limit;
_Py_CheckRecursionLimit = recursion_limit;
}
/* the macro Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
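
Py_GetRecursionLimit() and Py_SetRecursionLimit() are the C counterparts of sys.getrecursionlimit()/sys.setrecursionlimit(); after this revert they simply read and write the static recursion_limit and mirror it into _Py_CheckRecursionLimit. A small illustrative sketch of temporarily raising the limit from C (the helper name is made up):

#include <Python.h>

/* Hypothetical helper: temporarily raise the interpreter recursion limit
   around code known to recurse deeply, then restore it. Must hold the GIL. */
static PyObject *run_with_deeper_stack(PyObject *(*thunk)(void))
{
    int old_limit = Py_GetRecursionLimit();
    Py_SetRecursionLimit(old_limit * 2);
    PyObject *result = thunk();          /* NULL if thunk() raised */
    Py_SetRecursionLimit(old_limit);
    return result;
}
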
@@ -585,7 +595,6 @@ int
_Py_CheckRecursiveCall(const char *where)
{
PyThreadState *tstate = PyThreadState_GET();
int recursion_limit = _PyRuntime.ceval.recursion_limit;
#ifdef USE_STACKCHECK
if (PyOS_CheckStack()) {
@@ -594,7 +603,7 @@ _Py_CheckRecursiveCall(const char *where)
return -1;
}
#endif
_PyRuntime.ceval.check_recursion_limit = recursion_limit;
_Py_CheckRecursionLimit = recursion_limit;
if (tstate->recursion_critical)
/* Somebody asked that we don't check for recursion. */
return 0;
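
The check above backs the public Py_EnterRecursiveCall()/Py_LeaveRecursiveCall() pair that extension code uses to participate in the recursion limit. A typical illustrative use in a recursive C helper (hypothetical function, not CPython code):

#include <Python.h>

/* Hypothetical recursive helper guarded by the interpreter's recursion limit. */
static PyObject *deep_copy_nested(PyObject *obj)
{
    if (Py_EnterRecursiveCall(" while copying a nested structure"))
        return NULL;                    /* RecursionError already set */

    PyObject *result;
    if (PyList_Check(obj)) {
        Py_ssize_t n = PyList_GET_SIZE(obj);
        result = PyList_New(n);
        for (Py_ssize_t i = 0; result != NULL && i < n; i++) {
            PyObject *item = deep_copy_nested(PyList_GET_ITEM(obj, i));
            if (item == NULL) {
                Py_CLEAR(result);
                break;
            }
            PyList_SET_ITEM(result, i, item);   /* steals the reference */
        }
    }
    else {
        Py_INCREF(obj);
        result = obj;                   /* non-lists are not copied here */
    }

    Py_LeaveRecursiveCall();
    return result;
}
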
@@ -633,7 +642,13 @@ static void restore_and_clear_exc_state(PyThreadState *, PyFrameObject *);
static int do_raise(PyObject *, PyObject *);
static int unpack_iterable(PyObject *, int, int, PyObject **);
#define _Py_TracingPossible _PyRuntime.ceval.tracing_possible
/* Records whether tracing is on for any thread. Counts the number of
threads for which tstate->c_tracefunc is non-NULL, so if the value
is 0, we know we don't have to check this thread's c_tracefunc.
This speeds up the if statement in PyEval_EvalFrameEx() after
fast_next_opcode*/
static int _Py_TracingPossible = 0;
PyObject *
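
_Py_TracingPossible counts threads with a C-level trace function installed, so the branch after fast_next_opcode can be skipped entirely when it is zero. A trace function is installed per thread via PyEval_SetTrace(); a minimal sketch (the counting logic is invented for illustration):

#include <Python.h>
#include <frameobject.h>

/* Minimal C-level trace function: counts line events. */
static Py_ssize_t line_events = 0;

static int count_lines(PyObject *obj, PyFrameObject *frame,
                       int what, PyObject *arg)
{
    if (what == PyTrace_LINE)
        line_events++;
    return 0;
}

/* Installing it for the current thread is what bumps _Py_TracingPossible. */
static void enable_line_counting(void)
{
    PyEval_SetTrace(count_lines, NULL);
}
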
@@ -764,7 +779,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
#define DISPATCH() \
{ \
if (!_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) { \
if (!_Py_atomic_load_relaxed(&eval_breaker)) { \
FAST_DISPATCH(); \
} \
continue; \
@@ -812,8 +827,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
/* Code access macros */
/* The integer overflow is checked by an assertion below. */
#define INSTR_OFFSET() \
(sizeof(_Py_CODEUNIT) * (int)(next_instr - first_instr))
#define INSTR_OFFSET() (sizeof(_Py_CODEUNIT) * (int)(next_instr - first_instr))
#define NEXTOPARG() do { \
_Py_CODEUNIT word = *next_instr; \
opcode = _Py_OPCODE(word); \
@@ -1066,7 +1080,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
async I/O handler); see Py_AddPendingCall() and
Py_MakePendingCalls() above. */
if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) {
if (_Py_atomic_load_relaxed(&eval_breaker)) {
if (_Py_OPCODE(*next_instr) == SETUP_FINALLY ||
_Py_OPCODE(*next_instr) == YIELD_FROM) {
/* Two cases where we skip running signal handlers and other
@@ -1083,16 +1097,12 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
*/
goto fast_next_opcode;
}
if (_Py_atomic_load_relaxed(
&_PyRuntime.ceval.pending.calls_to_do))
{
if (_Py_atomic_load_relaxed(&pendingcalls_to_do)) {
if (Py_MakePendingCalls() < 0)
goto error;
}
#ifdef WITH_THREAD
if (_Py_atomic_load_relaxed(
&_PyRuntime.ceval.gil_drop_request))
{
if (_Py_atomic_load_relaxed(&gil_drop_request)) {
/* Give another thread a chance */
if (PyThreadState_Swap(NULL) != tstate)
Py_FatalError("ceval: tstate mix-up");
@@ -1103,9 +1113,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
take_gil(tstate);
/* Check if we should make a quick exit. */
if (_Py_IS_FINALIZING() &&
!_Py_CURRENTLY_FINALIZING(tstate))
{
if (_Py_Finalizing && _Py_Finalizing != tstate) {
drop_gil(tstate);
PyThread_exit_thread();
}
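
The gil_drop_request branch above is the forced form of a thread switch: the running thread drops the GIL, lets a waiting thread take it, and re-acquires it before resuming. Extension code performs the same hand-off voluntarily around blocking work with Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS; a short hedged sketch:

#include <Python.h>
#include <unistd.h>

/* Hypothetical extension function: release the GIL around a blocking call
   so other threads can run while this one sleeps. */
static PyObject *blocking_sleep(PyObject *self, PyObject *args)
{
    unsigned int seconds;
    if (!PyArg_ParseTuple(args, "I", &seconds))
        return NULL;

    Py_BEGIN_ALLOW_THREADS          /* drop the GIL */
    sleep(seconds);                 /* blocking work, no Python API calls here */
    Py_END_ALLOW_THREADS            /* re-acquire the GIL */

    Py_RETURN_NONE;
}
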