Mirror of https://github.com/python/cpython.git (synced 2025-11-01 18:51:43 +00:00)
GH-131238: More refactoring of core header files (GH-131351)
Adds a new pycore_stats.h header file to help break dependencies involving the pycore_code.h header.
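The change lets headers that only need the stats macros include pycore_stats.h instead of pycore_code.h, as the freelist header hunk below shows. A minimal sketch of the pattern (illustrative; not copied verbatim from any single file):

    /* Before: pulling in all of pycore_code.h just for a stats macro. */
    #include "pycore_code.h"     // OBJECT_STAT_INC

    /* After: the macro now comes from the smaller, dependency-light header. */
    #include "pycore_stats.h"    // OBJECT_STAT_INC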
parent bb0268f60d
commit a45f25361d

51 changed files with 264 additions and 187 deletions
@@ -9,6 +9,7 @@ extern "C" {
#endif

#include "pycore_pystate.h"       // _PyThreadState_GET()
#include "pycore_stats.h"

/* Suggested size (number of positional arguments) for arrays of PyObject*
   allocated on a C stack to avoid allocating memory on the heap memory. Such
@@ -318,68 +318,6 @@ extern void _Py_Specialize_Send(_PyStackRef receiver, _Py_CODEUNIT *instr);
extern void _Py_Specialize_ToBool(_PyStackRef value, _Py_CODEUNIT *instr);
extern void _Py_Specialize_ContainsOp(_PyStackRef value, _Py_CODEUNIT *instr);

#ifdef Py_STATS

#include "pycore_bitutils.h"      // _Py_bit_length

#define STAT_INC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name++; } while (0)
#define STAT_DEC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name--; } while (0)
#define OPCODE_EXE_INC(opname) do { if (_Py_stats) _Py_stats->opcode_stats[opname].execution_count++; } while (0)
#define CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.name++; } while (0)
#define OBJECT_STAT_INC(name) do { if (_Py_stats) _Py_stats->object_stats.name++; } while (0)
#define OBJECT_STAT_INC_COND(name, cond) \
    do { if (_Py_stats && cond) _Py_stats->object_stats.name++; } while (0)
#define EVAL_CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.eval_calls[name]++; } while (0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) \
    do { if (_Py_stats && PyFunction_Check(callable)) _Py_stats->call_stats.eval_calls[name]++; } while (0)
#define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0)
#define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0)
#define OPT_STAT_ADD(name, n) do { if (_Py_stats) _Py_stats->optimization_stats.name += (n); } while (0)
#define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0)
#define UOP_PAIR_INC(uopcode, lastuop) \
    do { \
        if (lastuop && _Py_stats) { \
            _Py_stats->optimization_stats.opcode[lastuop].pair_count[uopcode]++; \
        } \
        lastuop = uopcode; \
    } while (0)
#define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0)
#define OPT_ERROR_IN_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.error_in_opcode[opname]++; } while (0)
#define OPT_HIST(length, name) \
    do { \
        if (_Py_stats) { \
            int bucket = _Py_bit_length(length >= 1 ? length - 1 : 0); \
            bucket = (bucket >= _Py_UOP_HIST_SIZE) ? _Py_UOP_HIST_SIZE - 1 : bucket; \
            _Py_stats->optimization_stats.name[bucket]++; \
        } \
    } while (0)
#define RARE_EVENT_STAT_INC(name) do { if (_Py_stats) _Py_stats->rare_event_stats.name++; } while (0)
#define OPCODE_DEFERRED_INC(opname) do { if (_Py_stats && opcode == opname) _Py_stats->opcode_stats[opname].specialization.deferred++; } while (0)

// Export for '_opcode' shared extension
PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void);

#else
#define STAT_INC(opname, name) ((void)0)
#define STAT_DEC(opname, name) ((void)0)
#define OPCODE_EXE_INC(opname) ((void)0)
#define CALL_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC_COND(name, cond) ((void)0)
#define EVAL_CALL_STAT_INC(name) ((void)0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) ((void)0)
#define GC_STAT_ADD(gen, name, n) ((void)0)
#define OPT_STAT_INC(name) ((void)0)
#define OPT_STAT_ADD(name, n) ((void)0)
#define UOP_STAT_INC(opname, name) ((void)0)
#define UOP_PAIR_INC(uopcode, lastuop) ((void)0)
#define OPT_UNSUPPORTED_OPCODE(opname) ((void)0)
#define OPT_ERROR_IN_OPCODE(opname) ((void)0)
#define OPT_HIST(length, name) ((void)0)
#define RARE_EVENT_STAT_INC(name) ((void)0)
#define OPCODE_DEFERRED_INC(opname) ((void)0)
#endif // !Py_STATS

// Utility functions for reading/writing 32/64-bit values in the inline caches.
// Great care should be taken to ensure that these functions remain correct and
// performant! They should compile to just "move" instructions on all supported
@@ -11,6 +11,7 @@ extern "C" {
#include "pycore_object.h"               // PyManagedDictPointer
#include "pycore_pyatomic_ft_wrappers.h" // FT_ATOMIC_LOAD_SSIZE_ACQUIRE
#include "pycore_stackref.h"             // _PyStackRef
#include "pycore_stats.h"

// Unsafe flavor of PyDict_GetItemWithError(): no error checking
extern PyObject* _PyDict_GetItemWithError(PyObject *dp, PyObject *key);
@@ -12,6 +12,7 @@ extern "C" {
#include <stddef.h>               // offsetof()
#include "pycore_code.h"          // STATS
#include "pycore_stackref.h"      // _PyStackRef
#include "pycore_stats.h"

/* See InternalDocs/frames.md for an explanation of the frame stack
 * including explanation of the PyFrameObject and _PyInterpreterFrame
@@ -11,7 +11,7 @@ extern "C" {
#include "pycore_freelist_state.h"  // struct _Py_freelists
#include "pycore_object.h"          // _PyObject_IS_GC
#include "pycore_pystate.h"         // _PyThreadState_GET
#include "pycore_code.h"            // OBJECT_STAT_INC
#include "pycore_stats.h"           // OBJECT_STAT_INC

static inline struct _Py_freelists *
_Py_freelists_GET(void)
@@ -9,6 +9,7 @@ extern "C" {
#endif

#include "pycore_runtime_structs.h"
#include "pycore_pystate.h"


/* Get an object's GC head */
@@ -203,6 +204,86 @@ static inline void _PyGC_CLEAR_FINALIZED(PyObject *op) {
#endif
}


/* Tell the GC to track this object.
 *
 * The object must not be tracked by the GC.
 *
 * NB: While the object is tracked by the collector, it must be safe to call the
 * ob_traverse method.
 *
 * Internal note: interp->gc.generation0->_gc_prev doesn't have any bit flags
 * because it's not object header. So we don't use _PyGCHead_PREV() and
 * _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
 *
 * See also the public PyObject_GC_Track() function.
 */
static inline void _PyObject_GC_TRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, !_PyObject_GC_IS_TRACKED(op),
                          "object already tracked by the garbage collector",
                          filename, lineno, __func__);
#ifdef Py_GIL_DISABLED
    _PyObject_SET_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    PyGC_Head *gc = _Py_AS_GC(op);
    _PyObject_ASSERT_FROM(op,
                          (gc->_gc_prev & _PyGC_PREV_MASK_COLLECTING) == 0,
                          "object is in generation which is garbage collected",
                          filename, lineno, __func__);

    PyInterpreterState *interp = _PyInterpreterState_GET();
    PyGC_Head *generation0 = &interp->gc.young.head;
    PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev);
    _PyGCHead_SET_NEXT(last, gc);
    _PyGCHead_SET_PREV(gc, last);
    uintptr_t not_visited = 1 ^ interp->gc.visited_space;
    gc->_gc_next = ((uintptr_t)generation0) | not_visited;
    generation0->_gc_prev = (uintptr_t)gc;
#endif
}

/* Tell the GC to stop tracking this object.
 *
 * Internal note: This may be called while GC. So _PyGC_PREV_MASK_COLLECTING
 * must be cleared. But _PyGC_PREV_MASK_FINALIZED bit is kept.
 *
 * The object must be tracked by the GC.
 *
 * See also the public PyObject_GC_UnTrack() which accept an object which is
 * not tracked.
 */
static inline void _PyObject_GC_UNTRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, _PyObject_GC_IS_TRACKED(op),
                          "object not tracked by the garbage collector",
                          filename, lineno, __func__);

#ifdef Py_GIL_DISABLED
    _PyObject_CLEAR_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    PyGC_Head *gc = _Py_AS_GC(op);
    PyGC_Head *prev = _PyGCHead_PREV(gc);
    PyGC_Head *next = _PyGCHead_NEXT(gc);
    _PyGCHead_SET_NEXT(prev, next);
    _PyGCHead_SET_PREV(next, prev);
    gc->_gc_next = 0;
    gc->_gc_prev &= _PyGC_PREV_MASK_FINALIZED;
#endif
}



/*
NOTE: about untracking of mutable objects.
@@ -100,9 +100,6 @@ extern void _PyInterpreterState_SetWhence(
    PyInterpreterState *interp,
    long whence);

extern const PyConfig* _PyInterpreterState_GetConfig(PyInterpreterState *interp);


/*
Runtime Feature Flags
@@ -137,23 +134,6 @@ PyAPI_FUNC(PyStatus) _PyInterpreterState_New(
    PyThreadState *tstate,
    PyInterpreterState **pinterp);


#define RARE_EVENT_INTERP_INC(interp, name) \
    do { \
        /* saturating add */ \
        int val = FT_ATOMIC_LOAD_UINT8_RELAXED(interp->rare_events.name); \
        if (val < UINT8_MAX) { \
            FT_ATOMIC_STORE_UINT8(interp->rare_events.name, val + 1); \
        } \
        RARE_EVENT_STAT_INC(name); \
    } while (0); \

#define RARE_EVENT_INC(name) \
    do { \
        PyInterpreterState *interp = PyInterpreterState_Get(); \
        RARE_EVENT_INTERP_INC(interp, name); \
    } while (0); \

#ifdef __cplusplus
}
#endif
@@ -9,12 +9,11 @@ extern "C" {
#endif

#include <stdbool.h>
#include "pycore_gc.h"                    // _PyObject_GC_IS_TRACKED()
#include "pycore_emscripten_trampoline.h" // _PyCFunction_TrampolineCall()
#include "pycore_interp.h"                // PyInterpreterState.gc
#include "pycore_object_deferred.h"       // _PyObject_HasDeferredRefcount
#include "pycore_pyatomic_ft_wrappers.h"  // FT_ATOMIC_STORE_PTR_RELAXED
#include "pycore_pystate.h"               // _PyInterpreterState_GET()
#include "pycore_stackref.h"
#include "pycore_typeobject.h"            // _PyStaticType_GetState()
#include "pycore_uniqueid.h"              // _PyObject_ThreadIncrefSlow()

// This value is added to `ob_ref_shared` for objects that use deferred
@@ -455,84 +454,6 @@ _PyObject_InitVar(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size)
    Py_SET_SIZE(op, size);
}


/* Tell the GC to track this object.
 *
 * The object must not be tracked by the GC.
 *
 * NB: While the object is tracked by the collector, it must be safe to call the
 * ob_traverse method.
 *
 * Internal note: interp->gc.generation0->_gc_prev doesn't have any bit flags
 * because it's not object header. So we don't use _PyGCHead_PREV() and
 * _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
 *
 * See also the public PyObject_GC_Track() function.
 */
static inline void _PyObject_GC_TRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, !_PyObject_GC_IS_TRACKED(op),
                          "object already tracked by the garbage collector",
                          filename, lineno, __func__);
#ifdef Py_GIL_DISABLED
    _PyObject_SET_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    PyGC_Head *gc = _Py_AS_GC(op);
    _PyObject_ASSERT_FROM(op,
                          (gc->_gc_prev & _PyGC_PREV_MASK_COLLECTING) == 0,
                          "object is in generation which is garbage collected",
                          filename, lineno, __func__);

    PyInterpreterState *interp = _PyInterpreterState_GET();
    PyGC_Head *generation0 = &interp->gc.young.head;
    PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev);
    _PyGCHead_SET_NEXT(last, gc);
    _PyGCHead_SET_PREV(gc, last);
    uintptr_t not_visited = 1 ^ interp->gc.visited_space;
    gc->_gc_next = ((uintptr_t)generation0) | not_visited;
    generation0->_gc_prev = (uintptr_t)gc;
#endif
}

/* Tell the GC to stop tracking this object.
 *
 * Internal note: This may be called while GC. So _PyGC_PREV_MASK_COLLECTING
 * must be cleared. But _PyGC_PREV_MASK_FINALIZED bit is kept.
 *
 * The object must be tracked by the GC.
 *
 * See also the public PyObject_GC_UnTrack() which accept an object which is
 * not tracked.
 */
static inline void _PyObject_GC_UNTRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, _PyObject_GC_IS_TRACKED(op),
                          "object not tracked by the garbage collector",
                          filename, lineno, __func__);

#ifdef Py_GIL_DISABLED
    _PyObject_CLEAR_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    PyGC_Head *gc = _Py_AS_GC(op);
    PyGC_Head *prev = _PyGCHead_PREV(gc);
    PyGC_Head *next = _PyGCHead_NEXT(gc);
    _PyGCHead_SET_NEXT(prev, next);
    _PyGCHead_SET_PREV(next, prev);
    gc->_gc_next = 0;
    gc->_gc_prev &= _PyGC_PREV_MASK_FINALIZED;
#endif
}

// Macros to accept any type for the parameter, and to automatically pass
// the filename and the line number (if NDEBUG is not defined) where the macro
// is called.
@@ -618,20 +539,6 @@ _Py_TryIncrefCompare(PyObject **src, PyObject *op)
    return 1;
}

static inline int
_Py_TryIncrefCompareStackRef(PyObject **src, PyObject *op, _PyStackRef *out)
{
    if (_PyObject_HasDeferredRefcount(op)) {
        *out = (_PyStackRef){ .bits = (intptr_t)op | Py_TAG_DEFERRED };
        return 1;
    }
    if (_Py_TryIncrefCompare(src, op)) {
        *out = PyStackRef_FromPyObjectSteal(op);
        return 1;
    }
    return 0;
}

/* Loads and increfs an object from ptr, which may contain a NULL value.
   Safe with concurrent (atomic) updates to ptr.
   NOTE: The writer must set maybe-weakref on the stored object! */
@@ -11,7 +11,8 @@ extern "C" {
#include "pycore_runtime_structs.h" // _PyRuntime
#include "pycore_runtime.h"         // _PyRuntimeState_GetFinalizing
#include "pycore_tstate.h"          // _PyThreadStateImpl
#include "pycore_interp.h"          // _PyInterpreterState_GetConfig

extern const PyConfig* _PyInterpreterState_GetConfig(PyInterpreterState *interp);

// Values for PyThreadState.state. A thread must be in the "attached" state
// before calling most Python APIs. If the GIL is enabled, then "attached"
@@ -14,6 +14,7 @@ extern "C" {
#endif

#include "pycore_object_deferred.h"
#include "pycore_object.h"

#include <stddef.h>
#include <stdbool.h>
@@ -639,6 +640,24 @@ PyStackRef_FunctionCheck(_PyStackRef stackref)
    return PyFunction_Check(PyStackRef_AsPyObjectBorrow(stackref));
}

#ifdef Py_GIL_DISABLED

static inline int
_Py_TryIncrefCompareStackRef(PyObject **src, PyObject *op, _PyStackRef *out)
{
    if (_PyObject_HasDeferredRefcount(op)) {
        *out = (_PyStackRef){ .bits = (intptr_t)op | Py_TAG_DEFERRED };
        return 1;
    }
    if (_Py_TryIncrefCompare(src, op)) {
        *out = PyStackRef_FromPyObjectSteal(op);
        return 1;
    }
    return 0;
}

#endif

#ifdef __cplusplus
}
#endif
Include/internal/pycore_stats.h (new file, 97 lines)
@@ -0,0 +1,97 @@
#ifndef Py_INTERNAL_STATS_H
#define Py_INTERNAL_STATS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_structs.h"


#ifdef Py_STATS

#include "pycore_bitutils.h"      // _Py_bit_length

#define STAT_INC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name++; } while (0)
#define STAT_DEC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name--; } while (0)
#define OPCODE_EXE_INC(opname) do { if (_Py_stats) _Py_stats->opcode_stats[opname].execution_count++; } while (0)
#define CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.name++; } while (0)
#define OBJECT_STAT_INC(name) do { if (_Py_stats) _Py_stats->object_stats.name++; } while (0)
#define OBJECT_STAT_INC_COND(name, cond) \
    do { if (_Py_stats && cond) _Py_stats->object_stats.name++; } while (0)
#define EVAL_CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.eval_calls[name]++; } while (0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) \
    do { if (_Py_stats && PyFunction_Check(callable)) _Py_stats->call_stats.eval_calls[name]++; } while (0)
#define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0)
#define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0)
#define OPT_STAT_ADD(name, n) do { if (_Py_stats) _Py_stats->optimization_stats.name += (n); } while (0)
#define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0)
#define UOP_PAIR_INC(uopcode, lastuop) \
    do { \
        if (lastuop && _Py_stats) { \
            _Py_stats->optimization_stats.opcode[lastuop].pair_count[uopcode]++; \
        } \
        lastuop = uopcode; \
    } while (0)
#define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0)
#define OPT_ERROR_IN_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.error_in_opcode[opname]++; } while (0)
#define OPT_HIST(length, name) \
    do { \
        if (_Py_stats) { \
            int bucket = _Py_bit_length(length >= 1 ? length - 1 : 0); \
            bucket = (bucket >= _Py_UOP_HIST_SIZE) ? _Py_UOP_HIST_SIZE - 1 : bucket; \
            _Py_stats->optimization_stats.name[bucket]++; \
        } \
    } while (0)
#define RARE_EVENT_STAT_INC(name) do { if (_Py_stats) _Py_stats->rare_event_stats.name++; } while (0)
#define OPCODE_DEFERRED_INC(opname) do { if (_Py_stats && opcode == opname) _Py_stats->opcode_stats[opname].specialization.deferred++; } while (0)

// Export for '_opcode' shared extension
PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void);

#else
#define STAT_INC(opname, name) ((void)0)
#define STAT_DEC(opname, name) ((void)0)
#define OPCODE_EXE_INC(opname) ((void)0)
#define CALL_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC_COND(name, cond) ((void)0)
#define EVAL_CALL_STAT_INC(name) ((void)0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) ((void)0)
#define GC_STAT_ADD(gen, name, n) ((void)0)
#define OPT_STAT_INC(name) ((void)0)
#define OPT_STAT_ADD(name, n) ((void)0)
#define UOP_STAT_INC(opname, name) ((void)0)
#define UOP_PAIR_INC(uopcode, lastuop) ((void)0)
#define OPT_UNSUPPORTED_OPCODE(opname) ((void)0)
#define OPT_ERROR_IN_OPCODE(opname) ((void)0)
#define OPT_HIST(length, name) ((void)0)
#define RARE_EVENT_STAT_INC(name) ((void)0)
#define OPCODE_DEFERRED_INC(opname) ((void)0)
#endif // !Py_STATS


#define RARE_EVENT_INTERP_INC(interp, name) \
    do { \
        /* saturating add */ \
        int val = FT_ATOMIC_LOAD_UINT8_RELAXED(interp->rare_events.name); \
        if (val < UINT8_MAX) { \
            FT_ATOMIC_STORE_UINT8(interp->rare_events.name, val + 1); \
        } \
        RARE_EVENT_STAT_INC(name); \
    } while (0); \

#define RARE_EVENT_INC(name) \
    do { \
        PyInterpreterState *interp = PyInterpreterState_Get(); \
        RARE_EVENT_INTERP_INC(interp, name); \
    } while (0); \


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_STATS_H */
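All of these macros expand to ((void)0) unless CPython is built with Py_STATS (e.g. configured with --enable-pystats), so call sites stay unconditional. A minimal sketch of a hypothetical call site, assuming the usual _Py_stats counter fields:

    /* Hypothetical call site: with Py_STATS defined these bump the global
     * _Py_stats counters; without it, every line compiles to ((void)0). */
    OBJECT_STAT_INC(allocations);
    STAT_INC(LOAD_ATTR, hit);
    GC_STAT_ADD(0, objects_collected, 1);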
@@ -8,9 +8,10 @@ extern "C" {
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_function.h"
#include "pycore_moduleobject.h"    // PyModuleObject
#include "pycore_lock.h"            // PyMutex
#include "pycore_runtime_structs.h" // type state
#include "pycore_stats.h"


/* state */