Mirror of https://github.com/python/cpython.git (synced 2025-08-03 00:23:06 +00:00)
Revert changeset 6661a8154eb3: Issue #3329: Add new APIs to customize memory allocators

The new API requires more discussion.
This commit is contained in:
parent 05a647deed
commit 36f01ad9ac

6 changed files with 211 additions and 771 deletions
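For orientation before the diff: the reverted changeset routed each allocation domain (raw, mem, object) through a table holding a context pointer plus malloc/realloc/free function pointers. The struct itself was defined in pymem.h and is not shown in this diff; the layout below is a sketch inferred from the `{NULL, PYRAW_FUNCS}` initializers and the `alloc.malloc`/`alloc.realloc`/`alloc.free` accesses in the deleted code, so treat the exact definition as an assumption.

    #include <stddef.h>

    /* Reconstructed sketch of the reverted allocator table (assumed layout;
     * the real definition lived in Include/pymem.h, outside this diff). */
    typedef struct {
        void *ctx;                                   /* opaque context, passed first */
        void* (*malloc)(void *ctx, size_t size);
        void* (*realloc)(void *ctx, void *ptr, size_t new_size);
        void  (*free)(void *ctx, void *ptr);
    } PyMemAllocators;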
@@ -1859,6 +1859,26 @@ PyTypeObject *_PyCapsule_hack = &PyCapsule_Type;
 
 Py_ssize_t (*_Py_abstract_hack)(PyObject *) = PyObject_Size;
 
+
+/* Python's malloc wrappers (see pymem.h) */
+
+void *
+PyMem_Malloc(size_t nbytes)
+{
+    return PyMem_MALLOC(nbytes);
+}
+
+void *
+PyMem_Realloc(void *p, size_t nbytes)
+{
+    return PyMem_REALLOC(p, nbytes);
+}
+
+void
+PyMem_Free(void *p)
+{
+    PyMem_FREE(p);
+}
+
 void
 _PyObject_DebugTypeStats(FILE *out)
 {
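These restored wrappers (apparently in Objects/object.c, to judge by the `_PyCapsule_hack` context line) simply forward to the pymem.h macros. The macro bodies are outside this diff; the sketch below restates the 3.3-era behaviour of `PyMem_MALLOC` from memory, so its exact form is an assumption, and the `SKETCH_` names are placeholders rather than CPython identifiers.

    #include <stdlib.h>

    /* Approximate behaviour of the macro the restored wrapper expands to:
     * cap requests at PY_SSIZE_T_MAX, and turn 0-byte requests into 1-byte
     * ones so a NULL return always means failure. PyMem_REALLOC follows the
     * same pattern with realloc(). */
    #define SKETCH_SSIZE_T_MAX ((size_t)-1 >> 1)
    #define SKETCH_PyMem_MALLOC(n) \
        ((size_t)(n) > SKETCH_SSIZE_T_MAX ? NULL : malloc((n) ? (n) : 1))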
@@ -1,327 +1,5 @@
 #include "Python.h"
-
-/* Python's malloc wrappers (see pymem.h) */
-
-/* Forward declaration */
-
-#ifdef PYMALLOC_DEBUG   /* WITH_PYMALLOC && PYMALLOC_DEBUG */
-static void* _PyMem_DebugMalloc(void *ctx, size_t size);
-static void _PyMem_DebugFree(void *ctx, void *p);
-static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
-
-static void _PyObject_DebugDumpAddress(const void *p);
-static void _PyMem_DebugCheckAddress(char api_id, const void *p);
-#endif
-
-#ifdef WITH_PYMALLOC
-static void* _PyObject_Malloc(void *ctx, size_t size);
-static void _PyObject_Free(void *ctx, void *p);
-static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
-#endif
-
-
-static void *
-_PyMem_RawMalloc(void *ctx, size_t size)
-{
-    return malloc(size);
-}
-
-static void *
-_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
-{
-    return realloc(ptr, size);
-}
-
-static void
-_PyMem_RawFree(void *ctx, void *ptr)
-{
-    return free(ptr);
-}
-
-static void *
-_PyMem_Malloc(void *ctx, size_t size)
-{
-    /* PyMem_Malloc(0) means malloc(1). Some systems would return NULL
-       for malloc(0), which would be treated as an error. Some platforms would
-       return a pointer with no memory behind it, which would break pymalloc.
-       To solve these problems, allocate an extra byte. */
-    if (size == 0)
-        size = 1;
-    return malloc(size);
-}
-
-static void *
-_PyMem_Realloc(void *ctx, void *ptr, size_t size)
-{
-    if (size == 0)
-        size = 1;
-    return realloc(ptr, size);
-}
-
-#ifdef ARENAS_USE_MMAP
-static void *
-_PyObject_ArenaMmap(void *ctx, size_t size)
-{
-    void *ptr;
-    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
-               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    if (ptr == MAP_FAILED)
-        return NULL;
-    assert(ptr != NULL);
-    return ptr;
-}
-
-static void
-_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
-{
-    return munmap(ptr, size);
-}
-#else
-static void *
-_PyObject_ArenaMalloc(void *ctx, size_t size)
-{
-    return malloc(size);
-}
-
-static void
-_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
-{
-    free(ptr);
-}
-#endif
-
-#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawRealloc, _PyMem_RawFree
-#define PYMEM_FUNCS _PyMem_Malloc, _PyMem_Realloc, _PyMem_RawFree
-#ifdef WITH_PYMALLOC
-#define PYOBJECT_FUNCS _PyObject_Malloc, _PyObject_Realloc, _PyObject_Free
-#else
-#define PYOBJECT_FUNCS PYMEM_FUNCS
-#endif
-
-#ifdef PYMALLOC_DEBUG
-typedef struct {
-    /* We tag each block with an API ID in order to tag API violations */
-    char api_id;
-    PyMemAllocators alloc;
-} debug_alloc_api_t;
-static struct {
-    debug_alloc_api_t raw;
-    debug_alloc_api_t mem;
-    debug_alloc_api_t obj;
-} _PyMem_Debug = {
-    {'r', {NULL, PYRAW_FUNCS}},
-    {'m', {NULL, PYMEM_FUNCS}},
-    {'o', {NULL, PYOBJECT_FUNCS}}
-};
-
-#define PYDEBUG_FUNCS _PyMem_DebugMalloc, _PyMem_DebugRealloc, _PyMem_DebugFree
-#endif
-
-static PyMemAllocators _PyMem_Raw = {
-#ifdef PYMALLOC_DEBUG
-    &_PyMem_Debug.raw, PYDEBUG_FUNCS
-#else
-    NULL, PYMEM_FUNCS
-#endif
-};
-
-static PyMemAllocators _PyMem = {
-#ifdef PYMALLOC_DEBUG
-    &_PyMem_Debug.mem, PYDEBUG_FUNCS
-#else
-    NULL, PYMEM_FUNCS
-#endif
-};
-
-static PyMemAllocators _PyObject = {
-#ifdef PYMALLOC_DEBUG
-    &_PyMem_Debug.obj, PYDEBUG_FUNCS
-#else
-    NULL, PYOBJECT_FUNCS
-#endif
-};
-
-#undef PYRAW_FUNCS
-#undef PYMEM_FUNCS
-#undef PYOBJECT_FUNCS
-#undef PYDEBUG_FUNCS
-
-static struct {
-    void *ctx;
-    void* (*malloc) (void*, size_t);
-    void (*free) (void*, void*, size_t);
-} _PyObject_Arena = {NULL,
-#ifdef ARENAS_USE_MMAP
-    _PyObject_ArenaMmap, _PyObject_ArenaMunmap
-#else
-    _PyObject_ArenaMalloc, _PyObject_ArenaFree
-#endif
-};
-
-void
-PyMem_SetupDebugHooks(void)
-{
-#ifdef PYMALLOC_DEBUG
-    PyMemAllocators alloc;
-
-    alloc.malloc = _PyMem_DebugMalloc;
-    alloc.realloc = _PyMem_DebugRealloc;
-    alloc.free = _PyMem_DebugFree;
-
-    if (_PyMem_Raw.malloc != _PyMem_DebugMalloc) {
-        alloc.ctx = &_PyMem_Debug.raw;
-        PyMem_GetAllocators(&_PyMem_Debug.raw.alloc);
-        PyMem_SetAllocators(&alloc);
-    }
-
-    if (_PyMem.malloc != _PyMem_DebugMalloc) {
-        alloc.ctx = &_PyMem_Debug.mem;
-        PyMem_GetAllocators(&_PyMem_Debug.mem.alloc);
-        PyMem_SetAllocators(&alloc);
-    }
-
-    if (_PyObject.malloc != _PyMem_DebugMalloc) {
-        alloc.ctx = &_PyMem_Debug.obj;
-        PyObject_GetAllocators(&_PyMem_Debug.obj.alloc);
-        PyObject_SetAllocators(&alloc);
-    }
-#endif
-}
-
-void
-PyMem_GetRawAllocators(PyMemAllocators *allocators)
-{
-    *allocators = _PyMem_Raw;
-}
-
-void
-PyMem_SetRawAllocators(PyMemAllocators *allocators)
-{
-    _PyMem_Raw = *allocators;
-}
-
-void
-PyMem_GetAllocators(PyMemAllocators *allocators)
-{
-    *allocators = _PyMem;
-}
-
-void
-PyMem_SetAllocators(PyMemAllocators *allocators)
-{
-    _PyMem = *allocators;
-}
-
-void
-PyObject_GetAllocators(PyMemAllocators *allocators)
-{
-    *allocators = _PyObject;
-}
-
-void
-PyObject_SetAllocators(PyMemAllocators *allocators)
-{
-    _PyObject = *allocators;
-}
-
-void
-_PyObject_GetArenaAllocators(void **ctx_p,
-                             void* (**malloc_p) (void *ctx, size_t size),
-                             void (**free_p) (void *ctx, void *ptr, size_t size))
-{
-    *malloc_p = _PyObject_Arena.malloc;
-    *free_p = _PyObject_Arena.free;
-    *ctx_p = _PyObject_Arena.ctx;
-}
-
-void
-_PyObject_SetArenaAllocators(void *ctx,
-                             void* (*malloc) (void *ctx, size_t size),
-                             void (*free) (void *ctx, void *ptr, size_t size))
-{
-    _PyObject_Arena.malloc = malloc;
-    _PyObject_Arena.free = free;
-    _PyObject_Arena.ctx = ctx;
-}
-
-void *
-PyMem_RawMalloc(size_t size)
-{
-    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
-}
-
-void*
-PyMem_RawRealloc(void *ptr, size_t new_size)
-{
-    return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
-}
-
-void PyMem_RawFree(void *ptr)
-{
-    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
-}
-
-void *
-PyMem_Malloc(size_t size)
-{
-    /*
-     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
-     * Most python internals blindly use a signed Py_ssize_t to track
-     * things without checking for overflows or negatives.
-     * As size_t is unsigned, checking for size < 0 is not required.
-     */
-    if (size > (size_t)PY_SSIZE_T_MAX)
-        return NULL;
-
-    return _PyMem.malloc(_PyMem.ctx, size);
-}
-
-void *
-PyMem_Realloc(void *ptr, size_t new_size)
-{
-    if (new_size > (size_t)PY_SSIZE_T_MAX)
-        return NULL;
-
-    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
-}
-
-void
-PyMem_Free(void *ptr)
-{
-    _PyMem.free(_PyMem.ctx, ptr);
-}
-
-void *
-PyObject_Malloc(size_t size)
-{
-    /*
-     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
-     * Most python internals blindly use a signed Py_ssize_t to track
-     * things without checking for overflows or negatives.
-     * As size_t is unsigned, checking for size < 0 is not required.
-     */
-    if (size > (size_t)PY_SSIZE_T_MAX)
-        return NULL;
-
-    return _PyObject.malloc(_PyObject.ctx, size);
-}
-
-void *
-PyObject_Realloc(void *ptr, size_t new_size)
-{
-    if (new_size > (size_t)PY_SSIZE_T_MAX)
-        return NULL;
-
-    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
-}
-
-void
-PyObject_Free(void *ptr)
-{
-    _PyObject.free(_PyObject.ctx, ptr);
-}
-
-
 
 #ifdef WITH_PYMALLOC
 
 #ifdef HAVE_MMAP
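Everything deleted above is the machinery being reverted: three static PyMemAllocators tables (raw, mem, object), arena hooks, and the Get/Set entry points. A hypothetical embedder would have hooked allocations roughly as below. Names follow the deleted code and the PyMemAllocators sketch given earlier; since this API never shipped in this form, treat it purely as an illustration.

    /* Hypothetical client of the reverted API: count PyMem_Malloc() bytes
     * by wrapping the previously installed "mem" allocators. */
    static PyMemAllocators orig_mem;    /* saved table, used for chaining */
    static size_t total_bytes = 0;

    static void *
    counting_malloc(void *ctx, size_t size)
    {
        total_bytes += size;
        return orig_mem.malloc(orig_mem.ctx, size);
    }

    static void *
    counting_realloc(void *ctx, void *ptr, size_t new_size)
    {
        total_bytes += new_size;
        return orig_mem.realloc(orig_mem.ctx, ptr, new_size);
    }

    static void
    counting_free(void *ctx, void *ptr)
    {
        orig_mem.free(orig_mem.ctx, ptr);
    }

    static void
    install_counting_hooks(void)
    {
        PyMemAllocators hook = {NULL, counting_malloc, counting_realloc,
                                counting_free};
        PyMem_GetAllocators(&orig_mem);  /* save the current table */
        PyMem_SetAllocators(&hook);
    }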
@@ -867,6 +545,7 @@ new_arena(void)
     struct arena_object* arenaobj;
     uint excess;        /* number of bytes above pool alignment */
     void *address;
+    int err;
 
 #ifdef PYMALLOC_DEBUG
     if (Py_GETENV("PYTHONMALLOCSTATS"))
@@ -888,12 +567,11 @@ new_arena(void)
             return NULL;                /* overflow */
 #endif
         nbytes = numarenas * sizeof(*arenas);
-        arenaobj = (struct arena_object *)PyMem_Realloc(arenas, nbytes);
+        arenaobj = (struct arena_object *)realloc(arenas, nbytes);
         if (arenaobj == NULL)
             return NULL;
         arenas = arenaobj;
-
 
         /* We might need to fix pointers that were copied.  However,
          * new_arena only gets called when all the pages in the
          * previous arenas are full.  Thus, there are *no* pointers
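Just above this hunk, new_arena() doubles numarenas and returns NULL on multiplication overflow before reallocating the vector. A distilled, self-contained version of that growth pattern follows; the names are hypothetical, and CPython's actual checks differ in detail (it compares against maxarenas and uses a platform-specific overflow test).

    #include <stdint.h>
    #include <stdlib.h>

    /* Overflow-checked doubling of a vector, in the spirit of new_arena():
     * refuse to grow if count * element-size would overflow size_t. */
    static void *
    grow_vector(void *vec, size_t *count, size_t elem_size)
    {
        size_t new_count = *count ? *count * 2 : 16;
        if (new_count <= *count || new_count > SIZE_MAX / elem_size)
            return NULL;                 /* overflow */
        void *p = realloc(vec, new_count * elem_size);
        if (p != NULL)
            *count = new_count;
        return p;
    }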
@@ -920,8 +598,15 @@ new_arena(void)
     arenaobj = unused_arena_objects;
     unused_arena_objects = arenaobj->nextarena;
     assert(arenaobj->address == 0);
-    address = _PyObject_Arena.malloc(_PyObject_Arena.ctx, ARENA_SIZE);
-    if (address == NULL) {
+#ifdef ARENAS_USE_MMAP
+    address = mmap(NULL, ARENA_SIZE, PROT_READ|PROT_WRITE,
+                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+    err = (address == MAP_FAILED);
+#else
+    address = malloc(ARENA_SIZE);
+    err = (address == 0);
+#endif
+    if (err) {
         /* The allocation failed: return NULL after putting the
          * arenaobj back.
          */
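With the hook table gone, arena memory again comes straight from mmap() where available, with a malloc() fallback. A standalone restatement of that path, assuming this era's 256 KiB ARENA_SIZE (an assumption; the constant is defined elsewhere in obmalloc.c):

    #include <stdlib.h>
    #ifdef MAP_ANONYMOUS
    #include <sys/mman.h>
    #endif

    #define ARENA_SIZE (256 << 10)      /* assumed 256 KiB, as in this era */

    /* Allocate one arena; sets *err on failure instead of relying on a
     * NULL return, mirroring the restored code above. */
    static void *
    arena_alloc(int *err)
    {
    #ifdef MAP_ANONYMOUS
        void *address = mmap(NULL, ARENA_SIZE, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *err = (address == MAP_FAILED);
    #else
        void *address = malloc(ARENA_SIZE);
        *err = (address == NULL);
    #endif
        return address;
    }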
@@ -1084,8 +769,9 @@ int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
  * Unless the optimizer reorders everything, being too smart...
  */
 
-static void *
-_PyObject_Malloc(void *ctx, size_t nbytes)
+#undef PyObject_Malloc
+void *
+PyObject_Malloc(size_t nbytes)
 {
     block *bp;
     poolp pool;
@@ -1101,6 +787,17 @@ _PyObject_Malloc(void *ctx, size_t nbytes)
         goto redirect;
 #endif
 
+    /*
+     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
+     * Most python internals blindly use a signed Py_ssize_t to track
+     * things without checking for overflows or negatives.
+     * As size_t is unsigned, checking for nbytes < 0 is not required.
+     */
+    if (nbytes > PY_SSIZE_T_MAX) {
+        _Py_AllocatedBlocks--;
+        return NULL;
+    }
+
     /*
      * This implicitly redirects malloc(0).
      */
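The restored guard exists because many internals store sizes in the signed Py_ssize_t; a request above PY_SSIZE_T_MAX would flip negative there and defeat later "size >= 0" reasoning. A minimal demonstration, with ssize_t standing in for Py_ssize_t:

    #include <stdio.h>
    #include <sys/types.h>              /* ssize_t */

    int main(void)
    {
        size_t huge = (size_t)-1;       /* SIZE_MAX request */
        ssize_t tracked = (ssize_t)huge;/* implementation-defined; -1 on
                                           typical two's-complement targets */
        printf("tracked = %zd\n", tracked);  /* typically prints -1 */
        return 0;
    }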
@@ -1273,8 +970,10 @@ redirect:
      * last chance to serve the request) or when the max memory limit
      * has been reached.
      */
+    if (nbytes == 0)
+        nbytes = 1;
     {
-        void *result = PyMem_Malloc(nbytes);
+        void *result = malloc(nbytes);
         if (!result)
             _Py_AllocatedBlocks--;
         return result;
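The nbytes == 0 bump restores the old guarantee that the redirect path never calls malloc(0), whose return value (NULL or a unique pointer) is implementation-defined in C, while Python treats a NULL here as out-of-memory. Restated as a standalone helper:

    #include <stdlib.h>

    /* Never pass 0 to malloc(): C allows either NULL or a unique pointer,
     * and a NULL result would be misread as allocation failure. */
    static void *
    checked_malloc(size_t nbytes)
    {
        return malloc(nbytes ? nbytes : 1);
    }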
@@ -1283,8 +982,9 @@ redirect:
 
 /* free */
 
-static void
-_PyObject_Free(void *ctx, void *p)
+#undef PyObject_Free
+void
+PyObject_Free(void *p)
 {
     poolp pool;
     block *lastfree;
@@ -1393,8 +1093,11 @@ _PyObject_Free(void *ctx, void *p)
             unused_arena_objects = ao;
 
             /* Free the entire arena. */
-            _PyObject_Arena.free(_PyObject_Arena.ctx,
-                                 (void *)ao->address, ARENA_SIZE);
+#ifdef ARENAS_USE_MMAP
+            munmap((void *)ao->address, ARENA_SIZE);
+#else
+            free((void *)ao->address);
+#endif
             ao->address = 0;            /* mark unassociated */
             --narenas_currently_allocated;
 
@@ -1503,7 +1206,7 @@ _PyObject_Free(void *ctx, void *p)
 redirect:
 #endif
     /* We didn't allocate this address. */
-    PyMem_Free(p);
+    free(p);
 }
 
 /* realloc.  If p is NULL, this acts like malloc(nbytes).  Else if nbytes==0,
@@ -1511,8 +1214,9 @@ redirect:
  * return a non-NULL result.
  */
 
-static void *
-_PyObject_Realloc(void *ctx, void *p, size_t nbytes)
+#undef PyObject_Realloc
+void *
+PyObject_Realloc(void *p, size_t nbytes)
 {
     void *bp;
     poolp pool;
@@ -1522,7 +1226,16 @@ _PyObject_Realloc(void *ctx, void *p, size_t nbytes)
 #endif
 
     if (p == NULL)
-        return _PyObject_Malloc(ctx, nbytes);
+        return PyObject_Malloc(nbytes);
+
+    /*
+     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
+     * Most python internals blindly use a signed Py_ssize_t to track
+     * things without checking for overflows or negatives.
+     * As size_t is unsigned, checking for nbytes < 0 is not required.
+     */
+    if (nbytes > PY_SSIZE_T_MAX)
+        return NULL;
 
 #ifdef WITH_VALGRIND
     /* Treat running_on_valgrind == -1 the same as 0 */
@@ -1550,10 +1263,10 @@ _PyObject_Realloc(void *ctx, void *p, size_t nbytes)
             }
             size = nbytes;
         }
-        bp = _PyObject_Malloc(ctx, nbytes);
+        bp = PyObject_Malloc(nbytes);
         if (bp != NULL) {
             memcpy(bp, p, size);
-            _PyObject_Free(ctx, p);
+            PyObject_Free(p);
         }
         return bp;
     }
@@ -1571,14 +1284,14 @@ _PyObject_Realloc(void *ctx, void *p, size_t nbytes)
      * at p.  Instead we punt:  let C continue to manage this block.
      */
     if (nbytes)
-        return PyMem_Realloc(p, nbytes);
+        return realloc(p, nbytes);
     /* C doesn't define the result of realloc(p, 0) (it may or may not
      * return NULL then), but Python's docs promise that nbytes==0 never
      * returns NULL.  We don't pass 0 to realloc(), to avoid that endcase
      * to begin with.  Even then, we can't be sure that realloc() won't
      * return NULL.
      */
-    bp = PyMem_Realloc(p, 1);
+    bp = realloc(p, 1);
     return bp ? bp : p;
 }
 
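The same zero-size caution applies to realloc(): C leaves the result of realloc(p, 0) implementation-defined, while the comment above records Python's promise that an nbytes == 0 request never returns NULL. The restored endcase, restated as a standalone helper:

    #include <stdlib.h>

    /* Shrink to 1 byte instead of calling realloc(p, 0), and keep the old
     * block if even that fails, so the caller never sees NULL for a
     * 0-byte request. */
    static void *
    shrink_to_empty(void *p)
    {
        void *bp = realloc(p, 1);
        return bp ? bp : p;
    }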
@@ -1588,6 +1301,24 @@ _PyObject_Realloc(void *ctx, void *p, size_t nbytes)
 /* pymalloc not enabled:  Redirect the entry points to malloc.  These will
  * only be used by extensions that are compiled with pymalloc enabled. */
 
+void *
+PyObject_Malloc(size_t n)
+{
+    return PyMem_MALLOC(n);
+}
+
+void *
+PyObject_Realloc(void *p, size_t n)
+{
+    return PyMem_REALLOC(p, n);
+}
+
+void
+PyObject_Free(void *p)
+{
+    PyMem_FREE(p);
+}
+
 Py_ssize_t
 _Py_GetAllocatedBlocks(void)
 {
@@ -1613,6 +1344,10 @@ _Py_GetAllocatedBlocks(void)
 #define DEADBYTE       0xDB    /* dead (newly freed) memory */
 #define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */
 
+/* We tag each block with an API ID in order to tag API violations */
+#define _PYMALLOC_MEM_ID 'm'   /* the PyMem_Malloc() API */
+#define _PYMALLOC_OBJ_ID 'o'   /* The PyObject_Malloc() API */
+
 static size_t serialno = 0;     /* incremented on each debug {m,re}alloc */
 
 /* serialno is always incremented via calling this routine.  The point is
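bumpserialno() and write_size_t() are existing obmalloc.c helpers referenced by this hunk's context; their bodies are not part of the diff, so the sketch below is a reconstruction of how the serial number is maintained and stored most-significant byte first:

    #include <stddef.h>

    /* Reconstruction, not the committed code: each debug {m,re}alloc bumps
     * a global counter, later written big-endian into the block's tail so
     * a debugger can break on a specific allocation by serial number. */
    static size_t serialno_sketch = 0;

    static void bumpserialno_sketch(void) { ++serialno_sketch; }

    /* Store a size_t big-endian so hex dumps read naturally. */
    static void
    write_size_t_sketch(void *p, size_t n)
    {
        unsigned char *q = (unsigned char *)p + sizeof(size_t) - 1;
        size_t i;
        for (i = 0; i < sizeof(size_t); ++i, --q) {
            *q = (unsigned char)(n & 0xff);
            n >>= 8;
        }
    }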
@@ -1695,18 +1430,58 @@ p[2*S: 2*S+n]
 p[2*S+n: 2*S+n+S]
     Copies of FORBIDDENBYTE.  Used to catch over- writes and reads.
 p[2*S+n+S: 2*S+n+2*S]
-    A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
-    and _PyMem_DebugRealloc.
+    A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
+    and _PyObject_DebugRealloc.
     This is a big-endian size_t.
     If "bad memory" is detected later, the serial number gives an
     excellent way to set a breakpoint on the next run, to capture the
     instant at which this block was passed out.
 */
 
-static void *
-_PyMem_DebugMalloc(void *ctx, size_t nbytes)
+/* debug replacements for the PyMem_* memory API */
+void *
+_PyMem_DebugMalloc(size_t nbytes)
+{
+    return _PyObject_DebugMallocApi(_PYMALLOC_MEM_ID, nbytes);
+}
+void *
+_PyMem_DebugRealloc(void *p, size_t nbytes)
+{
+    return _PyObject_DebugReallocApi(_PYMALLOC_MEM_ID, p, nbytes);
+}
+void
+_PyMem_DebugFree(void *p)
+{
+    _PyObject_DebugFreeApi(_PYMALLOC_MEM_ID, p);
+}
+
+/* debug replacements for the PyObject_* memory API */
+void *
+_PyObject_DebugMalloc(size_t nbytes)
+{
+    return _PyObject_DebugMallocApi(_PYMALLOC_OBJ_ID, nbytes);
+}
+void *
+_PyObject_DebugRealloc(void *p, size_t nbytes)
+{
+    return _PyObject_DebugReallocApi(_PYMALLOC_OBJ_ID, p, nbytes);
+}
+void
+_PyObject_DebugFree(void *p)
+{
+    _PyObject_DebugFreeApi(_PYMALLOC_OBJ_ID, p);
+}
+void
+_PyObject_DebugCheckAddress(const void *p)
+{
+    _PyObject_DebugCheckAddressApi(_PYMALLOC_OBJ_ID, p);
+}
+
+
+/* generic debug memory api, with an "id" to identify the API in use */
+void *
+_PyObject_DebugMallocApi(char id, size_t nbytes)
 {
-    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
     uchar *p;           /* base address of malloc'ed block */
     uchar *tail;        /* p + 2*SST + nbytes == pointer to tail pad bytes */
     size_t total;       /* nbytes + 4*SST */
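The block layout described in the comment above wraps the user's n bytes in 4*S bytes of bookkeeping, where S = sizeof(size_t) (SST in the code). The helper names below are mine, purely to make the offsets concrete:

    #include <stddef.h>

    /* Offsets within a debug block of user size n, per the layout comment:
     * [0, S)          requested size, big-endian
     * [S]             API id byte ('m' or 'o'), then FORBIDDENBYTE padding
     * [2*S, 2*S+n)    the user's data (CLEANBYTE on malloc, DEADBYTE on free)
     * [2*S+n, +S)     FORBIDDENBYTE tail guard
     * [2*S+n+S, +S)   big-endian serial number
     * Total overhead: 4*S bytes. Illustrative names only. */
    #define S sizeof(size_t)

    static size_t debug_total(size_t n)   { return n + 4*S; }
    static size_t user_offset(void)       { return 2*S; }
    static size_t tail_offset(size_t n)   { return 2*S + n; }
    static size_t serial_offset(size_t n) { return 2*S + n + S; }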
@@ -1717,14 +1492,14 @@ _PyMem_DebugMalloc(void *ctx, size_t nbytes)
         /* overflow:  can't represent total as a size_t */
         return NULL;
 
-    p = (uchar *)api->alloc.malloc(api->alloc.ctx, total);
+    p = (uchar *)PyObject_Malloc(total);
     if (p == NULL)
         return NULL;
 
     /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
     write_size_t(p, nbytes);
-    p[SST] = (uchar)api->api_id;
-    memset(p + SST + 1, FORBIDDENBYTE, SST-1);
+    p[SST] = (uchar)id;
+    memset(p + SST + 1 , FORBIDDENBYTE, SST-1);
 
     if (nbytes > 0)
         memset(p + 2*SST, CLEANBYTE, nbytes);
@@ -1742,27 +1517,25 @@ _PyMem_DebugMalloc(void *ctx, size_t nbytes)
    Then fills the original bytes with DEADBYTE.
    Then calls the underlying free.
 */
-static void
-_PyMem_DebugFree(void *ctx, void *p)
+void
+_PyObject_DebugFreeApi(char api, void *p)
 {
-    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
     uchar *q = (uchar *)p - 2*SST;  /* address returned from malloc */
     size_t nbytes;
 
     if (p == NULL)
         return;
-    _PyMem_DebugCheckAddress(api->api_id, p);
+    _PyObject_DebugCheckAddressApi(api, p);
     nbytes = read_size_t(q);
     nbytes += 4*SST;
     if (nbytes > 0)
         memset(q, DEADBYTE, nbytes);
-    api->alloc.free(api->alloc.ctx, q);
+    PyObject_Free(q);
 }
 
-static void *
-_PyMem_DebugRealloc(void *ctx, void *p, size_t nbytes)
+void *
+_PyObject_DebugReallocApi(char api, void *p, size_t nbytes)
 {
-    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
     uchar *q = (uchar *)p;
     uchar *tail;
     size_t total;       /* nbytes + 4*SST */
@@ -1770,9 +1543,9 @@ _PyMem_DebugRealloc(void *ctx, void *p, size_t nbytes)
     int i;
 
     if (p == NULL)
-        return _PyMem_DebugMalloc(ctx, nbytes);
+        return _PyObject_DebugMallocApi(api, nbytes);
 
-    _PyMem_DebugCheckAddress(api->api_id, p);
+    _PyObject_DebugCheckAddressApi(api, p);
     bumpserialno();
     original_nbytes = read_size_t(q - 2*SST);
     total = nbytes + 4*SST;
@@ -1789,12 +1562,12 @@ _PyMem_DebugRealloc(void *ctx, void *p, size_t nbytes)
      * case we didn't get the chance to mark the old memory with DEADBYTE,
      * but we live with that.
      */
-    q = (uchar *)api->alloc.realloc(api->alloc.ctx, q - 2*SST, total);
+    q = (uchar *)PyObject_Realloc(q - 2*SST, total);
     if (q == NULL)
         return NULL;
 
     write_size_t(q, nbytes);
-    assert(q[SST] == (uchar)api->api_id);
+    assert(q[SST] == (uchar)api);
     for (i = 1; i < SST; ++i)
         assert(q[SST + i] == FORBIDDENBYTE);
     q += 2*SST;
@@ -1816,8 +1589,8 @@ _PyMem_DebugRealloc(void *ctx, void *p, size_t nbytes)
  * and call Py_FatalError to kill the program.
  * The API id, is also checked.
  */
-static void
-_PyMem_DebugCheckAddress(char api, const void *p)
+void
+_PyObject_DebugCheckAddressApi(char api, const void *p)
 {
     const uchar *q = (const uchar *)p;
     char msgbuf[64];
@@ -1869,7 +1642,7 @@ error:
 }
 
 /* Display info to stderr about the memory block at p. */
-static void
+void
 _PyObject_DebugDumpAddress(const void *p)
 {
     const uchar *q = (const uchar *)p;