Mirror of https://github.com/python/cpython.git (synced 2025-08-04 17:08:35 +00:00)
bpo-45340: Don't create object dictionaries unless actually needed (GH-28802)
* Never change types' cached keys. It could invalidate inline attribute objects.
* Lazily create object dictionaries.
* Update specialization of LOAD/STORE_ATTR.
* Don't update shared keys version for deletion of value.
* Update gdb support to handle instance values.
* Rename SPLIT_KEYS opcodes to INSTANCE_VALUE.
Parent: 97308dfcdc
Commit: a8b9350964
18 changed files with 721 additions and 400 deletions
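The headline change ("lazily create object dictionaries") means instances of classes with cached shared keys no longer get a __dict__ object up front: they carry only a bare array of values that reuses the class's key table, and a real dict is materialized on demand (see init_inline_values and make_dict_from_instance_attributes in the dict-implementation diff below). The following stand-alone C sketch illustrates that idea only; toy_keys, toy_values and toy_materialize_dict are simplified stand-ins, not the CPython structures or API.

#include <stdio.h>
#include <stdlib.h>

/* Shared key table: one per class, listing attribute names in insertion order. */
typedef struct {
    const char *names[4];
    int nentries;
} toy_keys;

/* Per-instance storage: just the value slots, no dict object yet. */
typedef struct {
    const char *values[4];   /* NULL slot == attribute not set */
} toy_values;

/* A plain key/value "dict", only built when somebody really needs it. */
typedef struct {
    const char *keys[4];
    const char *values[4];
    int used;
} toy_dict;

/* Materialize a dict from the shared keys plus this instance's values. */
static toy_dict *toy_materialize_dict(const toy_keys *keys, const toy_values *vals)
{
    toy_dict *d = calloc(1, sizeof(*d));
    if (d == NULL) {
        return NULL;
    }
    for (int i = 0; i < keys->nentries; i++) {
        if (vals->values[i] != NULL) {
            d->keys[d->used] = keys->names[i];
            d->values[d->used] = vals->values[i];
            d->used++;
        }
    }
    return d;
}

int main(void)
{
    toy_keys point_keys = { { "x", "y" }, 2 };
    toy_values p = { { "1", "2" } };   /* attribute reads/writes touch only this */

    /* Only an explicit __dict__ access pays for building a dict object. */
    toy_dict *d = toy_materialize_dict(&point_keys, &p);
    if (d == NULL) {
        return 1;
    }
    for (int i = 0; i < d->used; i++) {
        printf("%s = %s\n", d->keys[i], d->values[i]);
    }
    free(d);
    return 0;
}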
@@ -634,7 +634,7 @@ new_values(Py_ssize_t size)
 
 /* Consumes a reference to the keys object */
 static PyObject *
-new_dict(PyDictKeysObject *keys, PyDictValues *values)
+new_dict(PyDictKeysObject *keys, PyDictValues *values, Py_ssize_t used, int free_values_on_failure)
 {
     PyDictObject *mp;
     assert(keys != NULL);
@@ -653,7 +653,7 @@ new_dict(PyDictKeysObject *keys, PyDictValues *values)
     mp = PyObject_GC_New(PyDictObject, &PyDict_Type);
     if (mp == NULL) {
         dictkeys_decref(keys);
-        if (values != empty_values) {
+        if (free_values_on_failure) {
             free_values(values);
         }
         return NULL;
@@ -661,12 +661,18 @@ new_dict(PyDictKeysObject *keys, PyDictValues *values)
     }
     mp->ma_keys = keys;
     mp->ma_values = values;
-    mp->ma_used = 0;
+    mp->ma_used = used;
     mp->ma_version_tag = DICT_NEXT_VERSION();
     ASSERT_CONSISTENT(mp);
     return (PyObject *)mp;
 }
 
+static inline Py_ssize_t
+shared_keys_usable_size(PyDictKeysObject *keys)
+{
+    return keys->dk_nentries + keys->dk_usable;
+}
+
 /* Consumes a reference to the keys object */
 static PyObject *
 new_dict_with_shared_keys(PyDictKeysObject *keys)
@@ -674,7 +680,7 @@ new_dict_with_shared_keys(PyDictKeysObject *keys)
     PyDictValues *values;
     Py_ssize_t i, size;
 
-    size = USABLE_FRACTION(DK_SIZE(keys));
+    size = shared_keys_usable_size(keys);
     values = new_values(size);
     if (values == NULL) {
         dictkeys_decref(keys);
@@ -684,7 +690,7 @@ new_dict_with_shared_keys(PyDictKeysObject *keys)
     for (i = 0; i < size; i++) {
         values->values[i] = NULL;
     }
-    return new_dict(keys, values);
+    return new_dict(keys, values, 0, 1);
 }
 
 
@@ -733,7 +739,7 @@ PyObject *
 PyDict_New(void)
 {
     dictkeys_incref(Py_EMPTY_KEYS);
-    return new_dict(Py_EMPTY_KEYS, empty_values);
+    return new_dict(Py_EMPTY_KEYS, empty_values, 0, 0);
 }
 
 /* Search index of hash table from offset of entry table */
@@ -998,6 +1004,40 @@ insertion_resize(PyDictObject *mp)
     return dictresize(mp, calculate_log2_keysize(GROWTH_RATE(mp)));
 }
 
+static int
+insert_into_dictkeys(PyDictKeysObject *keys, PyObject *name)
+{
+    assert(PyUnicode_CheckExact(name));
+    Py_hash_t hash = ((PyASCIIObject *)name)->hash;
+    if (hash == -1) {
+        hash = PyUnicode_Type.tp_hash(name);
+        if (hash == -1) {
+            PyErr_Clear();
+            return DKIX_EMPTY;
+        }
+    }
+    Py_ssize_t ix = dictkeys_stringlookup(keys, name, hash);
+    if (ix == DKIX_EMPTY) {
+        if (keys->dk_usable <= 0) {
+            return DKIX_EMPTY;
+        }
+        Py_INCREF(name);
+        /* Insert into new slot. */
+        keys->dk_version = 0;
+        Py_ssize_t hashpos = find_empty_slot(keys, hash);
+        ix = keys->dk_nentries;
+        PyDictKeyEntry *ep = &DK_ENTRIES(keys)[ix];
+        dictkeys_set_index(keys, hashpos, ix);
+        assert(ep->me_key == NULL);
+        ep->me_key = name;
+        ep->me_hash = hash;
+        keys->dk_usable--;
+        keys->dk_nentries++;
+    }
+    assert (ix < SHARED_KEYS_MAX_SIZE);
+    return (int)ix;
+}
+
 /*
 Internal routine to insert a new item into the table.
 Used both by the internal resize routine and by the public insert routine.
@@ -1043,7 +1083,7 @@ insertdict(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject *value)
         Py_ssize_t index = mp->ma_keys->dk_nentries;
         assert(index < SHARED_KEYS_MAX_SIZE);
         assert((mp->ma_values->mv_order >> 60) == 0);
-        mp->ma_values->mv_order = (mp->ma_values->mv_order)<<4 | index;
+        mp->ma_values->mv_order = ((mp->ma_values->mv_order)<<4) | index;
         assert (mp->ma_values->values[index] == NULL);
         mp->ma_values->values[index] = value;
     }
@@ -1144,8 +1184,7 @@ actually be smaller than the old one.
 If a table is split (its keys and hashes are shared, its values are not),
 then the values are temporarily copied into the table, it is resized as
 a combined table, then the me_value slots in the old table are NULLed out.
-After resizing a table is always combined,
-but can be resplit by make_keys_shared().
+After resizing a table is always combined.
 */
 static int
 dictresize(PyDictObject *mp, uint8_t log2_newsize)
@@ -1186,19 +1225,16 @@ dictresize(PyDictObject *mp, uint8_t log2_newsize)
     if (oldvalues != NULL) {
         /* Convert split table into new combined table.
          * We must incref keys; we can transfer values.
          * Note that values of split table is always dense.
          */
         for (Py_ssize_t i = 0; i < numentries; i++) {
-            int index = oldvalues->mv_order >> ((numentries-1-i)*4) & 15;
-            assert(oldvalues->values[index] != NULL);
+            int index = get_index_from_order(mp, i);
             PyDictKeyEntry *ep = &oldentries[index];
-            PyObject *key = ep->me_key;
-            Py_INCREF(key);
-            newentries[i].me_key = key;
+            assert(oldvalues->values[index] != NULL);
+            Py_INCREF(ep->me_key);
+            newentries[i].me_key = ep->me_key;
             newentries[i].me_hash = ep->me_hash;
             newentries[i].me_value = oldvalues->values[index];
         }
 
         dictkeys_decref(oldkeys);
         mp->ma_values = NULL;
         if (oldvalues != empty_values) {
@@ -1241,69 +1277,8 @@ dictresize(PyDictObject *mp, uint8_t log2_newsize)
     build_indices(mp->ma_keys, newentries, numentries);
     mp->ma_keys->dk_usable -= numentries;
     mp->ma_keys->dk_nentries = numentries;
-    return 0;
-}
-
-/* Returns NULL if unable to split table.
- * A NULL return does not necessarily indicate an error */
-static PyDictKeysObject *
-make_keys_shared(PyObject *op)
-{
-    Py_ssize_t i;
-    Py_ssize_t size;
-    PyDictObject *mp = (PyDictObject *)op;
-
-    if (!PyDict_CheckExact(op))
-        return NULL;
-    if (mp->ma_used > SHARED_KEYS_MAX_SIZE) {
-        return NULL;
-    }
-    if (!_PyDict_HasSplitTable(mp)) {
-        PyDictKeyEntry *ep0;
-        PyDictValues *values;
-        assert(mp->ma_keys->dk_refcnt == 1);
-        if (mp->ma_keys->dk_kind == DICT_KEYS_GENERAL) {
-            return NULL;
-        }
-        else if (mp->ma_used > mp->ma_keys->dk_nentries) {
-            /* Remove dummy keys */
-            if (dictresize(mp, DK_LOG_SIZE(mp->ma_keys)))
-                return NULL;
-        }
-        assert(mp->ma_used == mp->ma_keys->dk_nentries);
-        /* Copy values into a new array */
-        ep0 = DK_ENTRIES(mp->ma_keys);
-        size = USABLE_FRACTION(DK_SIZE(mp->ma_keys));
-        values = new_values(size);
-        if (values == NULL) {
-            PyErr_SetString(PyExc_MemoryError,
-                            "Not enough memory to allocate new values array");
-            return NULL;
-        }
-        uint64_t order = 0;
-        for (i = 0; i < mp->ma_used; i++) {
-            order <<= 4;
-            order |= i;
-            assert(ep0[i].me_value != NULL);
-            values->values[i] = ep0[i].me_value;
-            ep0[i].me_value = NULL;
-        }
-        values->mv_order = order;
-        for (; i < size; i++) {
-            assert(ep0[i].me_value == NULL);
-            values->values[i] = NULL;
-            ep0[i].me_value = NULL;
-        }
-        if (mp->ma_keys->dk_nentries + mp->ma_keys->dk_usable > SHARED_KEYS_MAX_SIZE) {
-            assert(mp->ma_keys->dk_nentries <= SHARED_KEYS_MAX_SIZE);
-            mp->ma_keys->dk_usable = SHARED_KEYS_MAX_SIZE - mp->ma_keys->dk_nentries;
-        }
-        mp->ma_keys->dk_kind = DICT_KEYS_SPLIT;
-        mp->ma_values = values;
-    }
-    dictkeys_incref(mp->ma_keys);
-    ASSERT_CONSISTENT(mp);
-    return mp->ma_keys;
+    return 0;
 }
 
 PyObject *
@@ -1331,7 +1306,7 @@ _PyDict_NewPresized(Py_ssize_t minused)
     new_keys = new_keys_object(log2_newsize);
     if (new_keys == NULL)
         return NULL;
-    return new_dict(new_keys, NULL);
+    return new_dict(new_keys, NULL, 0, 0);
 }
 
 /* Note that, for historical reasons, PyDict_GetItem() suppresses all errors
@@ -1503,6 +1478,9 @@ _PyDict_GetItemStringWithError(PyObject *v, const char *key)
 /* Fast version of global value lookup (LOAD_GLOBAL).
  * Lookup in globals, then builtins.
  *
+ *
+ *
+ *
  * Raise an exception and return NULL if an error occurred (ex: computing the
  * key hash failed, key comparison failed, ...). Return NULL if the key doesn't
  * exist. Return the value if the key exists.
@@ -1590,6 +1568,21 @@ _PyDict_SetItem_KnownHash(PyObject *op, PyObject *key, PyObject *value,
     return insertdict(mp, key, hash, value);
 }
 
+static uint64_t
+delete_index_from_order(uint64_t order, Py_ssize_t ix)
+{ /* Update order */
+    for (int i = 0;; i+= 4) {
+        assert (i < 64);
+        if (((order >> i) & 15) == (uint64_t)ix) {
+            /* Remove 4 bits at ith position */
+            uint64_t high = ((order>>i)>>4)<<i;
+            uint64_t low = order & ((((uint64_t)1)<<i)-1);
+            return high | low;
+        }
+    }
+    Py_UNREACHABLE();
+}
+
 static int
 delitem_common(PyDictObject *mp, Py_hash_t hash, Py_ssize_t ix,
                PyObject *old_value)
@@ -1601,7 +1594,6 @@ delitem_common(PyDictObject *mp, Py_hash_t hash, Py_ssize_t ix,
     assert(hashpos >= 0);
 
     mp->ma_used--;
-    mp->ma_keys->dk_version = 0;
     mp->ma_version_tag = DICT_NEXT_VERSION();
     ep = &DK_ENTRIES(mp->ma_keys)[ix];
     if (mp->ma_values) {
@@ -1609,19 +1601,12 @@ delitem_common(PyDictObject *mp, Py_hash_t hash, Py_ssize_t ix,
         mp->ma_values->values[ix] = NULL;
         assert(ix < SHARED_KEYS_MAX_SIZE);
-        /* Update order */
-        for (int i = 0;; i+= 4) {
-            assert (i < 64);
-            if (((mp->ma_values->mv_order >> i) & 15) == (uint64_t)ix) {
-                /* Remove 4 bits at ith position */
-                uint64_t order = mp->ma_values->mv_order;
-                uint64_t high = ((order>>i)>>4)<<i;
-                uint64_t low = order & ((((uint64_t)1)<<i)-1);
-                mp->ma_values->mv_order = high | low;
-                break;
-            }
-        }
+        mp->ma_values->mv_order =
+            delete_index_from_order(mp->ma_values->mv_order, ix);
         ASSERT_CONSISTENT(mp);
     }
     else {
+        mp->ma_keys->dk_version = 0;
         dictkeys_set_index(mp->ma_keys, hashpos, DKIX_DUMMY);
         old_key = ep->me_key;
         ep->me_key = NULL;
@@ -2692,7 +2677,7 @@ PyDict_Copy(PyObject *o)
 
     if (_PyDict_HasSplitTable(mp)) {
         PyDictObject *split_copy;
-        Py_ssize_t size = USABLE_FRACTION(DK_SIZE(mp->ma_keys));
+        Py_ssize_t size = shared_keys_usable_size(mp->ma_keys);
         PyDictValues *newvalues;
         newvalues = new_values(size);
         if (newvalues == NULL)
@@ -2740,7 +2725,7 @@ PyDict_Copy(PyObject *o)
         if (keys == NULL) {
             return NULL;
         }
-        PyDictObject *new = (PyDictObject *)new_dict(keys, NULL);
+        PyDictObject *new = (PyDictObject *)new_dict(keys, NULL, 0, 0);
         if (new == NULL) {
             /* In case of an error, `new_dict()` takes care of
                cleaning up `keys`. */
@@ -2979,15 +2964,6 @@ PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *defaultobj)
     if (ix == DKIX_ERROR)
         return NULL;
 
-    if (_PyDict_HasSplitTable(mp) &&
-        ((ix >= 0 && value == NULL && mp->ma_used != ix) ||
-         (ix == DKIX_EMPTY && mp->ma_used != mp->ma_keys->dk_nentries))) {
-        if (insertion_resize(mp) < 0) {
-            return NULL;
-        }
-        ix = DKIX_EMPTY;
-    }
-
     if (ix == DKIX_EMPTY) {
         mp->ma_keys->dk_version = 0;
         PyDictKeyEntry *ep, *ep0;
@@ -3028,7 +3004,7 @@ PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *defaultobj)
     else if (value == NULL) {
         value = defaultobj;
         assert(_PyDict_HasSplitTable(mp));
-        assert(ix == mp->ma_used);
+        assert(mp->ma_values->values[ix] == NULL);
         Py_INCREF(value);
         MAINTAIN_TRACKING(mp, key, value);
         mp->ma_values->values[ix] = value;
@@ -3204,20 +3180,22 @@ static PyObject *dictiter_new(PyDictObject *, PyTypeObject *);
 Py_ssize_t
 _PyDict_SizeOf(PyDictObject *mp)
 {
-    Py_ssize_t size, usable, res;
+    Py_ssize_t size, res;
 
     size = DK_SIZE(mp->ma_keys);
-    usable = USABLE_FRACTION(size);
 
     res = _PyObject_SIZE(Py_TYPE(mp));
-    if (mp->ma_values)
-        res += usable * sizeof(PyObject*);
+    if (mp->ma_values) {
+        res += shared_keys_usable_size(mp->ma_keys) * sizeof(PyObject*);
+    }
     /* If the dictionary is split, the keys portion is accounted-for
        in the type object. */
-    if (mp->ma_keys->dk_refcnt == 1)
+    if (mp->ma_keys->dk_refcnt == 1) {
+        Py_ssize_t usable = USABLE_FRACTION(size);
         res += (sizeof(PyDictKeysObject)
                 + DK_IXSIZE(mp->ma_keys) * size
                 + sizeof(PyDictKeyEntry) * usable);
+    }
     return res;
 }
 
@@ -4919,11 +4897,14 @@ dictvalues_reversed(_PyDictViewObject *dv, PyObject *Py_UNUSED(ignored))
 PyDictKeysObject *
 _PyDict_NewKeysForClass(void)
 {
-    PyDictKeysObject *keys = new_keys_object(PyDict_LOG_MINSIZE);
+    PyDictKeysObject *keys = new_keys_object(5); /* log2(32) */
     if (keys == NULL) {
         PyErr_Clear();
     }
     else {
+        assert(keys->dk_nentries == 0);
+        /* Set to max size+1 as it will shrink by one before each new object */
+        keys->dk_usable = SHARED_KEYS_MAX_SIZE;
         keys->dk_kind = DICT_KEYS_SPLIT;
     }
     return keys;
@@ -4931,15 +4912,42 @@ _PyDict_NewKeysForClass(void)
 
 #define CACHED_KEYS(tp) (((PyHeapTypeObject*)tp)->ht_cached_keys)
 
+static int
+init_inline_values(PyObject *obj, PyTypeObject *tp)
+{
+    assert(tp->tp_flags & Py_TPFLAGS_HEAPTYPE);
+    assert(tp->tp_dictoffset > 0);
+    assert(tp->tp_inline_values_offset > 0);
+    PyDictKeysObject *keys = CACHED_KEYS(tp);
+    assert(keys != NULL);
+    if (keys->dk_usable > 1) {
+        keys->dk_usable--;
+    }
+    Py_ssize_t size = shared_keys_usable_size(keys);
+    assert(size > 0);
+    PyDictValues *values = new_values(size);
+    if (values == NULL) {
+        PyErr_NoMemory();
+        return -1;
+    }
+    values->mv_order = 0;
+    for (int i = 0; i < size; i++) {
+        values->values[i] = NULL;
+    }
+    *((PyDictValues **)((char *)obj + tp->tp_inline_values_offset)) = values;
+    return 0;
+}
+
 int
 _PyObject_InitializeDict(PyObject *obj)
 {
-    PyObject **dictptr = _PyObject_GetDictPtr(obj);
-    if (dictptr == NULL) {
+    PyTypeObject *tp = Py_TYPE(obj);
+    if (tp->tp_dictoffset == 0) {
         return 0;
     }
-    assert(*dictptr == NULL);
-    PyTypeObject *tp = Py_TYPE(obj);
+    if (tp->tp_inline_values_offset) {
+        return init_inline_values(obj, tp);
+    }
     PyObject *dict;
     if (_PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE) && CACHED_KEYS(tp)) {
         dictkeys_incref(CACHED_KEYS(tp));
@ -4951,15 +4959,174 @@ _PyObject_InitializeDict(PyObject *obj)
|
|||
if (dict == NULL) {
|
||||
return -1;
|
||||
}
|
||||
PyObject **dictptr = _PyObject_DictPointer(obj);
|
||||
*dictptr = dict;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
make_dict_from_instance_attributes(PyDictKeysObject *keys, PyDictValues *values)
|
||||
{
|
||||
dictkeys_incref(keys);
|
||||
Py_ssize_t used = 0;
|
||||
Py_ssize_t track = 0;
|
||||
for (Py_ssize_t i = 0; i < shared_keys_usable_size(keys); i++) {
|
||||
PyObject *val = values->values[i];
|
||||
if (val != NULL) {
|
||||
used += 1;
|
||||
track += _PyObject_GC_MAY_BE_TRACKED(val);
|
||||
}
|
||||
}
|
||||
PyObject *res = new_dict(keys, values, used, 0);
|
||||
if (track && res) {
|
||||
_PyObject_GC_TRACK(res);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
PyObject *
|
||||
_PyObject_MakeDictFromInstanceAttributes(PyObject *obj, PyDictValues *values)
|
||||
{
|
||||
assert(Py_TYPE(obj)->tp_inline_values_offset != 0);
|
||||
PyDictKeysObject *keys = CACHED_KEYS(Py_TYPE(obj));
|
||||
return make_dict_from_instance_attributes(keys, values);
|
||||
}
|
||||
|
||||
int
|
||||
_PyObject_StoreInstanceAttribute(PyObject *obj, PyDictValues *values,
|
||||
PyObject *name, PyObject *value)
|
||||
{
|
||||
assert(PyUnicode_CheckExact(name));
|
||||
PyTypeObject *tp = Py_TYPE(obj);
|
||||
PyDictKeysObject *keys = CACHED_KEYS(Py_TYPE(obj));
|
||||
assert(keys != NULL);
|
||||
assert(values != NULL);
|
||||
int ix = insert_into_dictkeys(keys, name);
|
||||
if (ix == DKIX_EMPTY) {
|
||||
if (value == NULL) {
|
||||
PyErr_SetObject(PyExc_AttributeError, name);
|
||||
return -1;
|
||||
}
|
||||
PyObject *dict = make_dict_from_instance_attributes(keys, values);
|
||||
if (dict == NULL) {
|
||||
return -1;
|
||||
}
|
||||
*((PyDictValues **)((char *)obj + tp->tp_inline_values_offset)) = NULL;
|
||||
*((PyObject **) ((char *)obj + tp->tp_dictoffset)) = dict;
|
||||
return PyDict_SetItem(dict, name, value);
|
||||
}
|
||||
PyObject *old_value = values->values[ix];
|
||||
Py_XINCREF(value);
|
||||
values->values[ix] = value;
|
||||
if (old_value == NULL) {
|
||||
if (value == NULL) {
|
||||
PyErr_SetObject(PyExc_AttributeError, name);
|
||||
return -1;
|
||||
}
|
||||
values->mv_order = (values->mv_order << 4) | ix;
|
||||
}
|
||||
else {
|
||||
if (value == NULL) {
|
||||
values->mv_order = delete_index_from_order(values->mv_order, ix);
|
||||
}
|
||||
Py_DECREF(old_value);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
PyObject *
|
||||
_PyObject_GetInstanceAttribute(PyObject *obj, PyDictValues *values,
|
||||
PyObject *name)
|
||||
{
|
||||
assert(PyUnicode_CheckExact(name));
|
||||
PyDictKeysObject *keys = CACHED_KEYS(Py_TYPE(obj));
|
||||
assert(keys != NULL);
|
||||
Py_ssize_t ix = _PyDictKeys_StringLookup(keys, name);
|
||||
if (ix == DKIX_EMPTY) {
|
||||
return NULL;
|
||||
}
|
||||
PyObject *value = values->values[ix];
|
||||
Py_XINCREF(value);
|
||||
return value;
|
||||
}
|
||||
|
||||
int
|
||||
_PyObject_IsInstanceDictEmpty(PyObject *obj)
|
||||
{
|
||||
PyTypeObject *tp = Py_TYPE(obj);
|
||||
if (tp->tp_dictoffset == 0) {
|
||||
return 1;
|
||||
}
|
||||
PyDictValues **values_ptr = _PyObject_ValuesPointer(obj);
|
||||
if (values_ptr && *values_ptr) {
|
||||
PyDictKeysObject *keys = CACHED_KEYS(tp);
|
||||
for (Py_ssize_t i = 0; i < keys->dk_nentries; i++) {
|
||||
if ((*values_ptr)->values[i] != NULL) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
PyObject **dictptr = _PyObject_DictPointer(obj);
|
||||
PyObject *dict = *dictptr;
|
||||
if (dict == NULL) {
|
||||
return 1;
|
||||
}
|
||||
return ((PyDictObject *)dict)->ma_used == 0;
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
_PyObject_VisitInstanceAttributes(PyObject *self, visitproc visit, void *arg)
|
||||
{
|
||||
PyTypeObject *tp = Py_TYPE(self);
|
||||
assert(tp->tp_inline_values_offset);
|
||||
PyDictValues **values_ptr = _PyObject_ValuesPointer(self);
|
||||
if (*values_ptr == NULL) {
|
||||
return 0;
|
||||
}
|
||||
PyDictKeysObject *keys = CACHED_KEYS(tp);
|
||||
for (Py_ssize_t i = 0; i < keys->dk_nentries; i++) {
|
||||
Py_VISIT((*values_ptr)->values[i]);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
_PyObject_ClearInstanceAttributes(PyObject *self)
|
||||
{
|
||||
PyTypeObject *tp = Py_TYPE(self);
|
||||
assert(tp->tp_inline_values_offset);
|
||||
PyDictValues **values_ptr = _PyObject_ValuesPointer(self);
|
||||
if (*values_ptr == NULL) {
|
||||
return;
|
||||
}
|
||||
PyDictKeysObject *keys = CACHED_KEYS(tp);
|
||||
for (Py_ssize_t i = 0; i < keys->dk_nentries; i++) {
|
||||
Py_CLEAR((*values_ptr)->values[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
_PyObject_FreeInstanceAttributes(PyObject *self)
|
||||
{
|
||||
PyTypeObject *tp = Py_TYPE(self);
|
||||
assert(tp->tp_inline_values_offset);
|
||||
PyDictValues **values_ptr = _PyObject_ValuesPointer(self);
|
||||
if (*values_ptr == NULL) {
|
||||
return;
|
||||
}
|
||||
PyDictKeysObject *keys = CACHED_KEYS(tp);
|
||||
for (Py_ssize_t i = 0; i < keys->dk_nentries; i++) {
|
||||
Py_XDECREF((*values_ptr)->values[i]);
|
||||
}
|
||||
free_values(*values_ptr);
|
||||
}
|
||||
|
||||
PyObject *
|
||||
PyObject_GenericGetDict(PyObject *obj, void *context)
|
||||
{
|
||||
PyObject **dictptr = _PyObject_GetDictPtr(obj);
|
||||
PyObject **dictptr = _PyObject_DictPointer(obj);
|
||||
if (dictptr == NULL) {
|
||||
PyErr_SetString(PyExc_AttributeError,
|
||||
"This object has no __dict__");
|
||||
|
@@ -4968,7 +5135,14 @@ PyObject_GenericGetDict(PyObject *obj, void *context)
     PyObject *dict = *dictptr;
     if (dict == NULL) {
         PyTypeObject *tp = Py_TYPE(obj);
-        if (_PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE) && CACHED_KEYS(tp)) {
+        PyDictValues **values_ptr = _PyObject_ValuesPointer(obj);
+        if (values_ptr && *values_ptr) {
+            *dictptr = dict = make_dict_from_instance_attributes(CACHED_KEYS(tp), *values_ptr);
+            if (dict != NULL) {
+                *values_ptr = NULL;
+            }
+        }
+        else if (_PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE) && CACHED_KEYS(tp)) {
             dictkeys_incref(CACHED_KEYS(tp));
             *dictptr = dict = new_dict_with_shared_keys(CACHED_KEYS(tp));
         }
@@ -5003,37 +5177,7 @@ _PyObjectDict_SetItem(PyTypeObject *tp, PyObject **dictptr,
             res = PyDict_DelItem(dict, key);
         }
         else {
-            int was_shared = (cached == ((PyDictObject *)dict)->ma_keys);
             res = PyDict_SetItem(dict, key, value);
-            if (was_shared &&
-                (cached = CACHED_KEYS(tp)) != NULL &&
-                cached != ((PyDictObject *)dict)->ma_keys &&
-                cached->dk_nentries <= SHARED_KEYS_MAX_SIZE) {
-                /* PyDict_SetItem() may call dictresize and convert split table
-                 * into combined table. In such case, convert it to split
-                 * table again and update type's shared key only when this is
-                 * the only dict sharing key with the type.
-                 *
-                 * This is to allow using shared key in class like this:
-                 *
-                 *     class C:
-                 *         def __init__(self):
-                 *             # one dict resize happens
-                 *             self.a, self.b, self.c = 1, 2, 3
-                 *             self.d, self.e, self.f = 4, 5, 6
-                 *     a = C()
-                 */
-                if (cached->dk_refcnt == 1) {
-                    PyDictKeysObject *new_cached = make_keys_shared(dict);
-                    if (new_cached != NULL) {
-                        CACHED_KEYS(tp) = new_cached;
-                        dictkeys_decref(cached);
-                    }
-                    else if (PyErr_Occurred()) {
-                        return -1;
-                    }
-                }
-            }
         }
     } else {
         dict = *dictptr;
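For reference, the per-object insertion order that several hunks above manipulate is packed into a single uint64_t (mv_order): each attribute slot index occupies 4 bits, with the newest entry in the low nibble, which is why storing an attribute does (mv_order << 4) | ix and why delete_index_from_order() splices one nibble out. The sketch below mirrors that bit manipulation with illustrative names (order_push, order_delete); it is not CPython code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Append slot index ix (0..15) as the newest entry: newest nibble is lowest. */
static uint64_t order_push(uint64_t order, unsigned ix)
{
    assert(ix < 16);
    return (order << 4) | ix;
}

/* Drop the nibble holding ix and close the gap (mirrors delete_index_from_order). */
static uint64_t order_delete(uint64_t order, unsigned ix)
{
    for (int i = 0; i < 64; i += 4) {
        if (((order >> i) & 15) == ix) {
            uint64_t high = ((order >> i) >> 4) << i;           /* nibbles above ix, shifted down */
            uint64_t low = order & ((((uint64_t)1) << i) - 1);  /* nibbles below ix, unchanged */
            return high | low;
        }
    }
    return order;   /* ix not present */
}

int main(void)
{
    uint64_t order = 0;
    order = order_push(order, 0);   /* self.a -> slot 0 */
    order = order_push(order, 1);   /* self.b -> slot 1 */
    order = order_push(order, 2);   /* self.c -> slot 2 */
    printf("order after a,b,c: 0x%03llx\n", (unsigned long long)order);  /* 0x012 */

    order = order_delete(order, 1); /* del self.b */
    printf("order after del b: 0x%02llx\n", (unsigned long long)order);  /* 0x02 */
    return 0;
}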