gh-115999: Add free-threaded specialization for FOR_ITER (#128798)

Add free-threaded versions of the existing FOR_ITER specializations (list, tuple, fast range iterators, and generators) without significantly affecting their thread-safety. (Iterating over shared lists/tuples/ranges remains fine, as before. Reusing a single iterator across threads remains unsupported, as before. Sharing generators between threads remains a recipe for serious crashes, as before.)
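Read concretely, the claims above amount to the following. A minimal, hypothetical sketch (the names `shared_list` and `worker` are illustrative, not part of the patch), assuming a free-threaded (PEP 703) build:

    # Illustrative only: each thread builds its *own* list iterator by looping
    # directly over the shared list, which is the pattern that stays safe.
    import threading

    shared_list = list(range(100_000))

    def worker(results, i):
        total = 0
        for item in shared_list:   # fresh iterator per thread: fine
            total += item
        results[i] = total

    results = [0] * 4
    threads = [threading.Thread(target=worker, args=(results, i)) for i in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    # What remains unsupported, exactly as before this commit:
    #   shared_it = iter(shared_list)    # one iterator object reused by many threads
    #   gen = (x for x in shared_list)   # one generator shared between threads
    # Handing shared_it or gen to several threads concurrently is still not safe.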
T. Wouters, 2025-03-12 16:21:46 +01:00, committed by GitHub
parent db27aee2fe
commit de2f7da77d
13 changed files with 469 additions and 125 deletions


@@ -3005,7 +3005,7 @@ dummy_func(
};
specializing op(_SPECIALIZE_FOR_ITER, (counter/1, iter -- iter)) {
#if ENABLE_SPECIALIZATION
#if ENABLE_SPECIALIZATION_FT
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_Py_Specialize_ForIter(iter, next_instr, oparg);
@@ -3013,7 +3013,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(FOR_ITER);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
#endif /* ENABLE_SPECIALIZATION_FT */
}
replaced op(_FOR_ITER, (iter -- iter, next)) {
@@ -3091,31 +3091,46 @@ dummy_func(
op(_ITER_CHECK_LIST, (iter -- iter)) {
EXIT_IF(Py_TYPE(PyStackRef_AsPyObjectBorrow(iter)) != &PyListIter_Type);
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
EXIT_IF(Py_TYPE(iter_o) != &PyListIter_Type);
#ifdef Py_GIL_DISABLED
EXIT_IF(!_PyObject_IsUniquelyReferenced(iter_o));
_PyListIterObject *it = (_PyListIterObject *)iter_o;
EXIT_IF(!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) &&
!_PyObject_GC_IS_SHARED(it->it_seq));
#endif
}
replaced op(_ITER_JUMP_LIST, (iter -- iter)) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyListIterObject *it = (_PyListIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyListIter_Type);
// For free-threaded Python, the loop exit can happen at any point during
// item retrieval, so it doesn't make much sense to check and jump
// separately before item retrieval. Any length check we do here can be
// invalid by the time we actually try to fetch the item.
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
(void)iter_o;
#else
_PyListIterObject *it = (_PyListIterObject *)iter_o;
STAT_INC(FOR_ITER, hit);
PyListObject *seq = it->it_seq;
if (seq == NULL || (size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) {
it->it_index = -1;
#ifndef Py_GIL_DISABLED
if (seq != NULL) {
it->it_seq = NULL;
Py_DECREF(seq);
}
#endif
/* Jump forward oparg, then skip following END_FOR instruction */
JUMPBY(oparg + 1);
DISPATCH();
}
#endif
}
// Only used by Tier 2
op(_GUARD_NOT_EXHAUSTED_LIST, (iter -- iter)) {
#ifndef Py_GIL_DISABLED
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyListIterObject *it = (_PyListIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyListIter_Type);
@@ -3125,16 +3140,62 @@ dummy_func(
it->it_index = -1;
EXIT_IF(1);
}
#endif
}
op(_ITER_NEXT_LIST, (iter -- iter, next)) {
replaced op(_ITER_NEXT_LIST, (iter -- iter, next)) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyListIterObject *it = (_PyListIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyListIter_Type);
PyListObject *seq = it->it_seq;
assert(seq);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
assert(_Py_IsOwnedByCurrentThread((PyObject *)seq) ||
_PyObject_GC_IS_SHARED(seq));
STAT_INC(FOR_ITER, hit);
int result = _PyList_GetItemRefNoLock(seq, it->it_index, &next);
// A negative result means we lost a race with another thread
// and we need to take the slow path.
DEOPT_IF(result < 0);
if (result == 0) {
it->it_index = -1;
/* Jump forward oparg, then skip following END_FOR instruction */
JUMPBY(oparg + 1);
DISPATCH();
}
it->it_index++;
#else
assert(it->it_index < PyList_GET_SIZE(seq));
next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(seq, it->it_index++));
#endif
}
// Only used by Tier 2
op(_ITER_NEXT_LIST_TIER_TWO, (iter -- iter, next)) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyListIterObject *it = (_PyListIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyListIter_Type);
PyListObject *seq = it->it_seq;
assert(seq);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
assert(_Py_IsOwnedByCurrentThread((PyObject *)seq) ||
_PyObject_GC_IS_SHARED(seq));
STAT_INC(FOR_ITER, hit);
int result = _PyList_GetItemRefNoLock(seq, it->it_index, &next);
// A negative result means we lost a race with another thread
// and we need to take the slow path.
EXIT_IF(result < 0);
if (result == 0) {
it->it_index = -1;
EXIT_IF(1);
}
it->it_index++;
#else
assert(it->it_index < PyList_GET_SIZE(seq));
next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(seq, it->it_index++));
#endif
}
macro(FOR_ITER_LIST) =
@@ -3144,20 +3205,30 @@ dummy_func(
_ITER_NEXT_LIST;
op(_ITER_CHECK_TUPLE, (iter -- iter)) {
EXIT_IF(Py_TYPE(PyStackRef_AsPyObjectBorrow(iter)) != &PyTupleIter_Type);
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
EXIT_IF(Py_TYPE(iter_o) != &PyTupleIter_Type);
#ifdef Py_GIL_DISABLED
EXIT_IF(!_PyObject_IsUniquelyReferenced(iter_o));
#endif
}
replaced op(_ITER_JUMP_TUPLE, (iter -- iter)) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
(void)iter_o;
assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
#endif
_PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
STAT_INC(FOR_ITER, hit);
PyTupleObject *seq = it->it_seq;
if (seq == NULL || it->it_index >= PyTuple_GET_SIZE(seq)) {
if (seq == NULL || (size_t)it->it_index >= (size_t)PyTuple_GET_SIZE(seq)) {
#ifndef Py_GIL_DISABLED
if (seq != NULL) {
it->it_seq = NULL;
Py_DECREF(seq);
}
#endif
/* Jump forward oparg, then skip following END_FOR instruction */
JUMPBY(oparg + 1);
DISPATCH();
@@ -3169,6 +3240,9 @@ dummy_func(
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
#endif
PyTupleObject *seq = it->it_seq;
EXIT_IF(seq == NULL);
EXIT_IF(it->it_index >= PyTuple_GET_SIZE(seq));
@@ -3179,6 +3253,9 @@ dummy_func(
_PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
PyTupleObject *seq = it->it_seq;
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
#endif
assert(seq);
assert(it->it_index < PyTuple_GET_SIZE(seq));
next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq, it->it_index++));
@@ -3193,11 +3270,17 @@ dummy_func(
op(_ITER_CHECK_RANGE, (iter -- iter)) {
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
EXIT_IF(Py_TYPE(r) != &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
EXIT_IF(!_PyObject_IsUniquelyReferenced((PyObject *)r));
#endif
}
replaced op(_ITER_JUMP_RANGE, (iter -- iter)) {
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced((PyObject *)r));
#endif
STAT_INC(FOR_ITER, hit);
if (r->len <= 0) {
// Jump over END_FOR instruction.
@@ -3216,6 +3299,9 @@ dummy_func(
op(_ITER_NEXT_RANGE, (iter -- iter, next)) {
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced((PyObject *)r));
#endif
assert(r->len > 0);
long value = r->start;
r->start = value + r->step;
@@ -3234,6 +3320,13 @@ dummy_func(
op(_FOR_ITER_GEN_FRAME, (iter -- iter, gen_frame: _PyInterpreterFrame*)) {
PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(iter);
DEOPT_IF(Py_TYPE(gen) != &PyGen_Type);
#ifdef Py_GIL_DISABLED
// Since generators can't be used by multiple threads anyway we
// don't need to deopt here, but this lets us work on making
// generators thread-safe without necessarily having to
// specialize them thread-safely as well.
DEOPT_IF(!_PyObject_IsUniquelyReferenced((PyObject *)gen));
#endif
DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING);
STAT_INC(FOR_ITER, hit);
gen_frame = &gen->gi_iframe;
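The comment in _ITER_JUMP_LIST above (any length check done before the fetch can be stale by the time the item is actually read) is easiest to see with concurrent mutation. A minimal stress sketch, assuming a free-threaded build; the names are illustrative only:

    # Illustrative only: one thread iterates a shared list while another grows
    # and shrinks it. When the specialized FOR_ITER_LIST path is active, it
    # re-validates during the fetch itself and falls back to the generic path on
    # a lost race, so this must never crash; which items the reader observes is
    # simply not deterministic.
    import threading

    data = list(range(1000))
    stop = threading.Event()

    def reader():
        while not stop.is_set():
            for item in data:      # iterating while 'data' is mutated elsewhere
                pass

    def writer():
        for _ in range(10_000):
            data.append(0)
            data.pop()
        stop.set()

    r = threading.Thread(target=reader)
    w = threading.Thread(target=writer)
    r.start(); w.start()
    w.join(); r.join()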


@@ -4171,10 +4171,23 @@
case _ITER_CHECK_LIST: {
_PyStackRef iter;
iter = stack_pointer[-1];
if (Py_TYPE(PyStackRef_AsPyObjectBorrow(iter)) != &PyListIter_Type) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyListIter_Type) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced(iter_o)) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
_PyListIterObject *it = (_PyListIterObject *)iter_o;
if (!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) &&
!_PyObject_GC_IS_SHARED(it->it_seq)) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
#endif
break;
}
@@ -4183,6 +4196,7 @@
case _GUARD_NOT_EXHAUSTED_LIST: {
_PyStackRef iter;
iter = stack_pointer[-1];
#ifndef Py_GIL_DISABLED
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyListIterObject *it = (_PyListIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyListIter_Type);
@@ -4198,10 +4212,13 @@
JUMP_TO_JUMP_TARGET();
}
}
#endif
break;
}
case _ITER_NEXT_LIST: {
/* _ITER_NEXT_LIST is not a viable micro-op for tier 2 because it is replaced */
case _ITER_NEXT_LIST_TIER_TWO: {
_PyStackRef iter;
_PyStackRef next;
iter = stack_pointer[-1];
@@ -4210,8 +4227,32 @@
assert(Py_TYPE(iter_o) == &PyListIter_Type);
PyListObject *seq = it->it_seq;
assert(seq);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
assert(_Py_IsOwnedByCurrentThread((PyObject *)seq) ||
_PyObject_GC_IS_SHARED(seq));
STAT_INC(FOR_ITER, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
int result = _PyList_GetItemRefNoLock(seq, it->it_index, &next);
stack_pointer = _PyFrame_GetStackPointer(frame);
// A negative result means we lost a race with another thread
// and we need to take the slow path.
if (result < 0) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
if (result == 0) {
it->it_index = -1;
if (1) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
}
it->it_index++;
#else
assert(it->it_index < PyList_GET_SIZE(seq));
next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(seq, it->it_index++));
#endif
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
@@ -4221,10 +4262,17 @@
case _ITER_CHECK_TUPLE: {
_PyStackRef iter;
iter = stack_pointer[-1];
if (Py_TYPE(PyStackRef_AsPyObjectBorrow(iter)) != &PyTupleIter_Type) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyTupleIter_Type) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced(iter_o)) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
#endif
break;
}
@@ -4236,6 +4284,9 @@
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
#endif
PyTupleObject *seq = it->it_seq;
if (seq == NULL) {
UOP_STAT_INC(uopcode, miss);
@@ -4256,6 +4307,9 @@
_PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
PyTupleObject *seq = it->it_seq;
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
#endif
assert(seq);
assert(it->it_index < PyTuple_GET_SIZE(seq));
next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq, it->it_index++));
@@ -4273,6 +4327,12 @@
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced((PyObject *)r)) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
#endif
break;
}
@@ -4296,6 +4356,9 @@
iter = stack_pointer[-1];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced((PyObject *)r));
#endif
assert(r->len > 0);
long value = r->start;
r->start = value + r->step;
@@ -4321,6 +4384,16 @@
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
#ifdef Py_GIL_DISABLED
// Since generators can't be used by multiple threads anyway we
// don't need to deopt here, but this lets us work on making
// generators thread-safe without necessarily having to
// specialize them thread-safely as well.
if (!_PyObject_IsUniquelyReferenced((PyObject *)gen)) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
#endif
if (gen->gi_frame_state >= FRAME_EXECUTING) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();


@@ -5434,7 +5434,7 @@
iter = stack_pointer[-1];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION
#if ENABLE_SPECIALIZATION_FT
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
@@ -5444,7 +5444,7 @@
}
OPCODE_DEFERRED_INC(FOR_ITER);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
#endif /* ENABLE_SPECIALIZATION */
#endif /* ENABLE_SPECIALIZATION_FT */
}
// _FOR_ITER
{
@@ -5514,6 +5514,17 @@
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
#ifdef Py_GIL_DISABLED
// Since generators can't be used by multiple threads anyway we
// don't need to deopt here, but this lets us work on making
// generators thread-safe without necessarily having to
// specialize them thread-safely as well.
if (!_PyObject_IsUniquelyReferenced((PyObject *)gen)) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
#endif
if (gen->gi_frame_state >= FRAME_EXECUTING) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
@@ -5565,33 +5576,55 @@
// _ITER_CHECK_LIST
{
iter = stack_pointer[-1];
if (Py_TYPE(PyStackRef_AsPyObjectBorrow(iter)) != &PyListIter_Type) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyListIter_Type) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced(iter_o)) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
_PyListIterObject *it = (_PyListIterObject *)iter_o;
if (!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) &&
!_PyObject_GC_IS_SHARED(it->it_seq)) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
#endif
}
// _ITER_JUMP_LIST
{
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyListIterObject *it = (_PyListIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyListIter_Type);
// For free-threaded Python, the loop exit can happen at any point during
// item retrieval, so it doesn't make much sense to check and jump
// separately before item retrieval. Any length check we do here can be
// invalid by the time we actually try to fetch the item.
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
(void)iter_o;
#else
_PyListIterObject *it = (_PyListIterObject *)iter_o;
STAT_INC(FOR_ITER, hit);
PyListObject *seq = it->it_seq;
if (seq == NULL || (size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) {
it->it_index = -1;
#ifndef Py_GIL_DISABLED
if (seq != NULL) {
it->it_seq = NULL;
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_DECREF(seq);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
#endif
/* Jump forward oparg, then skip following END_FOR instruction */
JUMPBY(oparg + 1);
DISPATCH();
}
#endif
}
// _ITER_NEXT_LIST
{
@@ -5600,8 +5633,32 @@
assert(Py_TYPE(iter_o) == &PyListIter_Type);
PyListObject *seq = it->it_seq;
assert(seq);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
assert(_Py_IsOwnedByCurrentThread((PyObject *)seq) ||
_PyObject_GC_IS_SHARED(seq));
STAT_INC(FOR_ITER, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
int result = _PyList_GetItemRefNoLock(seq, it->it_index, &next);
stack_pointer = _PyFrame_GetStackPointer(frame);
// A negative result means we lost a race with another thread
// and we need to take the slow path.
if (result < 0) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
if (result == 0) {
it->it_index = -1;
/* Jump forward oparg, then skip following END_FOR instruction */
JUMPBY(oparg + 1);
DISPATCH();
}
it->it_index++;
#else
assert(it->it_index < PyList_GET_SIZE(seq));
next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(seq, it->it_index++));
#endif
}
stack_pointer[0] = next;
stack_pointer += 1;
@@ -5632,11 +5689,21 @@
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced((PyObject *)r)) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
#endif
}
// _ITER_JUMP_RANGE
{
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced((PyObject *)r));
#endif
STAT_INC(FOR_ITER, hit);
if (r->len <= 0) {
// Jump over END_FOR instruction.
@@ -5648,6 +5715,9 @@
{
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced((PyObject *)r));
#endif
assert(r->len > 0);
long value = r->start;
r->start = value + r->step;
@@ -5681,26 +5751,40 @@
// _ITER_CHECK_TUPLE
{
iter = stack_pointer[-1];
if (Py_TYPE(PyStackRef_AsPyObjectBorrow(iter)) != &PyTupleIter_Type) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(iter_o) != &PyTupleIter_Type) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
#ifdef Py_GIL_DISABLED
if (!_PyObject_IsUniquelyReferenced(iter_o)) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
#endif
}
// _ITER_JUMP_TUPLE
{
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
(void)iter_o;
assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
#endif
_PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
STAT_INC(FOR_ITER, hit);
PyTupleObject *seq = it->it_seq;
if (seq == NULL || it->it_index >= PyTuple_GET_SIZE(seq)) {
if (seq == NULL || (size_t)it->it_index >= (size_t)PyTuple_GET_SIZE(seq)) {
#ifndef Py_GIL_DISABLED
if (seq != NULL) {
it->it_seq = NULL;
_PyFrame_SetStackPointer(frame, stack_pointer);
Py_DECREF(seq);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
#endif
/* Jump forward oparg, then skip following END_FOR instruction */
JUMPBY(oparg + 1);
DISPATCH();
@@ -5712,6 +5796,9 @@
_PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
PyTupleObject *seq = it->it_seq;
#ifdef Py_GIL_DISABLED
assert(_PyObject_IsUniquelyReferenced(iter_o));
#endif
assert(seq);
assert(it->it_index < PyTuple_GET_SIZE(seq));
next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq, it->it_index++));


@@ -363,6 +363,7 @@ _PyUOp_Replacements[MAX_UOP_ID + 1] = {
[_ITER_JUMP_LIST] = _GUARD_NOT_EXHAUSTED_LIST,
[_ITER_JUMP_TUPLE] = _GUARD_NOT_EXHAUSTED_TUPLE,
[_FOR_ITER] = _FOR_ITER_TIER_TWO,
[_ITER_NEXT_LIST] = _ITER_NEXT_LIST_TIER_TWO,
};
static const uint8_t


@@ -1458,7 +1458,9 @@
break;
}
case _ITER_NEXT_LIST: {
/* _ITER_NEXT_LIST is not a viable micro-op for tier 2 */
case _ITER_NEXT_LIST_TIER_TWO: {
JitOptSymbol *next;
next = sym_new_not_null(ctx);
stack_pointer[0] = next;


@@ -2826,45 +2826,56 @@ int
void
_Py_Specialize_ForIter(_PyStackRef iter, _Py_CODEUNIT *instr, int oparg)
{
assert(ENABLE_SPECIALIZATION);
assert(ENABLE_SPECIALIZATION_FT);
assert(_PyOpcode_Caches[FOR_ITER] == INLINE_CACHE_ENTRIES_FOR_ITER);
_PyForIterCache *cache = (_PyForIterCache *)(instr + 1);
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
PyTypeObject *tp = Py_TYPE(iter_o);
#ifdef Py_GIL_DISABLED
// Only specialize for uniquely referenced iterators, so that we know
// they're only referenced by this one thread. This is more limiting
// than we need (even `it = iter(mylist); for item in it:` won't get
// specialized) but we don't have a way to check whether we're the only
// _thread_ who has access to the object.
if (!_PyObject_IsUniquelyReferenced(iter_o))
goto failure;
#endif
if (tp == &PyListIter_Type) {
instr->op.code = FOR_ITER_LIST;
goto success;
#ifdef Py_GIL_DISABLED
_PyListIterObject *it = (_PyListIterObject *)iter_o;
if (!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) &&
!_PyObject_GC_IS_SHARED(it->it_seq)) {
// Maybe this should just set GC_IS_SHARED in a critical
// section, instead of leaving it to the first iteration?
goto failure;
}
#endif
specialize(instr, FOR_ITER_LIST);
return;
}
else if (tp == &PyTupleIter_Type) {
instr->op.code = FOR_ITER_TUPLE;
goto success;
specialize(instr, FOR_ITER_TUPLE);
return;
}
else if (tp == &PyRangeIter_Type) {
instr->op.code = FOR_ITER_RANGE;
goto success;
specialize(instr, FOR_ITER_RANGE);
return;
}
else if (tp == &PyGen_Type && oparg <= SHRT_MAX) {
// Generators are very much not thread-safe, so don't worry about
// the specialization not being thread-safe.
assert(instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == END_FOR ||
instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == INSTRUMENTED_END_FOR
);
/* Don't specialize if PEP 523 is active */
if (_PyInterpreterState_GET()->eval_frame) {
SPECIALIZATION_FAIL(FOR_ITER, SPEC_FAIL_OTHER);
if (_PyInterpreterState_GET()->eval_frame)
goto failure;
}
instr->op.code = FOR_ITER_GEN;
goto success;
specialize(instr, FOR_ITER_GEN);
return;
}
failure:
SPECIALIZATION_FAIL(FOR_ITER,
_PySpecialization_ClassifyIterator(iter_o));
failure:
STAT_INC(FOR_ITER, failure);
instr->op.code = FOR_ITER;
cache->counter = adaptive_counter_backoff(cache->counter);
return;
success:
STAT_INC(FOR_ITER, success);
cache->counter = adaptive_counter_cooldown();
unspecialize(instr);
}
void
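The unique-reference requirement in _Py_Specialize_ForIter above means the shape of the loop decides whether the free-threaded specialization can apply at all. A hypothetical Python sketch of the distinction the comment describes:

    # Illustrative only. On a free-threaded build, the iterator sitting on the
    # evaluation stack must be uniquely referenced for FOR_ITER to specialize.

    mylist = list(range(100))

    # A plain loop creates a fresh list iterator that nothing else references,
    # so this FOR_ITER can specialize to FOR_ITER_LIST.
    for item in mylist:
        pass

    # Binding the iterator to a name first gives it a second reference (the
    # local variable), so _PyObject_IsUniquelyReferenced() fails and, as the
    # comment notes, this loop is not specialized even though it is entirely
    # single-threaded.
    it = iter(mylist)
    for item in it:
        pass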