Mirror of https://github.com/python/cpython.git (synced 2025-08-23 18:24:46 +00:00)
gh-115999: Add free-threaded specialization for FOR_ITER (#128798)
Add free-threaded versions of the existing FOR_ITER specializations (lists, tuples, fast range iterators, and generators) without significantly affecting their thread-safety. (Iterating over shared lists/tuples/ranges is fine, as before. Reusing an iterator between threads is not fine, as before. Sharing generators between threads is a recipe for serious crashes, as before.)
parent db27aee2fe
commit de2f7da77d
13 changed files with 469 additions and 125 deletions
Python/executor_cases.c.h (generated, 79 changed lines)
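
The new Py_GIL_DISABLED guards in the hunks below all share one shape: deoptimize (fall back to the generic FOR_ITER path) unless the iterator object is uniquely referenced and, for list iterators, unless the backing list can be read without a lock. A condensed sketch of that pattern, assuming CPython's internal pycore headers; the helper name iter_guard_passes is invented here for illustration, while the three predicates are the ones the generated code calls:

    #ifdef Py_GIL_DISABLED
    /* Illustrative sketch only -- not code from this commit. */
    static int
    iter_guard_passes(PyObject *iter_o, PyObject *seq)
    {
        /* The iterator itself must be visible to this thread alone;
           a shared iterator deopts to the generic FOR_ITER path. */
        if (!_PyObject_IsUniquelyReferenced(iter_o)) {
            return 0;
        }
        /* A list must be safe to read without locking: either owned by
           the current thread (biased reference counting) or already
           marked as GC-shared. Tuples are immutable and range iterators
           are covered by the unique-reference check alone, so they pass
           NULL here and skip this part. */
        if (seq != NULL &&
            !_Py_IsOwnedByCurrentThread(seq) &&
            !_PyObject_GC_IS_SHARED(seq)) {
            return 0;
        }
        return 1;
    }
    #endif
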
@@ -4171,10 +4171,23 @@
         case _ITER_CHECK_LIST: {
             _PyStackRef iter;
             iter = stack_pointer[-1];
-            if (Py_TYPE(PyStackRef_AsPyObjectBorrow(iter)) != &PyListIter_Type) {
+            PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
+            if (Py_TYPE(iter_o) != &PyListIter_Type) {
                 UOP_STAT_INC(uopcode, miss);
                 JUMP_TO_JUMP_TARGET();
             }
+            #ifdef Py_GIL_DISABLED
+            if (!_PyObject_IsUniquelyReferenced(iter_o)) {
+                UOP_STAT_INC(uopcode, miss);
+                JUMP_TO_JUMP_TARGET();
+            }
+            _PyListIterObject *it = (_PyListIterObject *)iter_o;
+            if (!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) &&
+                !_PyObject_GC_IS_SHARED(it->it_seq)) {
+                UOP_STAT_INC(uopcode, miss);
+                JUMP_TO_JUMP_TARGET();
+            }
+            #endif
             break;
         }

@@ -4183,6 +4196,7 @@
         case _GUARD_NOT_EXHAUSTED_LIST: {
             _PyStackRef iter;
             iter = stack_pointer[-1];
+            #ifndef Py_GIL_DISABLED
             PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
             _PyListIterObject *it = (_PyListIterObject *)iter_o;
             assert(Py_TYPE(iter_o) == &PyListIter_Type);

@@ -4198,10 +4212,13 @@
                     JUMP_TO_JUMP_TARGET();
                 }
             }
+            #endif
             break;
         }

-        case _ITER_NEXT_LIST: {
+        /* _ITER_NEXT_LIST is not a viable micro-op for tier 2 because it is replaced */
+
+        case _ITER_NEXT_LIST_TIER_TWO: {
             _PyStackRef iter;
             _PyStackRef next;
             iter = stack_pointer[-1];

@@ -4210,8 +4227,32 @@
             assert(Py_TYPE(iter_o) == &PyListIter_Type);
             PyListObject *seq = it->it_seq;
             assert(seq);
+            #ifdef Py_GIL_DISABLED
+            assert(_PyObject_IsUniquelyReferenced(iter_o));
+            assert(_Py_IsOwnedByCurrentThread((PyObject *)seq) ||
+                   _PyObject_GC_IS_SHARED(seq));
+            STAT_INC(FOR_ITER, hit);
+            _PyFrame_SetStackPointer(frame, stack_pointer);
+            int result = _PyList_GetItemRefNoLock(seq, it->it_index, &next);
+            stack_pointer = _PyFrame_GetStackPointer(frame);
+            // A negative result means we lost a race with another thread
+            // and we need to take the slow path.
+            if (result < 0) {
+                UOP_STAT_INC(uopcode, miss);
+                JUMP_TO_JUMP_TARGET();
+            }
+            if (result == 0) {
+                it->it_index = -1;
+                if (1) {
+                    UOP_STAT_INC(uopcode, miss);
+                    JUMP_TO_JUMP_TARGET();
+                }
+            }
+            it->it_index++;
+            #else
             assert(it->it_index < PyList_GET_SIZE(seq));
             next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(seq, it->it_index++));
+            #endif
             stack_pointer[0] = next;
             stack_pointer += 1;
             assert(WITHIN_STACK_BOUNDS());

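The fast path above turns on the tri-state result of _PyList_GetItemRefNoLock(). A hypothetical wrapper (illustration only; next_item_or_deopt does not exist in CPython) spelling out how the hunk interprets each result, where the meaning of a positive result is inferred from the surrounding code rather than stated in the diff:

    /* Sketch, assuming CPython's internal headers. Mirrors the logic of
       the Py_GIL_DISABLED branch of _ITER_NEXT_LIST_TIER_TWO above. */
    static int
    next_item_or_deopt(PyListObject *seq, _PyListIterObject *it,
                       _PyStackRef *next)
    {
        int result = _PyList_GetItemRefNoLock(seq, it->it_index, next);
        if (result < 0) {
            // Lost a race with another thread (for example a concurrent
            // resize): deopt and let the locking slow path retry.
            return -1;
        }
        if (result == 0) {
            // Index is past the end: mark the iterator exhausted and
            // deopt so the exhausted-iterator handling runs.
            it->it_index = -1;
            return -1;
        }
        // Positive result (inferred): *next now holds a strong reference
        // to the item; advance the index for the following iteration.
        it->it_index++;
        return 0;
    }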
|
@@ -4221,10 +4262,17 @@
         case _ITER_CHECK_TUPLE: {
             _PyStackRef iter;
             iter = stack_pointer[-1];
-            if (Py_TYPE(PyStackRef_AsPyObjectBorrow(iter)) != &PyTupleIter_Type) {
+            PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
+            if (Py_TYPE(iter_o) != &PyTupleIter_Type) {
                 UOP_STAT_INC(uopcode, miss);
                 JUMP_TO_JUMP_TARGET();
             }
+            #ifdef Py_GIL_DISABLED
+            if (!_PyObject_IsUniquelyReferenced(iter_o)) {
+                UOP_STAT_INC(uopcode, miss);
+                JUMP_TO_JUMP_TARGET();
+            }
+            #endif
             break;
         }

@@ -4236,6 +4284,9 @@
             PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
             _PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
             assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
+            #ifdef Py_GIL_DISABLED
+            assert(_PyObject_IsUniquelyReferenced(iter_o));
+            #endif
             PyTupleObject *seq = it->it_seq;
             if (seq == NULL) {
                 UOP_STAT_INC(uopcode, miss);

@@ -4256,6 +4307,9 @@
             _PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
             assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
             PyTupleObject *seq = it->it_seq;
+            #ifdef Py_GIL_DISABLED
+            assert(_PyObject_IsUniquelyReferenced(iter_o));
+            #endif
             assert(seq);
             assert(it->it_index < PyTuple_GET_SIZE(seq));
             next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq, it->it_index++));

@@ -4273,6 +4327,12 @@
                 UOP_STAT_INC(uopcode, miss);
                 JUMP_TO_JUMP_TARGET();
             }
+            #ifdef Py_GIL_DISABLED
+            if (!_PyObject_IsUniquelyReferenced((PyObject *)r)) {
+                UOP_STAT_INC(uopcode, miss);
+                JUMP_TO_JUMP_TARGET();
+            }
+            #endif
             break;
         }

@@ -4296,6 +4356,9 @@
             iter = stack_pointer[-1];
             _PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
             assert(Py_TYPE(r) == &PyRangeIter_Type);
+            #ifdef Py_GIL_DISABLED
+            assert(_PyObject_IsUniquelyReferenced((PyObject *)r));
+            #endif
             assert(r->len > 0);
             long value = r->start;
             r->start = value + r->step;

@@ -4321,6 +4384,16 @@
                 UOP_STAT_INC(uopcode, miss);
                 JUMP_TO_JUMP_TARGET();
             }
+            #ifdef Py_GIL_DISABLED
+            // Since generators can't be used by multiple threads anyway we
+            // don't need to deopt here, but this lets us work on making
+            // generators thread-safe without necessarily having to
+            // specialize them thread-safely as well.
+            if (!_PyObject_IsUniquelyReferenced((PyObject *)gen)) {
+                UOP_STAT_INC(uopcode, miss);
+                JUMP_TO_JUMP_TARGET();
+            }
+            #endif
             if (gen->gi_frame_state >= FRAME_EXECUTING) {
                 UOP_STAT_INC(uopcode, miss);
                 JUMP_TO_JUMP_TARGET();