mirror of https://github.com/python/cpython.git
synced 2025-07-07 19:35:27 +00:00
gh-115999: Add free-threaded specialization for FOR_ITER (#128798)
Add free-threaded versions of the existing FOR_ITER specializations (list, tuple, and fast range iterators, plus generators), without significantly affecting their thread safety. (Iterating over shared lists/tuples/ranges should be fine, as before. Reusing an iterator between threads is not fine, as before. Sharing a generator between threads is a recipe for significant crashes, as before.)
parent db27aee2fe
commit de2f7da77d
13 changed files with 469 additions and 125 deletions
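The thread-safety notes in the commit message describe usage patterns rather than new guarantees. As a rough illustration (not part of the commit; the names shared_list, sum_shared_list, and gen are invented for this sketch, and nothing here checks whether specialization actually fired), the patterns look like this in Python:

import threading

shared_list = list(range(100_000))

def sum_shared_list():
    # Fine, as before: each thread's `for` loop creates its own list
    # iterator over the shared list, which is the shape of code the
    # FOR_ITER_LIST specialization targets.
    total = 0
    for item in shared_list:
        total += item
    return total

threads = [threading.Thread(target=sum_shared_list) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()

# Not fine, as before: handing one iterator object to several threads races
# on its internal position. (Per the comment in the diff below, even the
# single-threaded `it = iter(shared_list); for item in it: ...` form is not
# specialized on the free-threaded build, because `it` is not uniquely
# referenced.)
shared_iter = iter(shared_list)

# A recipe for crashes, as before: resuming one generator from several threads.
def gen():
    yield from shared_list

shared_gen = gen()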
@@ -2826,45 +2826,56 @@ int
 void
 _Py_Specialize_ForIter(_PyStackRef iter, _Py_CODEUNIT *instr, int oparg)
 {
-    assert(ENABLE_SPECIALIZATION);
+    assert(ENABLE_SPECIALIZATION_FT);
     assert(_PyOpcode_Caches[FOR_ITER] == INLINE_CACHE_ENTRIES_FOR_ITER);
-    _PyForIterCache *cache = (_PyForIterCache *)(instr + 1);
     PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
     PyTypeObject *tp = Py_TYPE(iter_o);
+#ifdef Py_GIL_DISABLED
+    // Only specialize for uniquely referenced iterators, so that we know
+    // they're only referenced by this one thread. This is more limiting
+    // than we need (even `it = iter(mylist); for item in it:` won't get
+    // specialized) but we don't have a way to check whether we're the only
+    // _thread_ who has access to the object.
+    if (!_PyObject_IsUniquelyReferenced(iter_o))
+        goto failure;
+#endif
     if (tp == &PyListIter_Type) {
-        instr->op.code = FOR_ITER_LIST;
-        goto success;
+#ifdef Py_GIL_DISABLED
+        _PyListIterObject *it = (_PyListIterObject *)iter_o;
+        if (!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) &&
+            !_PyObject_GC_IS_SHARED(it->it_seq)) {
+            // Maybe this should just set GC_IS_SHARED in a critical
+            // section, instead of leaving it to the first iteration?
+            goto failure;
+        }
+#endif
+        specialize(instr, FOR_ITER_LIST);
+        return;
     }
     else if (tp == &PyTupleIter_Type) {
-        instr->op.code = FOR_ITER_TUPLE;
-        goto success;
+        specialize(instr, FOR_ITER_TUPLE);
+        return;
     }
     else if (tp == &PyRangeIter_Type) {
-        instr->op.code = FOR_ITER_RANGE;
-        goto success;
+        specialize(instr, FOR_ITER_RANGE);
+        return;
     }
     else if (tp == &PyGen_Type && oparg <= SHRT_MAX) {
+        // Generators are very much not thread-safe, so don't worry about
+        // the specialization not being thread-safe.
         assert(instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == END_FOR ||
                instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == INSTRUMENTED_END_FOR
         );
         /* Don't specialize if PEP 523 is active */
-        if (_PyInterpreterState_GET()->eval_frame) {
-            SPECIALIZATION_FAIL(FOR_ITER, SPEC_FAIL_OTHER);
+        if (_PyInterpreterState_GET()->eval_frame)
             goto failure;
-        }
-        instr->op.code = FOR_ITER_GEN;
-        goto success;
+        specialize(instr, FOR_ITER_GEN);
+        return;
     }
+failure:
     SPECIALIZATION_FAIL(FOR_ITER,
                         _PySpecialization_ClassifyIterator(iter_o));
-failure:
-    STAT_INC(FOR_ITER, failure);
-    instr->op.code = FOR_ITER;
-    cache->counter = adaptive_counter_backoff(cache->counter);
-    return;
-success:
-    STAT_INC(FOR_ITER, success);
-    cache->counter = adaptive_counter_cooldown();
+    unspecialize(instr);
 }
 
 void