gh-126835: Move constant unaryop & binop folding to CFG (#129550)

Yan Yanchii 2025-02-21 18:54:22 +01:00 committed by GitHub
parent d88677ac20
commit 38642bff13
6 changed files with 1057 additions and 443 deletions


@@ -56,199 +56,6 @@ has_starred(asdl_expr_seq *elts)
return 0;
}
static PyObject*
unary_not(PyObject *v)
{
int r = PyObject_IsTrue(v);
if (r < 0)
return NULL;
return PyBool_FromLong(!r);
}
static int
fold_unaryop(expr_ty node, PyArena *arena, _PyASTOptimizeState *state)
{
expr_ty arg = node->v.UnaryOp.operand;
if (arg->kind != Constant_kind) {
/* Fold not into comparison */
if (node->v.UnaryOp.op == Not && arg->kind == Compare_kind &&
asdl_seq_LEN(arg->v.Compare.ops) == 1) {
/* Eq and NotEq are often implemented in terms of one another, so
folding not (self == other) into self != other breaks implementation
of !=. Detecting such cases doesn't seem worthwhile.
Python uses </> for 'is subset'/'is superset' operations on sets.
They don't satisfy not folding laws. */
cmpop_ty op = asdl_seq_GET(arg->v.Compare.ops, 0);
switch (op) {
case Is:
op = IsNot;
break;
case IsNot:
op = Is;
break;
case In:
op = NotIn;
break;
case NotIn:
op = In;
break;
// The remaining comparison operators can't be safely inverted
case Eq:
case NotEq:
case Lt:
case LtE:
case Gt:
case GtE:
op = 0; // The AST enums leave "0" free as an "unused" marker
break;
// No default case, so the compiler will emit a warning if new
// comparison operators are added without being handled here
}
if (op) {
asdl_seq_SET(arg->v.Compare.ops, 0, op);
COPY_NODE(node, arg);
return 1;
}
}
return 1;
}
typedef PyObject *(*unary_op)(PyObject*);
static const unary_op ops[] = {
[Invert] = PyNumber_Invert,
[Not] = unary_not,
[UAdd] = PyNumber_Positive,
[USub] = PyNumber_Negative,
};
PyObject *newval = ops[node->v.UnaryOp.op](arg->v.Constant.value);
return make_const(node, newval, arena);
}
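The restriction described in the comment above is easiest to see with concrete values. A small Python illustration (not part of the commit) of why only the `is`/`in` comparisons are inverted while `<`/`>` and `==`/`!=` are left alone:

```python
# For sets, < and > mean "proper subset" / "proper superset", so both
# `a < b` and `a >= b` can be False at once; `not` cannot be folded in.
a, b = {1, 2}, {2, 3}
print(a < b)         # False: a is not a proper subset of b
print(a >= b)        # False as well: a is not a superset of b
print(not (a < b))   # True, which differs from (a >= b)

# `is`/`in` do have exact negative forms, so the rewrite is safe there.
x = y = object()
assert (not (x is y)) == (x is not y)
assert (not (1 in [1, 2])) == (1 not in [1, 2])
```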
/* Check that a collection doesn't contain too many items (including
subcollections). This protects against creating a constant that needs
too much time to hash.
"limit" is the maximal number of items.
Returns a negative number if the total number of items exceeds the
limit. Otherwise returns the limit minus the total number of items.
*/
static Py_ssize_t
check_complexity(PyObject *obj, Py_ssize_t limit)
{
if (PyTuple_Check(obj)) {
Py_ssize_t i;
limit -= PyTuple_GET_SIZE(obj);
for (i = 0; limit >= 0 && i < PyTuple_GET_SIZE(obj); i++) {
limit = check_complexity(PyTuple_GET_ITEM(obj, i), limit);
}
return limit;
}
return limit;
}
#define MAX_INT_SIZE 128 /* bits */
#define MAX_COLLECTION_SIZE 256 /* items */
#define MAX_STR_SIZE 4096 /* characters */
#define MAX_TOTAL_ITEMS 1024 /* including nested collections */
static PyObject *
safe_multiply(PyObject *v, PyObject *w)
{
if (PyLong_Check(v) && PyLong_Check(w) &&
!_PyLong_IsZero((PyLongObject *)v) && !_PyLong_IsZero((PyLongObject *)w)
) {
int64_t vbits = _PyLong_NumBits(v);
int64_t wbits = _PyLong_NumBits(w);
assert(vbits >= 0);
assert(wbits >= 0);
if (vbits + wbits > MAX_INT_SIZE) {
return NULL;
}
}
else if (PyLong_Check(v) && PyTuple_Check(w)) {
Py_ssize_t size = PyTuple_GET_SIZE(w);
if (size) {
long n = PyLong_AsLong(v);
if (n < 0 || n > MAX_COLLECTION_SIZE / size) {
return NULL;
}
if (n && check_complexity(w, MAX_TOTAL_ITEMS / n) < 0) {
return NULL;
}
}
}
else if (PyLong_Check(v) && (PyUnicode_Check(w) || PyBytes_Check(w))) {
Py_ssize_t size = PyUnicode_Check(w) ? PyUnicode_GET_LENGTH(w) :
PyBytes_GET_SIZE(w);
if (size) {
long n = PyLong_AsLong(v);
if (n < 0 || n > MAX_STR_SIZE / size) {
return NULL;
}
}
}
else if (PyLong_Check(w) &&
(PyTuple_Check(v) || PyUnicode_Check(v) || PyBytes_Check(v)))
{
return safe_multiply(w, v);
}
return PyNumber_Multiply(v, w);
}
static PyObject *
safe_power(PyObject *v, PyObject *w)
{
if (PyLong_Check(v) && PyLong_Check(w) &&
!_PyLong_IsZero((PyLongObject *)v) && _PyLong_IsPositive((PyLongObject *)w)
) {
int64_t vbits = _PyLong_NumBits(v);
size_t wbits = PyLong_AsSize_t(w);
assert(vbits >= 0);
if (wbits == (size_t)-1) {
return NULL;
}
if ((uint64_t)vbits > MAX_INT_SIZE / wbits) {
return NULL;
}
}
return PyNumber_Power(v, w, Py_None);
}
static PyObject *
safe_lshift(PyObject *v, PyObject *w)
{
if (PyLong_Check(v) && PyLong_Check(w) &&
!_PyLong_IsZero((PyLongObject *)v) && !_PyLong_IsZero((PyLongObject *)w)
) {
int64_t vbits = _PyLong_NumBits(v);
size_t wbits = PyLong_AsSize_t(w);
assert(vbits >= 0);
if (wbits == (size_t)-1) {
return NULL;
}
if (wbits > MAX_INT_SIZE || (uint64_t)vbits > MAX_INT_SIZE - wbits) {
return NULL;
}
}
return PyNumber_Lshift(v, w);
}
static PyObject *
safe_mod(PyObject *v, PyObject *w)
{
if (PyUnicode_Check(v) || PyBytes_Check(v)) {
return NULL;
}
return PyNumber_Remainder(v, w);
}
static expr_ty
parse_literal(PyObject *fmt, Py_ssize_t *ppos, PyArena *arena)
{
@@ -468,58 +275,7 @@ fold_binop(expr_ty node, PyArena *arena, _PyASTOptimizeState *state)
return optimize_format(node, lv, rhs->v.Tuple.elts, arena);
}
if (rhs->kind != Constant_kind) {
return 1;
}
PyObject *rv = rhs->v.Constant.value;
PyObject *newval = NULL;
switch (node->v.BinOp.op) {
case Add:
newval = PyNumber_Add(lv, rv);
break;
case Sub:
newval = PyNumber_Subtract(lv, rv);
break;
case Mult:
newval = safe_multiply(lv, rv);
break;
case Div:
newval = PyNumber_TrueDivide(lv, rv);
break;
case FloorDiv:
newval = PyNumber_FloorDivide(lv, rv);
break;
case Mod:
newval = safe_mod(lv, rv);
break;
case Pow:
newval = safe_power(lv, rv);
break;
case LShift:
newval = safe_lshift(lv, rv);
break;
case RShift:
newval = PyNumber_Rshift(lv, rv);
break;
case BitOr:
newval = PyNumber_Or(lv, rv);
break;
case BitXor:
newval = PyNumber_Xor(lv, rv);
break;
case BitAnd:
newval = PyNumber_And(lv, rv);
break;
// No builtin constants implement the following operators
case MatMult:
return 1;
// No default case, so the compiler will emit a warning if new binary
// operators are added without being handled here
}
return make_const(node, newval, arena);
return 1;
}
static PyObject*
@@ -670,7 +426,6 @@ astfold_expr(expr_ty node_, PyArena *ctx_, _PyASTOptimizeState *state)
break;
case UnaryOp_kind:
CALL(astfold_expr, expr_ty, node_->v.UnaryOp.operand);
CALL(fold_unaryop, expr_ty, node_);
break;
case Lambda_kind:
CALL(astfold_arguments, arguments_ty, node_->v.Lambda.args);
@@ -961,6 +716,44 @@ astfold_withitem(withitem_ty node_, PyArena *ctx_, _PyASTOptimizeState *state)
return 1;
}
static int
fold_const_match_patterns(expr_ty node, PyArena *ctx_, _PyASTOptimizeState *state)
{
switch (node->kind)
{
case UnaryOp_kind:
{
if (node->v.UnaryOp.op == USub &&
node->v.UnaryOp.operand->kind == Constant_kind)
{
PyObject *operand = node->v.UnaryOp.operand->v.Constant.value;
PyObject *folded = PyNumber_Negative(operand);
return make_const(node, folded, ctx_);
}
break;
}
case BinOp_kind:
{
operator_ty op = node->v.BinOp.op;
if ((op == Add || op == Sub) &&
node->v.BinOp.right->kind == Constant_kind)
{
CALL(fold_const_match_patterns, expr_ty, node->v.BinOp.left);
if (node->v.BinOp.left->kind == Constant_kind) {
PyObject *left = node->v.BinOp.left->v.Constant.value;
PyObject *right = node->v.BinOp.right->v.Constant.value;
PyObject *folded = op == Add ? PyNumber_Add(left, right) : PyNumber_Subtract(left, right);
return make_const(node, folded, ctx_);
}
}
break;
}
default:
break;
}
return 1;
}
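A hypothetical example of the patterns this new helper targets; `-1` and `1 + 2j` are UnaryOp/BinOp nodes in the AST, and MatchValue needs them reduced to constants before pattern compilation:

```python
# Illustration only: value patterns written with a unary minus or a
# complex literal are folded to constants before compiling the match.
def classify(x):
    match x:
        case -1:            # UnaryOp(USub, Constant(1)) in the AST
            return "minus one"
        case 1 + 2j:        # BinOp(Constant(1), Add, Constant(2j))
            return "one plus two j"
        case _:
            return "other"

print(classify(-1), classify(1 + 2j))   # minus one one plus two j
```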
static int
astfold_pattern(pattern_ty node_, PyArena *ctx_, _PyASTOptimizeState *state)
{
@@ -970,7 +763,7 @@ astfold_pattern(pattern_ty node_, PyArena *ctx_, _PyASTOptimizeState *state)
ENTER_RECURSIVE();
switch (node_->kind) {
case MatchValue_kind:
CALL(astfold_expr, expr_ty, node_->v.MatchValue.value);
CALL(fold_const_match_patterns, expr_ty, node_->v.MatchValue.value);
break;
case MatchSingleton_kind:
break;
@@ -978,7 +771,7 @@ astfold_pattern(pattern_ty node_, PyArena *ctx_, _PyASTOptimizeState *state)
CALL_SEQ(astfold_pattern, pattern, node_->v.MatchSequence.patterns);
break;
case MatchMapping_kind:
CALL_SEQ(astfold_expr, expr, node_->v.MatchMapping.keys);
CALL_SEQ(fold_const_match_patterns, expr, node_->v.MatchMapping.keys);
CALL_SEQ(astfold_pattern, pattern, node_->v.MatchMapping.patterns);
break;
case MatchClass_kind:


@@ -1406,6 +1406,26 @@ nop_out(basicblock *bb, int start, int count)
}
}
/* Steals reference to "newconst" */
static int
instr_make_load_const(cfg_instr *instr, PyObject *newconst,
PyObject *consts, PyObject *const_cache)
{
if (PyLong_CheckExact(newconst)) {
int overflow;
long val = PyLong_AsLongAndOverflow(newconst, &overflow);
if (!overflow && _PY_IS_SMALL_INT(val)) {
assert(_Py_IsImmortal(newconst));
INSTR_SET_OP1(instr, LOAD_SMALL_INT, (int)val);
return SUCCESS;
}
}
int oparg = add_const(newconst, consts, const_cache);
RETURN_IF_ERROR(oparg);
INSTR_SET_OP1(instr, LOAD_CONST, oparg);
return SUCCESS;
}
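A rough Python model (not from the commit) of the opcode choice instr_make_load_const makes; the cached small-int range is assumed here to be CPython's usual -5..256:

```python
# Sketch: small ints become LOAD_SMALL_INT with the value as oparg;
# everything else is appended to the consts table and loaded by index.
def choose_load(value, consts):
    if type(value) is int and -5 <= value <= 256:   # assumed _PY_IS_SMALL_INT range
        return ("LOAD_SMALL_INT", value)
    if value not in consts:
        consts.append(value)
    return ("LOAD_CONST", consts.index(value))

consts = []
print(choose_load(3, consts))      # ('LOAD_SMALL_INT', 3)
print(choose_load(1000, consts))   # ('LOAD_CONST', 0)
```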
/* Replace LOAD_CONST c1, LOAD_CONST c2 ... LOAD_CONST cn, BUILD_TUPLE n
with LOAD_CONST (c1, c2, ... cn).
The consts table must still be in list form so that the
@@ -1413,25 +1433,23 @@ nop_out(basicblock *bb, int start, int count)
Called with codestr pointing to the first LOAD_CONST.
*/
static int
fold_tuple_of_constants(basicblock *bb, int n, PyObject *consts, PyObject *const_cache)
fold_tuple_of_constants(basicblock *bb, int i, PyObject *consts, PyObject *const_cache)
{
/* Pre-conditions */
assert(PyDict_CheckExact(const_cache));
assert(PyList_CheckExact(consts));
cfg_instr *instr = &bb->b_instr[n];
cfg_instr *instr = &bb->b_instr[i];
assert(instr->i_opcode == BUILD_TUPLE);
int seq_size = instr->i_oparg;
PyObject *newconst;
RETURN_IF_ERROR(get_constant_sequence(bb, n-1, seq_size, consts, &newconst));
RETURN_IF_ERROR(get_constant_sequence(bb, i-1, seq_size, consts, &newconst));
if (newconst == NULL) {
/* not a const sequence */
return SUCCESS;
}
assert(PyTuple_CheckExact(newconst) && PyTuple_GET_SIZE(newconst) == seq_size);
int index = add_const(newconst, consts, const_cache);
RETURN_IF_ERROR(index);
nop_out(bb, n-1, seq_size);
INSTR_SET_OP1(instr, LOAD_CONST, index);
assert(PyTuple_Size(newconst) == seq_size);
RETURN_IF_ERROR(instr_make_load_const(instr, newconst, consts, const_cache));
nop_out(bb, i-1, seq_size);
return SUCCESS;
}
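The effect of fold_tuple_of_constants can be checked from Python with dis; a quick sanity check (the exact disassembly varies by version):

```python
import dis

# A tuple built only from constants should end up as one constant load
# rather than several LOAD_CONST/LOAD_SMALL_INT plus BUILD_TUPLE.
code = compile("t = (1, 2, 3)", "<example>", "exec")
dis.dis(code)
print((1, 2, 3) in code.co_consts)   # expected True once the tuple is folded
```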
@@ -1469,7 +1487,7 @@ optimize_lists_and_sets(basicblock *bb, int i, int nextop,
}
return SUCCESS;
}
assert(PyTuple_CheckExact(newconst) && PyTuple_GET_SIZE(newconst) == seq_size);
assert(PyTuple_Size(newconst) == seq_size);
if (instr->i_opcode == BUILD_SET) {
PyObject *frozenset = PyFrozenSet_New(newconst);
if (frozenset == NULL) {
@@ -1497,45 +1515,200 @@ optimize_lists_and_sets(basicblock *bb, int i, int nextop,
return SUCCESS;
}
/* Determine opcode & oparg for freshly folded constant. */
static int
newop_from_folded(PyObject *newconst, PyObject *consts,
PyObject *const_cache, int *newopcode, int *newoparg)
/* Check whether the total number of items in the (possibly nested) collection obj exceeds
* limit. Return a negative number if it does, and a non-negative number otherwise.
* Used to avoid creating constants which are slow to hash.
*/
static Py_ssize_t
const_folding_check_complexity(PyObject *obj, Py_ssize_t limit)
{
if (PyLong_CheckExact(newconst)) {
int overflow;
long val = PyLong_AsLongAndOverflow(newconst, &overflow);
if (!overflow && _PY_IS_SMALL_INT(val)) {
*newopcode = LOAD_SMALL_INT;
*newoparg = val;
return SUCCESS;
if (PyTuple_Check(obj)) {
Py_ssize_t i;
limit -= PyTuple_GET_SIZE(obj);
for (i = 0; limit >= 0 && i < PyTuple_GET_SIZE(obj); i++) {
limit = const_folding_check_complexity(PyTuple_GET_ITEM(obj, i), limit);
if (limit < 0) {
return limit;
}
}
}
*newopcode = LOAD_CONST;
*newoparg = add_const(newconst, consts, const_cache);
RETURN_IF_ERROR(*newoparg);
return SUCCESS;
return limit;
}
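A Python sketch of the same budget check, for illustration only:

```python
# Subtract each tuple's length from the remaining limit and recurse into
# nested tuples; a negative result means the constant is too big to fold.
def check_complexity(obj, limit):
    if isinstance(obj, tuple):
        limit -= len(obj)
        for item in obj:
            if limit < 0:
                break
            limit = check_complexity(item, limit)
    return limit

print(check_complexity(((1, 2), (3, 4)), 10))   # 4: still within budget
print(check_complexity(((0,) * 8,) * 8, 16))    # negative: over budget
```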
#define MAX_INT_SIZE 128 /* bits */
#define MAX_COLLECTION_SIZE 256 /* items */
#define MAX_STR_SIZE 4096 /* characters */
#define MAX_TOTAL_ITEMS 1024 /* including nested collections */
static PyObject *
const_folding_safe_multiply(PyObject *v, PyObject *w)
{
if (PyLong_Check(v) && PyLong_Check(w) &&
!_PyLong_IsZero((PyLongObject *)v) && !_PyLong_IsZero((PyLongObject *)w)
) {
int64_t vbits = _PyLong_NumBits(v);
int64_t wbits = _PyLong_NumBits(w);
assert(vbits >= 0);
assert(wbits >= 0);
if (vbits + wbits > MAX_INT_SIZE) {
return NULL;
}
}
else if (PyLong_Check(v) && PyTuple_Check(w)) {
Py_ssize_t size = PyTuple_GET_SIZE(w);
if (size) {
long n = PyLong_AsLong(v);
if (n < 0 || n > MAX_COLLECTION_SIZE / size) {
return NULL;
}
if (n && const_folding_check_complexity(w, MAX_TOTAL_ITEMS / n) < 0) {
return NULL;
}
}
}
else if (PyLong_Check(v) && (PyUnicode_Check(w) || PyBytes_Check(w))) {
Py_ssize_t size = PyUnicode_Check(w) ? PyUnicode_GET_LENGTH(w) :
PyBytes_GET_SIZE(w);
if (size) {
long n = PyLong_AsLong(v);
if (n < 0 || n > MAX_STR_SIZE / size) {
return NULL;
}
}
}
else if (PyLong_Check(w) &&
(PyTuple_Check(v) || PyUnicode_Check(v) || PyBytes_Check(v)))
{
return const_folding_safe_multiply(w, v);
}
return PyNumber_Multiply(v, w);
}
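An approximate Python rendering of the guard above (it leaves out the nested-complexity check); `bit_length` stands in for _PyLong_NumBits:

```python
MAX_INT_SIZE = 128         # bits
MAX_COLLECTION_SIZE = 256  # items
MAX_STR_SIZE = 4096        # characters

def safe_to_fold_multiply(v, w):
    # int * int: refuse when the product could exceed MAX_INT_SIZE bits.
    if isinstance(v, int) and isinstance(w, int) and v and w:
        return v.bit_length() + w.bit_length() <= MAX_INT_SIZE
    # int * tuple / str / bytes: bound the size of the repeated result.
    if isinstance(v, int) and isinstance(w, tuple):
        return not w or 0 <= v <= MAX_COLLECTION_SIZE // len(w)
    if isinstance(v, int) and isinstance(w, (str, bytes)):
        return not w or 0 <= v <= MAX_STR_SIZE // len(w)
    if isinstance(w, int) and isinstance(v, (tuple, str, bytes)):
        return safe_to_fold_multiply(w, v)
    return True   # anything else is left to PyNumber_Multiply

print(safe_to_fold_multiply(2**100, 2**100))   # False: ~200-bit product
print(safe_to_fold_multiply(3, "abc"))         # True
print(safe_to_fold_multiply(10_000, "abc"))    # False: result string too long
```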
static PyObject *
const_folding_safe_power(PyObject *v, PyObject *w)
{
if (PyLong_Check(v) && PyLong_Check(w) &&
!_PyLong_IsZero((PyLongObject *)v) && _PyLong_IsPositive((PyLongObject *)w)
) {
int64_t vbits = _PyLong_NumBits(v);
size_t wbits = PyLong_AsSize_t(w);
assert(vbits >= 0);
if (wbits == (size_t)-1) {
return NULL;
}
if ((uint64_t)vbits > MAX_INT_SIZE / wbits) {
return NULL;
}
}
return PyNumber_Power(v, w, Py_None);
}
static PyObject *
const_folding_safe_lshift(PyObject *v, PyObject *w)
{
if (PyLong_Check(v) && PyLong_Check(w) &&
!_PyLong_IsZero((PyLongObject *)v) && !_PyLong_IsZero((PyLongObject *)w)
) {
int64_t vbits = _PyLong_NumBits(v);
size_t wbits = PyLong_AsSize_t(w);
assert(vbits >= 0);
if (wbits == (size_t)-1) {
return NULL;
}
if (wbits > MAX_INT_SIZE || (uint64_t)vbits > MAX_INT_SIZE - wbits) {
return NULL;
}
}
return PyNumber_Lshift(v, w);
}
static PyObject *
const_folding_safe_mod(PyObject *v, PyObject *w)
{
if (PyUnicode_Check(v) || PyBytes_Check(v)) {
return NULL;
}
return PyNumber_Remainder(v, w);
}
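The power and shift guards follow the same pattern; a hedged Python approximation, comparing the exponent or shift amount against the remaining bit budget:

```python
MAX_INT_SIZE = 128   # bits

def safe_to_fold_power(v, w):
    if isinstance(v, int) and isinstance(w, int) and v != 0 and w > 0:
        return v.bit_length() <= MAX_INT_SIZE // w
    return True

def safe_to_fold_lshift(v, w):
    if isinstance(v, int) and isinstance(w, int) and v and w:
        return 0 <= w <= MAX_INT_SIZE and v.bit_length() <= MAX_INT_SIZE - w
    return True

print(safe_to_fold_power(2, 64))      # True: 2**64 needs 65 bits
print(safe_to_fold_power(10, 1000))   # False: far beyond 128 bits
print(safe_to_fold_lshift(1, 200))    # False: shift amount alone is too large
```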
static PyObject *
eval_const_binop(PyObject *left, int op, PyObject *right)
{
assert(left != NULL && right != NULL);
assert(op >= 0 && op <= NB_OPARG_LAST);
PyObject *result = NULL;
switch (op) {
case NB_ADD:
result = PyNumber_Add(left, right);
break;
case NB_SUBTRACT:
result = PyNumber_Subtract(left, right);
break;
case NB_MULTIPLY:
result = const_folding_safe_multiply(left, right);
break;
case NB_TRUE_DIVIDE:
result = PyNumber_TrueDivide(left, right);
break;
case NB_FLOOR_DIVIDE:
result = PyNumber_FloorDivide(left, right);
break;
case NB_REMAINDER:
result = const_folding_safe_mod(left, right);
break;
case NB_POWER:
result = const_folding_safe_power(left, right);
break;
case NB_LSHIFT:
result = const_folding_safe_lshift(left, right);
break;
case NB_RSHIFT:
result = PyNumber_Rshift(left, right);
break;
case NB_OR:
result = PyNumber_Or(left, right);
break;
case NB_XOR:
result = PyNumber_Xor(left, right);
break;
case NB_AND:
result = PyNumber_And(left, right);
break;
case NB_SUBSCR:
result = PyObject_GetItem(left, right);
break;
case NB_MATRIX_MULTIPLY:
// No builtin constants implement matrix multiplication
break;
default:
Py_UNREACHABLE();
}
return result;
}
static int
optimize_if_const_binop(basicblock *bb, int i, PyObject *consts, PyObject *const_cache)
fold_const_binop(basicblock *bb, int i, PyObject *consts, PyObject *const_cache)
{
#define BINOP_OPERAND_COUNT 2
assert(PyDict_CheckExact(const_cache));
assert(PyList_CheckExact(consts));
cfg_instr *binop = &bb->b_instr[i];
assert(binop->i_opcode == BINARY_OP);
if (binop->i_oparg != NB_SUBSCR) {
/* TODO: support other binary ops */
return SUCCESS;
}
PyObject *pair;
RETURN_IF_ERROR(get_constant_sequence(bb, i-1, 2, consts, &pair));
RETURN_IF_ERROR(get_constant_sequence(bb, i-1, BINOP_OPERAND_COUNT, consts, &pair));
if (pair == NULL) {
/* not a const sequence */
return SUCCESS;
}
assert(PyTuple_CheckExact(pair) && PyTuple_Size(pair) == 2);
assert(PyTuple_Size(pair) == BINOP_OPERAND_COUNT);
PyObject *left = PyTuple_GET_ITEM(pair, 0);
PyObject *right = PyTuple_GET_ITEM(pair, 1);
assert(left != NULL && right != NULL);
PyObject *newconst = PyObject_GetItem(left, right);
PyObject *newconst = eval_const_binop(left, binop->i_oparg, right);
Py_DECREF(pair);
if (newconst == NULL) {
if (PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) {
@@ -1544,10 +1717,78 @@ optimize_if_const_binop(basicblock *bb, int i, PyObject *consts, PyObject *const
PyErr_Clear();
return SUCCESS;
}
int newopcode, newoparg;
RETURN_IF_ERROR(newop_from_folded(newconst, consts, const_cache, &newopcode, &newoparg));
nop_out(bb, i-1, 2);
INSTR_SET_OP1(binop, newopcode, newoparg);
RETURN_IF_ERROR(instr_make_load_const(binop, newconst, consts, const_cache));
nop_out(bb, i-1, BINOP_OPERAND_COUNT);
return SUCCESS;
}
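The net effect of fold_const_binop can be observed with dis; illustration only, and the exact instruction names depend on the CPython build:

```python
import dis

# With constant-binop folding in the CFG, a chain of constant arithmetic
# should leave a single constant behind instead of repeated BINARY_OP.
code = compile("seconds = 24 * 60 * 60", "<example>", "exec")
dis.dis(code)
print(86400 in code.co_consts)   # expected True once folding has happened
```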
static PyObject *
eval_const_unaryop(PyObject *operand, int opcode, int oparg)
{
assert(operand != NULL);
assert(
opcode == UNARY_NEGATIVE ||
opcode == UNARY_INVERT ||
opcode == UNARY_NOT ||
(opcode == CALL_INTRINSIC_1 && oparg == INTRINSIC_UNARY_POSITIVE)
);
PyObject *result;
switch (opcode) {
case UNARY_NEGATIVE:
result = PyNumber_Negative(operand);
break;
case UNARY_INVERT:
result = PyNumber_Invert(operand);
break;
case UNARY_NOT: {
int r = PyObject_IsTrue(operand);
if (r < 0) {
return NULL;
}
result = PyBool_FromLong(!r);
break;
}
case CALL_INTRINSIC_1:
if (oparg != INTRINSIC_UNARY_POSITIVE) {
Py_UNREACHABLE();
}
result = PyNumber_Positive(operand);
break;
default:
Py_UNREACHABLE();
}
return result;
}
static int
fold_const_unaryop(basicblock *bb, int i, PyObject *consts, PyObject *const_cache)
{
#define UNARYOP_OPERAND_COUNT 1
assert(PyDict_CheckExact(const_cache));
assert(PyList_CheckExact(consts));
cfg_instr *instr = &bb->b_instr[i];
PyObject *seq;
RETURN_IF_ERROR(get_constant_sequence(bb, i-1, UNARYOP_OPERAND_COUNT, consts, &seq));
if (seq == NULL) {
/* not a const */
return SUCCESS;
}
assert(PyTuple_Size(seq) == UNARYOP_OPERAND_COUNT);
PyObject *operand = PyTuple_GET_ITEM(seq, 0);
PyObject *newconst = eval_const_unaryop(operand, instr->i_opcode, instr->i_oparg);
Py_DECREF(seq);
if (newconst == NULL) {
if (PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) {
return ERROR;
}
PyErr_Clear();
return SUCCESS;
}
if (instr->i_opcode == UNARY_NOT) {
assert(PyBool_Check(newconst));
}
RETURN_IF_ERROR(instr_make_load_const(instr, newconst, consts, const_cache));
nop_out(bb, i-1, UNARYOP_OPERAND_COUNT);
return SUCCESS;
}
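And the unary counterpart, again as an illustrative check rather than part of the commit:

```python
import dis

# A unary operator applied to a constant (negation here) should fold to a
# single constant load instead of LOAD_CONST followed by UNARY_NEGATIVE.
code = compile("half = -0.5", "<example>", "exec")
dis.dis(code)
print(-0.5 in code.co_consts)   # expected True when the negation is folded
```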
@@ -2023,6 +2264,13 @@ optimize_basic_block(PyObject *const_cache, basicblock *bb, PyObject *consts)
INSTR_SET_OP1(&bb->b_instr[i + 1], opcode, oparg);
continue;
}
if (nextop == UNARY_NOT) {
INSTR_SET_OP0(inst, NOP);
int inverted = oparg ^ 1;
assert(inverted == 0 || inverted == 1);
INSTR_SET_OP1(&bb->b_instr[i + 1], opcode, inverted);
continue;
}
break;
case TO_BOOL:
if (nextop == TO_BOOL) {
@@ -2041,15 +2289,22 @@ optimize_basic_block(PyObject *const_cache, basicblock *bb, PyObject *consts)
INSTR_SET_OP0(&bb->b_instr[i + 1], NOP);
continue;
}
_Py_FALLTHROUGH;
case UNARY_INVERT:
case UNARY_NEGATIVE:
RETURN_IF_ERROR(fold_const_unaryop(bb, i, consts, const_cache));
break;
case CALL_INTRINSIC_1:
// for _ in (*foo, *bar) -> for _ in [*foo, *bar]
if (oparg == INTRINSIC_LIST_TO_TUPLE && nextop == GET_ITER) {
INSTR_SET_OP0(inst, NOP);
}
else if (oparg == INTRINSIC_UNARY_POSITIVE) {
RETURN_IF_ERROR(fold_const_unaryop(bb, i, consts, const_cache));
}
break;
case BINARY_OP:
RETURN_IF_ERROR(optimize_if_const_binop(bb, i, consts, const_cache));
RETURN_IF_ERROR(fold_const_binop(bb, i, consts, const_cache));
break;
}
}
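For the UNARY_NOT handling added above, a comparison-style instruction such as IS_OP or CONTAINS_OP followed by UNARY_NOT should collapse into a single instruction with its flag inverted (oparg ^ 1). A quick way to inspect this, with the caveat that the exact disassembly depends on the CPython version:

```python
import dis

# `not (x is y)` / `not (x in y)` should compile down to one IS_OP /
# CONTAINS_OP with the inverted oparg rather than a trailing UNARY_NOT.
dis.dis(compile("r = not (x is y)", "<example>", "exec"))
dis.dis(compile("r = not (x in y)", "<example>", "exec"))
```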