mirror of
https://github.com/astral-sh/ruff.git
synced 2025-09-28 12:55:05 +00:00
[flake8-pie] Reuse parsed tokens (PIE800) (#15438)
## Summary Follow-up to #15394. See [this review comment](https://github.com/astral-sh/ruff/pull/15394#discussion_r1910526741). ## Test Plan `cargo nextest run` and `cargo insta test`.
This commit is contained in:
parent
347ab5b47a
commit
70c3be88b9
1 changed file with 16 additions and 13 deletions
|
@ -1,11 +1,10 @@
|
||||||
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
|
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
|
||||||
use ruff_macros::{derive_message_formats, ViolationMetadata};
|
use ruff_macros::{derive_message_formats, ViolationMetadata};
|
||||||
use ruff_python_ast::{self as ast, Expr};
|
use ruff_python_ast::{self as ast, Expr};
|
||||||
use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
|
use ruff_python_parser::{TokenKind, Tokens};
|
||||||
use ruff_text_size::{Ranged, TextSize};
|
use ruff_text_size::{Ranged, TextSize};
|
||||||
|
|
||||||
use crate::checkers::ast::Checker;
|
use crate::checkers::ast::Checker;
|
||||||
use crate::Locator;
|
|
||||||
|
|
||||||
/// ## What it does
|
/// ## What it does
|
||||||
/// Checks for unnecessary dictionary unpacking operators (`**`).
|
/// Checks for unnecessary dictionary unpacking operators (`**`).
|
||||||
|
@ -54,7 +53,7 @@ pub(crate) fn unnecessary_spread(checker: &mut Checker, dict: &ast::ExprDict) {
|
||||||
// inside a dict.
|
// inside a dict.
|
||||||
if let Expr::Dict(inner) = value {
|
if let Expr::Dict(inner) = value {
|
||||||
let mut diagnostic = Diagnostic::new(UnnecessarySpread, value.range());
|
let mut diagnostic = Diagnostic::new(UnnecessarySpread, value.range());
|
||||||
if let Some(fix) = unnecessary_spread_fix(inner, prev_end, checker.locator()) {
|
if let Some(fix) = unnecessary_spread_fix(inner, prev_end, checker.tokens()) {
|
||||||
diagnostic.set_fix(fix);
|
diagnostic.set_fix(fix);
|
||||||
}
|
}
|
||||||
checker.diagnostics.push(diagnostic);
|
checker.diagnostics.push(diagnostic);
|
||||||
|
@ -68,24 +67,27 @@ pub(crate) fn unnecessary_spread(checker: &mut Checker, dict: &ast::ExprDict) {
|
||||||
fn unnecessary_spread_fix(
|
fn unnecessary_spread_fix(
|
||||||
dict: &ast::ExprDict,
|
dict: &ast::ExprDict,
|
||||||
prev_end: TextSize,
|
prev_end: TextSize,
|
||||||
locator: &Locator,
|
tokens: &Tokens,
|
||||||
) -> Option<Fix> {
|
) -> Option<Fix> {
|
||||||
// Find the `**` token preceding the spread.
|
// Find the `**` token preceding the spread.
|
||||||
let doublestar = SimpleTokenizer::starts_at(prev_end, locator.contents())
|
let doublestar = tokens
|
||||||
.find(|tok| matches!(tok.kind(), SimpleTokenKind::DoubleStar))?;
|
.after(prev_end)
|
||||||
|
.iter()
|
||||||
|
.find(|tok| matches!(tok.kind(), TokenKind::DoubleStar))?;
|
||||||
|
|
||||||
if let Some(last) = dict.iter_values().last() {
|
if let Some(last) = dict.iter_values().last() {
|
||||||
// Ex) `**{a: 1, b: 2}`
|
// Ex) `**{a: 1, b: 2}`
|
||||||
let mut edits = vec![];
|
let mut edits = vec![];
|
||||||
let mut open_parens: u32 = 0;
|
let mut open_parens: u32 = 0;
|
||||||
|
|
||||||
for tok in SimpleTokenizer::starts_at(doublestar.end(), locator.contents()).skip_trivia() {
|
for tok in tokens.after(doublestar.end()) {
|
||||||
match tok.kind() {
|
match tok.kind() {
|
||||||
SimpleTokenKind::LParen => {
|
kind if kind.is_trivia() => {}
|
||||||
|
TokenKind::Lpar => {
|
||||||
edits.push(Edit::range_deletion(tok.range()));
|
edits.push(Edit::range_deletion(tok.range()));
|
||||||
open_parens += 1;
|
open_parens += 1;
|
||||||
}
|
}
|
||||||
SimpleTokenKind::LBrace => {
|
TokenKind::Lbrace => {
|
||||||
edits.push(Edit::range_deletion(tok.range()));
|
edits.push(Edit::range_deletion(tok.range()));
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -97,22 +99,23 @@ fn unnecessary_spread_fix(
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut found_r_curly = false;
|
let mut found_r_curly = false;
|
||||||
for tok in SimpleTokenizer::starts_at(last.end(), locator.contents()).skip_trivia() {
|
for tok in tokens.after(last.end()) {
|
||||||
if found_r_curly && open_parens == 0 {
|
if found_r_curly && open_parens == 0 {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
match tok.kind() {
|
match tok.kind() {
|
||||||
SimpleTokenKind::Comma => {
|
kind if kind.is_trivia() => {}
|
||||||
|
TokenKind::Comma => {
|
||||||
edits.push(Edit::range_deletion(tok.range()));
|
edits.push(Edit::range_deletion(tok.range()));
|
||||||
}
|
}
|
||||||
SimpleTokenKind::RParen => {
|
TokenKind::Rpar => {
|
||||||
if found_r_curly {
|
if found_r_curly {
|
||||||
edits.push(Edit::range_deletion(tok.range()));
|
edits.push(Edit::range_deletion(tok.range()));
|
||||||
open_parens -= 1;
|
open_parens -= 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
SimpleTokenKind::RBrace => {
|
TokenKind::Rbrace => {
|
||||||
edits.push(Edit::range_deletion(tok.range()));
|
edits.push(Edit::range_deletion(tok.range()));
|
||||||
found_r_curly = true;
|
found_r_curly = true;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue