Replace row/column-based Location with byte offsets (#3931)

Micha Reiser 2023-04-26 20:11:02 +02:00 committed by GitHub
parent ee91598835
commit cab65b25da
418 changed files with 6203 additions and 7040 deletions
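For orientation before the diff itself: the commit swaps the parser's two-field `Location` (1-based row plus column) for the byte-offset types re-exported by `ruff_text_size`. A minimal, self-contained sketch of the modeling change follows; the demo string and offsets are made up, and only `TextSize`/`TextRange` come from the commit itself.

    use ruff_text_size::{TextRange, TextSize};

    fn main() {
        let source = "x = 1; y = 2";

        // Old model (paraphrased): a position was a 1-based (row, column)
        // pair, so extracting text required per-line column arithmetic.

        // New model: a position is a byte offset, a node spans a byte range,
        // and extracting text is a plain slice. The `usize::from` conversion
        // mirrors what the new code does, e.g.
        // `&locator.contents()[usize::from(start_location)..]`.
        let range = TextRange::new(TextSize::from(7), TextSize::from(12));
        let text = &source[usize::from(range.start())..usize::from(range.end())];
        assert_eq!(text, "y = 2");
    }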


@@ -12,3 +12,6 @@ indent_size = 2
 [*.{rs,py}]
 indent_size = 4
+
+[*.snap]
+trim_trailing_whitespace = false

Cargo.lock (generated)

@@ -2123,6 +2123,7 @@ dependencies = [
  "ruff_diagnostics",
  "ruff_python_ast",
  "ruff_python_stdlib",
+ "ruff_text_size",
  "rustc-hash",
  "serde",
  "serde_json",
@@ -2165,8 +2166,7 @@ version = "0.0.0"
 dependencies = [
  "anyhow",
  "log",
- "ruff_python_ast",
- "rustpython-parser",
+ "ruff_text_size",
  "serde",
 ]
@@ -2248,6 +2248,7 @@ dependencies = [
  "nohash-hasher",
  "ruff_python_ast",
  "ruff_python_stdlib",
+ "ruff_text_size",
  "rustc-hash",
  "rustpython-parser",
  "smallvec",
@@ -2284,11 +2285,10 @@ dependencies = [
 [[package]]
 name = "ruff_text_size"
 version = "0.0.0"
+source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
 dependencies = [
  "schemars",
  "serde",
- "serde_test",
- "static_assertions",
 ]

 [[package]]
@@ -2356,27 +2356,28 @@ dependencies = [
 [[package]]
 name = "rustpython-ast"
 version = "0.2.0"
-source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
+source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
 dependencies = [
  "num-bigint",
- "rustpython-compiler-core",
+ "ruff_text_size",
 ]

 [[package]]
 name = "rustpython-common"
 version = "0.2.0"
-source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
+source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
 dependencies = [
  "ascii",
  "bitflags 1.3.2",
- "bstr 0.2.17",
  "cfg-if",
- "getrandom",
  "hexf-parse",
  "itertools",
  "lexical-parse-float",
  "libc",
  "lock_api",
  "num-bigint",
- "num-complex",
  "num-traits",
  "once_cell",
  "radium",
@@ -2390,23 +2391,21 @@ dependencies = [
 [[package]]
 name = "rustpython-compiler-core"
 version = "0.2.0"
-source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
+source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
 dependencies = [
  "bitflags 1.3.2",
- "bstr 0.2.17",
  "itertools",
  "lz4_flex",
  "num-bigint",
  "num-complex",
- "serde",
+ "ruff_text_size",
 ]

 [[package]]
 name = "rustpython-parser"
 version = "0.2.0"
-source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
+source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
 dependencies = [
- "ahash",
  "anyhow",
  "itertools",
  "lalrpop",
@@ -2416,10 +2415,10 @@ dependencies = [
  "num-traits",
  "phf",
  "phf_codegen",
+ "ruff_text_size",
  "rustc-hash",
  "rustpython-ast",
  "rustpython-compiler-core",
- "serde",
  "tiny-keccak",
  "unic-emoji-char",
  "unic-ucd-ident",
@@ -2568,15 +2567,6 @@ dependencies = [
  "serde",
 ]

-[[package]]
-name = "serde_test"
-version = "1.0.160"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c95a500e3923258f7fc3a16bf29934e403aef5ca1096e184d85e3b1926675e8"
-dependencies = [
- "serde",
-]
-
 [[package]]
 name = "shellexpand"
 version = "3.1.0"


@@ -30,12 +30,10 @@ path-absolutize = { version = "3.0.14" }
 proc-macro2 = { version = "1.0.51" }
 quote = { version = "1.0.23" }
 regex = { version = "1.7.1" }
+ruff_text_size = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
 rustc-hash = { version = "1.1.0" }
-rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
-rustpython-parser = { features = [
-  "lalrpop",
-  "serde",
-], git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
+rustpython-common = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
+rustpython-parser = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
 schemars = { version = "0.8.12" }
 serde = { version = "1.0.152", features = ["derive"] }
 serde_json = { version = "1.0.93", features = ["preserve_order"] }


@@ -17,11 +17,11 @@ name = "ruff"
 ruff_cache = { path = "../ruff_cache" }
 ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
 ruff_macros = { path = "../ruff_macros" }
-ruff_python_ast = { path = "../ruff_python_ast" }
+ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] }
 ruff_python_semantic = { path = "../ruff_python_semantic" }
 ruff_python_stdlib = { path = "../ruff_python_stdlib" }
 ruff_rustpython = { path = "../ruff_rustpython" }
-ruff_text_size = { path = "../ruff_text_size" }
+ruff_text_size = { workspace = true }

 annotate-snippets = { version = "0.9.1", features = ["color"] }
 anyhow = { workspace = true }


@@ -9,6 +9,9 @@ def f():
     # Here's a standalone comment that's over the limit.

+    x = 2
+    # Another standalone that is preceded by a newline and indent toke and is over the limit.
+
     print("Here's a string that's over the limit, but it's not a docstring.")


@@ -4,12 +4,12 @@ use itertools::Itertools;
 use libcst_native::{
     Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement,
 };
-use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Location, Stmt, StmtKind};
+use ruff_text_size::{TextLen, TextRange, TextSize};
+use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Stmt, StmtKind};
 use rustpython_parser::{lexer, Mode, Tok};

 use ruff_diagnostics::Edit;
 use ruff_python_ast::helpers;
-use ruff_python_ast::helpers::to_absolute;
 use ruff_python_ast::imports::{AnyImport, Import};
 use ruff_python_ast::newlines::NewlineWithTrailingNewline;
 use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
@@ -102,20 +102,17 @@ fn is_lone_child(child: &Stmt, parent: &Stmt, deleted: &[&Stmt]) -> Result<bool>

 /// Return the location of a trailing semicolon following a `Stmt`, if it's part
 /// of a multi-statement line.
-fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<Location> {
-    let contents = locator.after(stmt.end_location.unwrap());
-    for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
-        let trimmed = line.trim();
+fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<TextSize> {
+    let contents = locator.after(stmt.end());
+
+    for line in NewlineWithTrailingNewline::from(contents) {
+        let trimmed = line.trim_start();
+
         if trimmed.starts_with(';') {
-            let column = line
-                .char_indices()
-                .find_map(|(column, char)| if char == ';' { Some(column) } else { None })
-                .unwrap();
-            return Some(to_absolute(
-                Location::new(row + 1, column),
-                stmt.end_location.unwrap(),
-            ));
+            let colon_offset = line.text_len() - trimmed.text_len();
+            return Some(stmt.end() + line.start() + colon_offset);
         }
+
         if !trimmed.starts_with('\\') {
             break;
         }
@@ -124,42 +121,36 @@ fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<Location> {
 }

 /// Find the next valid break for a `Stmt` after a semicolon.
-fn next_stmt_break(semicolon: Location, locator: &Locator) -> Location {
-    let start_location = Location::new(semicolon.row(), semicolon.column() + 1);
-    let contents = locator.after(start_location);
-    for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
+fn next_stmt_break(semicolon: TextSize, locator: &Locator) -> TextSize {
+    let start_location = semicolon + TextSize::from(1);
+
+    let contents = &locator.contents()[usize::from(start_location)..];
+    for line in NewlineWithTrailingNewline::from(contents) {
         let trimmed = line.trim();
         // Skip past any continuations.
         if trimmed.starts_with('\\') {
             continue;
         }
-        return if trimmed.is_empty() {
-            // If the line is empty, then despite the previous statement ending in a
-            // semicolon, we know that it's not a multi-statement line.
-            to_absolute(Location::new(row + 1, 0), start_location)
-        } else {
-            // Otherwise, find the start of the next statement. (Or, anything that isn't
-            // whitespace.)
-            let column = line
-                .char_indices()
-                .find_map(|(column, char)| {
-                    if char.is_whitespace() {
-                        None
-                    } else {
-                        Some(column)
-                    }
-                })
-                .unwrap();
-            to_absolute(Location::new(row + 1, column), start_location)
-        };
+
+        return start_location
+            + if trimmed.is_empty() {
+                // If the line is empty, then despite the previous statement ending in a
+                // semicolon, we know that it's not a multi-statement line.
+                line.start()
+            } else {
+                // Otherwise, find the start of the next statement. (Or, anything that isn't
+                // whitespace.)
+                let relative_offset = line.find(|c: char| !c.is_whitespace()).unwrap();
+                line.start() + TextSize::try_from(relative_offset).unwrap()
+            };
     }
-    Location::new(start_location.row() + 1, 0)
+
+    locator.line_end(start_location)
 }

 /// Return `true` if a `Stmt` occurs at the end of a file.
 fn is_end_of_file(stmt: &Stmt, locator: &Locator) -> bool {
-    let contents = locator.after(stmt.end_location.unwrap());
-    contents.is_empty()
+    stmt.end() == locator.contents().text_len()
 }

 /// Return the `Fix` to use when deleting a `Stmt`.
@@ -190,33 +181,23 @@ pub fn delete_stmt(
     {
         // If removing this node would lead to an invalid syntax tree, replace
         // it with a `pass`.
-        Ok(Edit::replacement(
-            "pass".to_string(),
-            stmt.location,
-            stmt.end_location.unwrap(),
-        ))
+        Ok(Edit::range_replacement("pass".to_string(), stmt.range()))
     } else {
         Ok(if let Some(semicolon) = trailing_semicolon(stmt, locator) {
             let next = next_stmt_break(semicolon, locator);
-            Edit::deletion(stmt.location, next)
-        } else if helpers::match_leading_content(stmt, locator) {
-            Edit::deletion(stmt.location, stmt.end_location.unwrap())
-        } else if helpers::preceded_by_continuation(stmt, indexer) {
-            if is_end_of_file(stmt, locator) && stmt.location.column() == 0 {
+            Edit::deletion(stmt.start(), next)
+        } else if helpers::has_leading_content(stmt, locator) {
+            Edit::range_deletion(stmt.range())
+        } else if helpers::preceded_by_continuation(stmt, indexer, locator) {
+            if is_end_of_file(stmt, locator) && locator.is_at_start_of_line(stmt.start()) {
                 // Special-case: a file can't end in a continuation.
-                Edit::replacement(
-                    stylist.line_ending().to_string(),
-                    stmt.location,
-                    stmt.end_location.unwrap(),
-                )
+                Edit::range_replacement(stylist.line_ending().to_string(), stmt.range())
             } else {
-                Edit::deletion(stmt.location, stmt.end_location.unwrap())
+                Edit::range_deletion(stmt.range())
             }
         } else {
-            Edit::deletion(
-                Location::new(stmt.location.row(), 0),
-                Location::new(stmt.end_location.unwrap().row() + 1, 0),
-            )
+            let range = locator.full_lines_range(stmt.range());
+            Edit::range_deletion(range)
         })
     }
 }
@@ -231,7 +212,7 @@ pub fn remove_unused_imports<'a>(
     indexer: &Indexer,
     stylist: &Stylist,
 ) -> Result<Edit> {
-    let module_text = locator.slice(stmt);
+    let module_text = locator.slice(stmt.range());
     let mut tree = match_module(module_text)?;

     let Some(Statement::Simple(body)) = tree.body.first_mut() else {
@@ -337,11 +318,7 @@ pub fn remove_unused_imports<'a>(
     };

     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        stmt.location,
-        stmt.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), stmt.range()))
 }
@@ -353,9 +330,8 @@ pub fn remove_unused_imports<'a>(
 /// For this behavior, set `remove_parentheses` to `true`.
 pub fn remove_argument(
     locator: &Locator,
-    call_at: Location,
-    expr_at: Location,
-    expr_end: Location,
+    call_at: TextSize,
+    expr_range: TextRange,
     args: &[Expr],
     keywords: &[Keyword],
     remove_parentheses: bool,
@@ -374,13 +350,13 @@ pub fn remove_argument(
     if n_arguments == 1 {
         // Case 1: there is only one argument.
         let mut count: usize = 0;
-        for (start, tok, end) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
+        for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
             if matches!(tok, Tok::Lpar) {
                 if count == 0 {
                     fix_start = Some(if remove_parentheses {
-                        start
+                        range.start()
                     } else {
-                        Location::new(start.row(), start.column() + 1)
+                        range.start() + TextSize::from(1)
                     });
                 }
                 count += 1;
@@ -390,9 +366,9 @@ pub fn remove_argument(
                 count -= 1;
                 if count == 0 {
                     fix_end = Some(if remove_parentheses {
-                        end
+                        range.end()
                     } else {
-                        Location::new(end.row(), end.column() - 1)
+                        range.end() - TextSize::from(1)
                     });
                     break;
                 }
@@ -400,27 +376,27 @@ pub fn remove_argument(
         }
     } else if args
         .iter()
-        .map(|node| node.location)
-        .chain(keywords.iter().map(|node| node.location))
-        .any(|location| location > expr_at)
+        .map(Expr::start)
+        .chain(keywords.iter().map(Keyword::start))
+        .any(|location| location > expr_range.start())
     {
         // Case 2: argument or keyword is _not_ the last node.
         let mut seen_comma = false;
-        for (start, tok, end) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
+        for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
             if seen_comma {
                 if matches!(tok, Tok::NonLogicalNewline) {
                     // Also delete any non-logical newlines after the comma.
                     continue;
                 }
                 fix_end = Some(if matches!(tok, Tok::Newline) {
-                    end
+                    range.end()
                 } else {
-                    start
+                    range.start()
                 });
                 break;
             }
-            if start == expr_at {
-                fix_start = Some(start);
+            if range.start() == expr_range.start() {
+                fix_start = Some(range.start());
             }
             if fix_start.is_some() && matches!(tok, Tok::Comma) {
                 seen_comma = true;
@@ -429,13 +405,13 @@ pub fn remove_argument(
     } else {
         // Case 3: argument or keyword is the last node, so we have to find the last
        // comma in the stmt.
-        for (start, tok, _) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
-            if start == expr_at {
-                fix_end = Some(expr_end);
+        for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
+            if range.start() == expr_range.start() {
+                fix_end = Some(expr_range.end());
                 break;
             }
             if matches!(tok, Tok::Comma) {
-                fix_start = Some(start);
+                fix_start = Some(range.start());
             }
         }
     }
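One pattern worth noting in the hunks above: stepping just inside a parenthesis used to mean building a new Location with column plus or minus one, while with offsets it is plain arithmetic on TextSize. A free-standing restatement (the function name and demo values are invented for this sketch, not from the commit):

    use ruff_text_size::{TextRange, TextSize};

    // Shrink a call's range by one byte on each side, mirroring
    // `range.start() + TextSize::from(1)` and `range.end() - TextSize::from(1)`
    // in the diff above.
    fn inside_parens(call: TextRange) -> TextRange {
        TextRange::new(call.start() + TextSize::from(1), call.end() - TextSize::from(1))
    }

    fn main() {
        let call = TextRange::new(TextSize::from(3), TextSize::from(10));
        assert_eq!(
            inside_parens(call),
            TextRange::new(TextSize::from(4), TextSize::from(9))
        );
    }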
@@ -482,11 +458,8 @@ pub fn get_or_import_symbol(
         //
         // By adding this no-op edit, we force the `unused-imports` fix to conflict with the
         // `sys-exit-alias` fix, and thus will avoid applying both fixes in the same pass.
-        let import_edit = Edit::replacement(
-            locator.slice(source).to_string(),
-            source.location,
-            source.end_location.unwrap(),
-        );
+        let import_edit =
+            Edit::range_replacement(locator.slice(source.range()).to_string(), source.range());
         Ok((import_edit, binding))
     } else {
         if let Some(stmt) = importer.get_import_from(module) {
@@ -527,8 +500,8 @@ pub fn get_or_import_symbol(
 #[cfg(test)]
 mod tests {
     use anyhow::Result;
+    use ruff_text_size::TextSize;
     use rustpython_parser as parser;
-    use rustpython_parser::ast::Location;

     use ruff_python_ast::source_code::Locator;
@@ -546,19 +519,13 @@ mod tests {
         let program = parser::parse_program(contents, "<filename>")?;
         let stmt = program.first().unwrap();
         let locator = Locator::new(contents);
-        assert_eq!(
-            trailing_semicolon(stmt, &locator),
-            Some(Location::new(1, 5))
-        );
+        assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(5)));

         let contents = "x = 1 ; y = 1";
         let program = parser::parse_program(contents, "<filename>")?;
         let stmt = program.first().unwrap();
         let locator = Locator::new(contents);
-        assert_eq!(
-            trailing_semicolon(stmt, &locator),
-            Some(Location::new(1, 6))
-        );
+        assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(6)));

         let contents = r#"
x = 1 \
@@ -568,10 +535,7 @@ x = 1 \
         let program = parser::parse_program(contents, "<filename>")?;
         let stmt = program.first().unwrap();
         let locator = Locator::new(contents);
-        assert_eq!(
-            trailing_semicolon(stmt, &locator),
-            Some(Location::new(2, 2))
-        );
+        assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(10)));

         Ok(())
     }
@@ -581,15 +545,15 @@ x = 1 \
         let contents = "x = 1; y = 1";
         let locator = Locator::new(contents);
         assert_eq!(
-            next_stmt_break(Location::new(1, 4), &locator),
-            Location::new(1, 5)
+            next_stmt_break(TextSize::from(4), &locator),
+            TextSize::from(5)
         );

         let contents = "x = 1 ; y = 1";
         let locator = Locator::new(contents);
         assert_eq!(
-            next_stmt_break(Location::new(1, 5), &locator),
-            Location::new(1, 6)
+            next_stmt_break(TextSize::from(5), &locator),
+            TextSize::from(6)
         );

         let contents = r#"
@@ -599,8 +563,8 @@ x = 1 \
         .trim();
         let locator = Locator::new(contents);
         assert_eq!(
-            next_stmt_break(Location::new(2, 2), &locator),
-            Location::new(2, 4)
+            next_stmt_break(TextSize::from(10), &locator),
+            TextSize::from(12)
         );
     }
 }
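The updated tests pin down absolute byte offsets (5, 6, 10) where the old tests used (row, column) pairs. As a cross-check of the arithmetic in the new trailing_semicolon, here is a dependency-free restatement; the function name is invented for this sketch, and the real implementation works through the Locator and line iterator shown above:

    // Byte offset of a trailing `;` after `stmt_end`, or None. Mirrors
    // `stmt.end() + line.start() + colon_offset` from the new implementation.
    fn trailing_semicolon_offset(source: &str, stmt_end: usize) -> Option<usize> {
        let contents = &source[stmt_end..];
        let mut line_start = 0;
        for line in contents.split_inclusive('\n') {
            let trimmed = line.trim_start();
            if trimmed.starts_with(';') {
                let semicolon = line.len() - trimmed.len();
                return Some(stmt_end + line_start + semicolon);
            }
            if !trimmed.starts_with('\\') {
                break;
            }
            line_start += line.len();
        }
        None
    }

    fn main() {
        // Matches the updated assertions: offsets 5 and 6 respectively.
        assert_eq!(trailing_semicolon_offset("x = 1; y = 1", 5), Some(5));
        assert_eq!(trailing_semicolon_offset("x = 1 ; y = 1", 5), Some(6));
    }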


@@ -1,12 +1,11 @@
 use std::collections::BTreeSet;

 use itertools::Itertools;
+use ruff_text_size::{TextRange, TextSize};
 use rustc_hash::FxHashMap;
-use rustpython_parser::ast::Location;

 use ruff_diagnostics::{Diagnostic, Edit, Fix};
 use ruff_python_ast::source_code::Locator;
-use ruff_python_ast::types::Range;

 use crate::linter::FixTable;
 use crate::registry::{AsRule, Rule};
@@ -33,7 +32,7 @@ fn apply_fixes<'a>(
     locator: &'a Locator<'a>,
 ) -> (String, FixTable) {
     let mut output = String::with_capacity(locator.len());
-    let mut last_pos: Option<Location> = None;
+    let mut last_pos: Option<TextSize> = None;
     let mut applied: BTreeSet<&Edit> = BTreeSet::default();
     let mut fixed = FxHashMap::default();
@@ -57,7 +56,7 @@ fn apply_fixes<'a>(
         // Best-effort approach: if this fix overlaps with a fix we've already applied,
         // skip it.
         if last_pos.map_or(false, |last_pos| {
-            fix.min_location()
+            fix.min_start()
                 .map_or(false, |fix_location| last_pos >= fix_location)
         }) {
             continue;
@@ -65,14 +64,14 @@ fn apply_fixes<'a>(

         for edit in fix.edits() {
             // Add all contents from `last_pos` to `fix.location`.
-            let slice = locator.slice(Range::new(last_pos.unwrap_or_default(), edit.location()));
+            let slice = locator.slice(TextRange::new(last_pos.unwrap_or_default(), edit.start()));
             output.push_str(slice);

             // Add the patch itself.
             output.push_str(edit.content().unwrap_or_default());

             // Track that the edit was applied.
-            last_pos = Some(edit.end_location());
+            last_pos = Some(edit.end());
             applied.insert(edit);
         }
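The loop above is the core of fix application: edits are visited in ascending start order, the untouched gap before each edit is copied, then the replacement text, with overlapping edits skipped. A free-standing restatement (types and names invented for this sketch):

    struct SimpleEdit {
        start: usize,
        end: usize,
        content: &'static str,
    }

    fn apply_edits(source: &str, mut edits: Vec<SimpleEdit>) -> String {
        edits.sort_by_key(|edit| edit.start);
        let mut output = String::with_capacity(source.len());
        let mut last_pos = 0;
        for edit in edits {
            // Best-effort overlap handling, as in `apply_fixes`: skip an
            // edit that starts before the last applied position.
            if edit.start < last_pos {
                continue;
            }
            output.push_str(&source[last_pos..edit.start]);
            output.push_str(edit.content);
            last_pos = edit.end;
        }
        output.push_str(&source[last_pos..]);
        output
    }

    fn main() {
        // Same shape as the `class A(object)` -> `class A(Bar)` test below.
        let source = "class A(object):";
        let edits = vec![SimpleEdit { start: 8, end: 14, content: "Bar" }];
        assert_eq!(apply_edits(source, edits), "class A(Bar):");
    }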
@@ -88,8 +87,8 @@ fn apply_fixes<'a>(

 /// Compare two fixes.
 fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Ordering {
-    fix1.min_location()
-        .cmp(&fix2.min_location())
+    fix1.min_start()
+        .cmp(&fix2.min_start())
         .then_with(|| match (&rule1, &rule2) {
             // Apply `EndsInPeriod` fixes before `NewLineAfterLastParagraph` fixes.
             (Rule::EndsInPeriod, Rule::NewLineAfterLastParagraph) => std::cmp::Ordering::Less,
@@ -100,7 +99,7 @@ fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Orderi

 #[cfg(test)]
 mod tests {
-    use rustpython_parser::ast::Location;
+    use ruff_text_size::TextSize;

     use ruff_diagnostics::Diagnostic;
     use ruff_diagnostics::Edit;
@@ -114,8 +113,7 @@ mod tests {
             .map(|edit| Diagnostic {
                 // The choice of rule here is arbitrary.
                 kind: MissingNewlineAtEndOfFile.into(),
-                location: edit.location(),
-                end_location: edit.end_location(),
+                range: edit.range(),
                 fix: edit.into(),
                 parent: None,
             })
@@ -142,8 +140,8 @@ class A(object):
     );
     let diagnostics = create_diagnostics([Edit::replacement(
         "Bar".to_string(),
-        Location::new(1, 8),
-        Location::new(1, 14),
+        TextSize::new(8),
+        TextSize::new(14),
     )]);
     let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
     assert_eq!(
@@ -166,8 +164,7 @@ class A(object):
     "#
     .trim(),
     );
-    let diagnostics =
-        create_diagnostics([Edit::deletion(Location::new(1, 7), Location::new(1, 15))]);
+    let diagnostics = create_diagnostics([Edit::deletion(TextSize::new(7), TextSize::new(15))]);
     let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
     assert_eq!(
         contents,
@@ -190,8 +187,8 @@ class A(object, object, object):
     .trim(),
     );
     let diagnostics = create_diagnostics([
-        Edit::deletion(Location::new(1, 8), Location::new(1, 16)),
-        Edit::deletion(Location::new(1, 22), Location::new(1, 30)),
+        Edit::deletion(TextSize::from(8), TextSize::from(16)),
+        Edit::deletion(TextSize::from(22), TextSize::from(30)),
     ]);
     let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
@@ -216,12 +213,8 @@ class A(object):
     .trim(),
     );
     let diagnostics = create_diagnostics([
-        Edit::deletion(Location::new(1, 7), Location::new(1, 15)),
-        Edit::replacement(
-            "ignored".to_string(),
-            Location::new(1, 9),
-            Location::new(1, 11),
-        ),
+        Edit::deletion(TextSize::from(7), TextSize::from(15)),
+        Edit::replacement("ignored".to_string(), TextSize::from(9), TextSize::from(11)),
     ]);
     let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
     assert_eq!(


@@ -1,7 +1,7 @@
 use ruff_python_semantic::scope::ScopeStack;
+use ruff_text_size::TextRange;
 use rustpython_parser::ast::{Expr, Stmt};

-use ruff_python_ast::types::Range;
 use ruff_python_ast::types::RefEquality;
 use ruff_python_semantic::analyze::visibility::{Visibility, VisibleScope};
@@ -16,7 +16,7 @@ type Context<'a> = (ScopeStack, Vec<RefEquality<'a, Stmt>>);
 #[derive(Default)]
 pub struct Deferred<'a> {
     pub definitions: Vec<(Definition<'a>, Visibility, Context<'a>)>,
-    pub string_type_definitions: Vec<(Range, &'a str, AnnotationContext, Context<'a>)>,
+    pub string_type_definitions: Vec<(TextRange, &'a str, AnnotationContext, Context<'a>)>,
     pub type_definitions: Vec<(&'a Expr, AnnotationContext, Context<'a>)>,
     pub functions: Vec<(&'a Stmt, Context<'a>, VisibleScope)>,
     pub lambdas: Vec<(&'a Expr, Context<'a>)>,


@@ -3,20 +3,19 @@ use std::path::Path;

 use itertools::Itertools;
 use log::error;
-use nohash_hasher::IntMap;
+use ruff_text_size::{TextRange, TextSize};
 use rustc_hash::{FxHashMap, FxHashSet};
 use rustpython_common::cformat::{CFormatError, CFormatErrorType};
 use rustpython_parser::ast::{
     Arg, Arguments, Comprehension, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprContext,
-    ExprKind, KeywordData, Located, Location, Operator, Pattern, PatternKind, Stmt, StmtKind,
-    Suite,
+    ExprKind, KeywordData, Located, Operator, Pattern, PatternKind, Stmt, StmtKind, Suite,
 };

 use ruff_diagnostics::Diagnostic;
 use ruff_python_ast::all::{extract_all_names, AllNamesFlags};
 use ruff_python_ast::helpers::{extract_handled_exceptions, to_module_path};
 use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
-use ruff_python_ast::types::{Node, Range, RefEquality};
+use ruff_python_ast::types::{Node, RefEquality};
 use ruff_python_ast::typing::parse_type_annotation;
 use ruff_python_ast::visitor::{walk_excepthandler, walk_pattern, Visitor};
 use ruff_python_ast::{branch_detection, cast, helpers, str, visitor};
@@ -39,6 +38,7 @@ use crate::docstrings::definition::{
 };
 use crate::fs::relativize_path;
 use crate::importer::Importer;
+use crate::noqa::NoqaMapping;
 use crate::registry::{AsRule, Rule};
 use crate::rules::{
     flake8_2020, flake8_annotations, flake8_bandit, flake8_blind_except, flake8_boolean_trap,
@@ -67,7 +67,7 @@ pub struct Checker<'a> {
     autofix: flags::Autofix,
     noqa: flags::Noqa,
     pub settings: &'a Settings,
-    pub noqa_line_for: &'a IntMap<usize, usize>,
+    pub noqa_line_for: &'a NoqaMapping,
     pub locator: &'a Locator<'a>,
     pub stylist: &'a Stylist<'a>,
     pub indexer: &'a Indexer,
@@ -85,7 +85,7 @@ impl<'a> Checker<'a> {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
         settings: &'a Settings,
-        noqa_line_for: &'a IntMap<usize, usize>,
+        noqa_line_for: &'a NoqaMapping,
         autofix: flags::Autofix,
         noqa: flags::Noqa,
         path: &'a Path,
@@ -126,7 +126,7 @@ impl<'a> Checker<'a> {
     }

     /// Return `true` if a `Rule` is disabled by a `noqa` directive.
-    pub fn rule_is_ignored(&self, code: Rule, lineno: usize) -> bool {
+    pub fn rule_is_ignored(&self, code: Rule, offset: TextSize) -> bool {
         // TODO(charlie): `noqa` directives are mostly enforced in `check_lines.rs`.
         // However, in rare cases, we need to check them here. For example, when
         // removing unused imports, we create a single fix that's applied to all
@@ -137,7 +137,7 @@ impl<'a> Checker<'a> {
         if !self.noqa.to_bool() {
             return false;
         }
-        noqa::rule_is_ignored(code, lineno, self.noqa_line_for, self.locator)
+        noqa::rule_is_ignored(code, offset, self.noqa_line_for, self.locator)
     }
 }
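Note that `rule_is_ignored` now takes a `TextSize` offset rather than a line number, and the line-keyed `IntMap` becomes a `NoqaMapping`. The `NoqaMapping` definition is not part of this hunk, so the stand-in below is an assumption, shown only to illustrate the offset-based lookup it enables:

    use ruff_text_size::{TextRange, TextSize};

    // Hypothetical stand-in for `NoqaMapping` (the real type lives elsewhere
    // in this commit): ranges whose `# noqa` comment sits at the range's end,
    // e.g. logical lines spanning several physical lines.
    struct NoqaMappingSketch {
        ranges: Vec<TextRange>,
    }

    impl NoqaMappingSketch {
        // Map a diagnostic offset to the offset whose line hosts the relevant
        // `# noqa` comment; offsets outside any recorded range map to themselves.
        fn resolve(&self, offset: TextSize) -> TextSize {
            self.ranges
                .iter()
                .copied()
                .find(|range| range.contains(offset))
                .map_or(offset, TextRange::end)
        }
    }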
@@ -221,13 +221,13 @@ where
         match &stmt.node {
             StmtKind::Global { names } => {
                 let scope_index = self.ctx.scope_id();
-                let ranges: Vec<Range> = helpers::find_names(stmt, self.locator).collect();
+                let ranges: Vec<TextRange> = helpers::find_names(stmt, self.locator).collect();
                 if !scope_index.is_global() {
                     // Add the binding to the current scope.
                     let context = self.ctx.execution_context();
                     let exceptions = self.ctx.exceptions();
                     let scope = &mut self.ctx.scopes[scope_index];
-                    let usage = Some((scope.id, Range::from(stmt)));
+                    let usage = Some((scope.id, stmt.range()));
                     for (name, range) in names.iter().zip(ranges.iter()) {
                         let id = self.ctx.bindings.push(Binding {
                             kind: BindingKind::Global,
@@ -252,12 +252,12 @@ where
             }
             StmtKind::Nonlocal { names } => {
                 let scope_index = self.ctx.scope_id();
-                let ranges: Vec<Range> = helpers::find_names(stmt, self.locator).collect();
+                let ranges: Vec<TextRange> = helpers::find_names(stmt, self.locator).collect();
                 if !scope_index.is_global() {
                     let context = self.ctx.execution_context();
                     let exceptions = self.ctx.exceptions();
                     let scope = &mut self.ctx.scopes[scope_index];
-                    let usage = Some((scope.id, Range::from(stmt)));
+                    let usage = Some((scope.id, stmt.range()));
                     for (name, range) in names.iter().zip(ranges.iter()) {
                         // Add a binding to the current scope.
                         let id = self.ctx.bindings.push(Binding {
@@ -695,7 +695,7 @@ where
                     runtime_usage: None,
                     synthetic_usage: None,
                     typing_usage: None,
-                    range: Range::from(stmt),
+                    range: stmt.range(),
                     source: Some(*self.ctx.current_stmt()),
                     context: self.ctx.execution_context(),
                     exceptions: self.ctx.exceptions(),
@@ -880,7 +880,7 @@ where
                     .rules
                     .enabled(Rule::ModuleImportNotAtTopOfFile)
                 {
-                    pycodestyle::rules::module_import_not_at_top_of_file(self, stmt);
+                    pycodestyle::rules::module_import_not_at_top_of_file(self, stmt, self.locator);
                 }

                 if self.settings.rules.enabled(Rule::GlobalStatement) {
@@ -909,9 +909,9 @@ where
                             kind: BindingKind::FutureImportation,
                             runtime_usage: None,
                             // Always mark `__future__` imports as used.
-                            synthetic_usage: Some((self.ctx.scope_id(), Range::from(alias))),
+                            synthetic_usage: Some((self.ctx.scope_id(), alias.range())),
                             typing_usage: None,
-                            range: Range::from(alias),
+                            range: alias.range(),
                             source: Some(*self.ctx.current_stmt()),
                             context: self.ctx.execution_context(),
                             exceptions: self.ctx.exceptions(),
@@ -923,7 +923,7 @@ where
                         {
                             self.diagnostics.push(Diagnostic::new(
                                 pyflakes::rules::LateFutureImport,
-                                Range::from(stmt),
+                                stmt.range(),
                             ));
                         }
                     } else if alias.node.name.contains('.') && alias.node.asname.is_none() {
@@ -941,7 +941,7 @@ where
                             runtime_usage: None,
                             synthetic_usage: None,
                             typing_usage: None,
-                            range: Range::from(alias),
+                            range: alias.range(),
                             source: Some(*self.ctx.current_stmt()),
                             context: self.ctx.execution_context(),
                             exceptions: self.ctx.exceptions(),
@@ -964,12 +964,12 @@ where
                             kind: BindingKind::Importation(Importation { name, full_name }),
                             runtime_usage: None,
                             synthetic_usage: if is_explicit_reexport {
-                                Some((self.ctx.scope_id(), Range::from(alias)))
+                                Some((self.ctx.scope_id(), alias.range()))
                             } else {
                                 None
                             },
                             typing_usage: None,
-                            range: Range::from(alias),
+                            range: alias.range(),
                             source: Some(*self.ctx.current_stmt()),
                             context: self.ctx.execution_context(),
                             exceptions: self.ctx.exceptions(),
@@ -1135,7 +1135,7 @@ where
                     .rules
                     .enabled(Rule::ModuleImportNotAtTopOfFile)
                 {
-                    pycodestyle::rules::module_import_not_at_top_of_file(self, stmt);
+                    pycodestyle::rules::module_import_not_at_top_of_file(self, stmt, self.locator);
                 }

                 if self.settings.rules.enabled(Rule::GlobalStatement) {
@@ -1220,9 +1220,9 @@ where
                             kind: BindingKind::FutureImportation,
                             runtime_usage: None,
                             // Always mark `__future__` imports as used.
-                            synthetic_usage: Some((self.ctx.scope_id(), Range::from(alias))),
+                            synthetic_usage: Some((self.ctx.scope_id(), alias.range())),
                             typing_usage: None,
-                            range: Range::from(alias),
+                            range: alias.range(),
                             source: Some(*self.ctx.current_stmt()),
                             context: self.ctx.execution_context(),
                             exceptions: self.ctx.exceptions(),
@@ -1242,7 +1242,7 @@ where
                         {
                             self.diagnostics.push(Diagnostic::new(
                                 pyflakes::rules::LateFutureImport,
-                                Range::from(stmt),
+                                stmt.range(),
                             ));
                         }
                     } else if alias.node.name == "*" {
@@ -1265,7 +1265,7 @@ where
                                         module.as_deref(),
                                     ),
                                 },
-                                Range::from(stmt),
+                                stmt.range(),
                             ));
                         }
                     }
@@ -1279,7 +1279,7 @@ where
                                 pyflakes::rules::UndefinedLocalWithImportStar {
                                     name: helpers::format_import_from(*level, module.as_deref()),
                                 },
-                                Range::from(stmt),
+                                stmt.range(),
                             ));
                         }
                     } else {
@@ -1313,12 +1313,12 @@ where
                             }),
                             runtime_usage: None,
                             synthetic_usage: if is_explicit_reexport {
-                                Some((self.ctx.scope_id(), Range::from(alias)))
+                                Some((self.ctx.scope_id(), alias.range()))
                             } else {
                                 None
                             },
                             typing_usage: None,
-                            range: Range::from(alias),
+                            range: alias.range(),
                             source: Some(*self.ctx.current_stmt()),
                             context: self.ctx.execution_context(),
                             exceptions: self.ctx.exceptions(),
@@ -2004,7 +2004,7 @@ where
                     runtime_usage: None,
                     synthetic_usage: None,
                     typing_usage: None,
-                    range: Range::from(stmt),
+                    range: stmt.range(),
                     source: Some(RefEquality(stmt)),
                     context: self.ctx.execution_context(),
                     exceptions: self.ctx.exceptions(),
@@ -2067,7 +2067,7 @@ where
                     runtime_usage: None,
                     synthetic_usage: None,
                     typing_usage: None,
-                    range: Range::from(*stmt),
+                    range: stmt.range(),
                     source: Some(RefEquality(stmt)),
                     context: self.ctx.execution_context(),
                     exceptions: self.ctx.exceptions(),
@@ -2228,7 +2228,7 @@ where
                     runtime_usage: None,
                     synthetic_usage: None,
                     typing_usage: None,
-                    range: Range::from(stmt),
+                    range: stmt.range(),
                     source: Some(*self.ctx.current_stmt()),
                     context: self.ctx.execution_context(),
                     exceptions: self.ctx.exceptions(),
@@ -2261,7 +2261,7 @@ where
                 } = &expr.node
                 {
                     self.deferred.string_type_definitions.push((
-                        Range::from(expr),
+                        expr.range(),
                         value,
                         (self.ctx.in_annotation, self.ctx.in_type_checking_block),
                         (self.ctx.scope_stack.clone(), self.ctx.parents.clone()),
@@ -2336,7 +2336,7 @@ where
                         elts,
                         check_too_many_expressions,
                         check_two_starred_expressions,
-                        Range::from(expr),
+                        expr.range(),
                     ) {
                         self.diagnostics.push(diagnostic);
                     }
@@ -2369,7 +2369,7 @@ where
                     ExprContext::Store => {
                         if self.settings.rules.enabled(Rule::AmbiguousVariableName) {
                             if let Some(diagnostic) =
-                                pycodestyle::rules::ambiguous_variable_name(id, Range::from(expr))
+                                pycodestyle::rules::ambiguous_variable_name(id, expr.range())
                             {
                                 self.diagnostics.push(diagnostic);
                             }
@@ -2455,7 +2455,7 @@ where
                 {
                     if attr == "format" {
                         // "...".format(...) call
-                        let location = Range::from(expr);
+                        let location = expr.range();
                         match pyflakes::format::FormatSummary::try_from(value.as_ref()) {
                             Err(e) => {
                                 if self
@@ -2895,14 +2895,14 @@ where
                         func,
                         args,
                         keywords,
-                        Range::from(expr),
+                        expr.range(),
                     );
                 }
                 if self.settings.rules.enabled(Rule::CallDatetimeToday) {
-                    flake8_datetimez::rules::call_datetime_today(self, func, Range::from(expr));
+                    flake8_datetimez::rules::call_datetime_today(self, func, expr.range());
                 }
                 if self.settings.rules.enabled(Rule::CallDatetimeUtcnow) {
-                    flake8_datetimez::rules::call_datetime_utcnow(self, func, Range::from(expr));
+                    flake8_datetimez::rules::call_datetime_utcnow(self, func, expr.range());
                 }
                 if self
                     .settings
@@ -2912,7 +2912,7 @@ where
                     flake8_datetimez::rules::call_datetime_utcfromtimestamp(
                         self,
                         func,
-                        Range::from(expr),
+                        expr.range(),
                     );
                 }
                 if self
@@ -2925,7 +2925,7 @@ where
                         func,
                         args,
                         keywords,
-                        Range::from(expr),
+                        expr.range(),
                     );
                 }
                 if self.settings.rules.enabled(Rule::CallDatetimeFromtimestamp) {
@@ -2934,7 +2934,7 @@ where
                         func,
                         args,
                         keywords,
-                        Range::from(expr),
+                        expr.range(),
                     );
                 }
                 if self
@@ -2946,14 +2946,14 @@ where
                         self,
                         func,
                         args,
-                        Range::from(expr),
+                        expr.range(),
                     );
                 }
                 if self.settings.rules.enabled(Rule::CallDateToday) {
-                    flake8_datetimez::rules::call_date_today(self, func, Range::from(expr));
+                    flake8_datetimez::rules::call_date_today(self, func, expr.range());
                 }
                 if self.settings.rules.enabled(Rule::CallDateFromtimestamp) {
-                    flake8_datetimez::rules::call_date_fromtimestamp(self, func, Range::from(expr));
+                    flake8_datetimez::rules::call_date_fromtimestamp(self, func, expr.range());
                 }

                 // pygrep-hooks
@@ -3207,7 +3207,7 @@ where
                     Rule::PercentFormatStarRequiresSequence,
                     Rule::PercentFormatUnsupportedFormatCharacter,
                 ]) {
-                    let location = Range::from(expr);
+                    let location = expr.range();
                     match pyflakes::cformat::CFormatSummary::try_from(value.as_str()) {
                         Err(CFormatError {
                             typ: CFormatErrorType::UnsupportedFormatChar(c),
@@ -3309,7 +3309,7 @@ where
                 }
                 if self.settings.rules.enabled(Rule::PrintfStringFormatting) {
-                    pyupgrade::rules::printf_string_formatting(self, expr, right);
+                    pyupgrade::rules::printf_string_formatting(self, expr, right, self.locator);
                 }
                 if self.settings.rules.enabled(Rule::BadStringFormatType) {
                     pylint::rules::bad_string_format_type(self, expr, right);
@@ -3417,7 +3417,7 @@ where
                         left,
                         ops,
                         comparators,
-                        Range::from(expr),
+                        expr.range(),
                     );
                 }
@@ -3495,7 +3495,7 @@ where
             } => {
                 if self.ctx.in_type_definition && !self.ctx.in_literal && !self.ctx.in_f_string {
                     self.deferred.string_type_definitions.push((
-                        Range::from(expr),
+                        expr.range(),
                         value,
                         (self.ctx.in_annotation, self.ctx.in_type_checking_block),
                         (self.ctx.scope_stack.clone(), self.ctx.parents.clone()),
@@ -3506,10 +3506,9 @@ where
                     .rules
                     .enabled(Rule::HardcodedBindAllInterfaces)
                 {
-                    if let Some(diagnostic) = flake8_bandit::rules::hardcoded_bind_all_interfaces(
-                        value,
-                        &Range::from(expr),
-                    ) {
+                    if let Some(diagnostic) =
+                        flake8_bandit::rules::hardcoded_bind_all_interfaces(value, expr.range())
+                    {
                         self.diagnostics.push(diagnostic);
                     }
                 }
@@ -3979,13 +3978,12 @@ where
                     if self.ctx.scope().defines(name.as_str()) {
                         self.handle_node_store(
                             name,
-                            &Expr::new(
-                                name_range.location,
-                                name_range.end_location,
+                            &Expr::with_range(
                                 ExprKind::Name {
                                     id: name.to_string(),
                                     ctx: ExprContext::Store,
                                 },
+                                name_range,
                             ),
                         );
                     }
@@ -3993,13 +3991,12 @@ where
                     let definition = self.ctx.scope().get(name.as_str()).copied();
                     self.handle_node_store(
                         name,
-                        &Expr::new(
-                            name_range.location,
-                            name_range.end_location,
+                        &Expr::with_range(
                             ExprKind::Name {
                                 id: name.to_string(),
                                 ctx: ExprContext::Store,
                             },
+                            name_range,
                         ),
                     );
@@ -4108,7 +4105,7 @@ where
             runtime_usage: None,
             synthetic_usage: None,
             typing_usage: None,
-            range: Range::from(arg),
+            range: arg.range(),
             source: Some(*self.ctx.current_stmt()),
             context: self.ctx.execution_context(),
             exceptions: self.ctx.exceptions(),
@@ -4117,7 +4114,7 @@ where

         if self.settings.rules.enabled(Rule::AmbiguousVariableName) {
             if let Some(diagnostic) =
-                pycodestyle::rules::ambiguous_variable_name(&arg.node.arg, Range::from(arg))
+                pycodestyle::rules::ambiguous_variable_name(&arg.node.arg, arg.range())
             {
                 self.diagnostics.push(diagnostic);
             }
@@ -4152,7 +4149,7 @@ where
             runtime_usage: None,
             synthetic_usage: None,
             typing_usage: None,
-            range: Range::from(pattern),
+            range: pattern.range(),
             source: Some(*self.ctx.current_stmt()),
             context: self.ctx.execution_context(),
             exceptions: self.ctx.exceptions(),
@@ -4220,10 +4217,13 @@ impl<'a> Checker<'a> {
         );
         if binding.kind.is_loop_var() && existing_is_import {
             if self.settings.rules.enabled(Rule::ImportShadowedByLoopVar) {
+                #[allow(deprecated)]
+                let line = self.locator.compute_line_index(existing.range.start());
                 self.diagnostics.push(Diagnostic::new(
                     pyflakes::rules::ImportShadowedByLoopVar {
                         name: name.to_string(),
-                        line: existing.range.location.row(),
+                        line,
                     },
                     binding.range,
                 ));
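Diagnostics like ImportShadowedByLoopVar still report 1-based line numbers, which is why the hunk above recovers a line index from the offset behind `#[allow(deprecated)]`. A dependency-free sketch of that recovery step (the actual Locator caches a line index rather than rescanning; this function name and approach are illustrative only):

    fn compute_line_index(source: &str, offset: usize) -> usize {
        // Count newlines before `offset`; line numbers are 1-based.
        source[..offset].bytes().filter(|byte| *byte == b'\n').count() + 1
    }

    fn main() {
        // Offset 6 is the first byte of the second line.
        assert_eq!(compute_line_index("a = 1\nb = 2\n", 6), 2);
    }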
@@ -4239,10 +4239,13 @@ impl<'a> Checker<'a> {
             ))
         {
             if self.settings.rules.enabled(Rule::RedefinedWhileUnused) {
+                #[allow(deprecated)]
+                let line = self.locator.compute_line_index(existing.range.start());
                 let mut diagnostic = Diagnostic::new(
                     pyflakes::rules::RedefinedWhileUnused {
                         name: name.to_string(),
-                        line: existing.range.location.row(),
+                        line,
                     },
                     matches!(
                         binding.kind,
@@ -4257,9 +4260,9 @@ impl<'a> Checker<'a> {
                 );
                 if let Some(parent) = binding.source.as_ref() {
                     if matches!(parent.node, StmtKind::ImportFrom { .. })
-                        && parent.location.row() != binding.range.location.row()
+                        && parent.range().contains_range(binding.range)
                     {
-                        diagnostic.set_parent(parent.location);
+                        diagnostic.set_parent(parent.start());
                     }
                 }
                 self.diagnostics.push(diagnostic);
@@ -4327,9 +4330,9 @@ impl<'a> Checker<'a> {
         {
             let id = self.ctx.bindings.push(Binding {
                 kind: BindingKind::Builtin,
-                range: Range::default(),
+                range: TextRange::default(),
                 runtime_usage: None,
-                synthetic_usage: Some((ScopeId::global(), Range::default())),
+                synthetic_usage: Some((ScopeId::global(), TextRange::default())),
                 typing_usage: None,
                 source: None,
                 context: ExecutionContext::Runtime,
if let Some(index) = scope.get(id.as_str()) { if let Some(index) = scope.get(id.as_str()) {
// Mark the binding as used. // Mark the binding as used.
let context = self.ctx.execution_context(); let context = self.ctx.execution_context();
self.ctx.bindings[*index].mark_used(scope_id, Range::from(expr), context); self.ctx.bindings[*index].mark_used(scope_id, expr.range(), context);
if self.ctx.bindings[*index].kind.is_annotation() if self.ctx.bindings[*index].kind.is_annotation()
&& self.ctx.in_deferred_string_type_definition.is_none() && self.ctx.in_deferred_string_type_definition.is_none()
@ -4394,7 +4397,7 @@ impl<'a> Checker<'a> {
if let Some(index) = scope.get(full_name) { if let Some(index) = scope.get(full_name) {
self.ctx.bindings[*index].mark_used( self.ctx.bindings[*index].mark_used(
scope_id, scope_id,
Range::from(expr), expr.range(),
context, context,
); );
} }
@ -4411,7 +4414,7 @@ impl<'a> Checker<'a> {
if let Some(index) = scope.get(full_name.as_str()) { if let Some(index) = scope.get(full_name.as_str()) {
self.ctx.bindings[*index].mark_used( self.ctx.bindings[*index].mark_used(
scope_id, scope_id,
Range::from(expr), expr.range(),
context, context,
); );
} }
@ -4451,7 +4454,7 @@ impl<'a> Checker<'a> {
name: id.to_string(), name: id.to_string(),
sources, sources,
}, },
Range::from(expr), expr.range(),
)); ));
} }
return; return;
@ -4482,7 +4485,7 @@ impl<'a> Checker<'a> {
self.diagnostics.push(Diagnostic::new( self.diagnostics.push(Diagnostic::new(
pyflakes::rules::UndefinedName { name: id.clone() }, pyflakes::rules::UndefinedName { name: id.clone() },
Range::from(expr), expr.range(),
)); ));
} }
} }
@@ -4557,7 +4560,7 @@ impl<'a> Checker<'a> {
                 runtime_usage: None,
                 synthetic_usage: None,
                 typing_usage: None,
-                range: Range::from(expr),
+                range: expr.range(),
                 source: Some(*self.ctx.current_stmt()),
                 context: self.ctx.execution_context(),
                 exceptions: self.ctx.exceptions(),
@@ -4577,7 +4580,7 @@ impl<'a> Checker<'a> {
                 runtime_usage: None,
                 synthetic_usage: None,
                 typing_usage: None,
-                range: Range::from(expr),
+                range: expr.range(),
                 source: Some(*self.ctx.current_stmt()),
                 context: self.ctx.execution_context(),
                 exceptions: self.ctx.exceptions(),
@@ -4594,7 +4597,7 @@ impl<'a> Checker<'a> {
                 runtime_usage: None,
                 synthetic_usage: None,
                 typing_usage: None,
-                range: Range::from(expr),
+                range: expr.range(),
                 source: Some(*self.ctx.current_stmt()),
                 context: self.ctx.execution_context(),
                 exceptions: self.ctx.exceptions(),
@@ -4676,7 +4679,7 @@ impl<'a> Checker<'a> {
                 runtime_usage: None,
                 synthetic_usage: None,
                 typing_usage: None,
-                range: Range::from(expr),
+                range: expr.range(),
                 source: Some(*self.ctx.current_stmt()),
                 context: self.ctx.execution_context(),
                 exceptions: self.ctx.exceptions(),
@@ -4693,7 +4696,7 @@ impl<'a> Checker<'a> {
                 runtime_usage: None,
                 synthetic_usage: None,
                 typing_usage: None,
-                range: Range::from(expr),
+                range: expr.range(),
                 source: Some(*self.ctx.current_stmt()),
                 context: self.ctx.execution_context(),
                 exceptions: self.ctx.exceptions(),
@@ -4721,7 +4724,7 @@ impl<'a> Checker<'a> {
                     pyflakes::rules::UndefinedName {
                         name: id.to_string(),
                     },
-                    Range::from(expr),
+                    expr.range(),
                 ));
             }
@@ -4948,9 +4951,9 @@ impl<'a> Checker<'a> {
         }

         // Mark anything referenced in `__all__` as used.
-        let all_bindings: Option<(Vec<BindingId>, Range)> = {
+        let all_bindings: Option<(Vec<BindingId>, TextRange)> = {
             let global_scope = self.ctx.global_scope();
-            let all_names: Option<(&Vec<&str>, Range)> = global_scope
+            let all_names: Option<(&Vec<&str>, TextRange)> = global_scope
                 .get("__all__")
                 .map(|index| &self.ctx.bindings[*index])
                 .and_then(|binding| match &binding.kind {
@@ -4980,7 +4983,7 @@ impl<'a> Checker<'a> {
         }

         // Extract `__all__` names from the global scope.
-        let all_names: Option<(&[&str], Range)> = self
+        let all_names: Option<(&[&str], TextRange)> = self
             .ctx
             .global_scope()
             .get("__all__")
@@ -5023,7 +5026,7 @@ impl<'a> Checker<'a> {
         // F822
         if self.settings.rules.enabled(Rule::UndefinedExport) {
             if !self.path.ends_with("__init__.py") {
-                if let Some((names, range)) = &all_names {
+                if let Some((names, range)) = all_names {
                     diagnostics
                         .extend(pyflakes::rules::undefined_export(names, range, scope));
                 }
if let Some(indices) = self.ctx.shadowed_bindings.get(index) { if let Some(indices) = self.ctx.shadowed_bindings.get(index) {
for index in indices { for index in indices {
let rebound = &self.ctx.bindings[*index]; let rebound = &self.ctx.bindings[*index];
#[allow(deprecated)]
let line = self.locator.compute_line_index(binding.range.start());
let mut diagnostic = Diagnostic::new( let mut diagnostic = Diagnostic::new(
pyflakes::rules::RedefinedWhileUnused { pyflakes::rules::RedefinedWhileUnused {
name: (*name).to_string(), name: (*name).to_string(),
line: binding.range.location.row(), line,
}, },
matches!( matches!(
rebound.kind, rebound.kind,
@ -5126,9 +5132,9 @@ impl<'a> Checker<'a> {
); );
if let Some(parent) = &rebound.source { if let Some(parent) = &rebound.source {
if matches!(parent.node, StmtKind::ImportFrom { .. }) if matches!(parent.node, StmtKind::ImportFrom { .. })
&& parent.location.row() != rebound.range.location.row() && parent.range().contains_range(rebound.range)
{ {
diagnostic.set_parent(parent.location); diagnostic.set_parent(parent.start());
} }
}; };
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
@ -5178,7 +5184,7 @@ impl<'a> Checker<'a> {
if self.settings.rules.enabled(Rule::UnusedImport) { if self.settings.rules.enabled(Rule::UnusedImport) {
// Collect all unused imports by location. (Multiple unused imports at the same // Collect all unused imports by location. (Multiple unused imports at the same
// location indicates an `import from`.) // location indicates an `import from`.)
type UnusedImport<'a> = (&'a str, &'a Range); type UnusedImport<'a> = (&'a str, &'a TextRange);
type BindingContext<'a, 'b> = ( type BindingContext<'a, 'b> = (
&'a RefEquality<'b, Stmt>, &'a RefEquality<'b, Stmt>,
Option<&'a RefEquality<'b, Stmt>>, Option<&'a RefEquality<'b, Stmt>>,
@ -5213,16 +5219,16 @@ impl<'a> Checker<'a> {
let exceptions = binding.exceptions; let exceptions = binding.exceptions;
let child: &Stmt = defined_by.into(); let child: &Stmt = defined_by.into();
let diagnostic_lineno = binding.range.location.row(); let diagnostic_offset = binding.range.start();
let parent_lineno = if matches!(child.node, StmtKind::ImportFrom { .. }) { let parent_offset = if matches!(child.node, StmtKind::ImportFrom { .. }) {
Some(child.location.row()) Some(child.start())
} else { } else {
None None
}; };
if self.rule_is_ignored(Rule::UnusedImport, diagnostic_lineno) if self.rule_is_ignored(Rule::UnusedImport, diagnostic_offset)
|| parent_lineno.map_or(false, |parent_lineno| { || parent_offset.map_or(false, |parent_offset| {
self.rule_is_ignored(Rule::UnusedImport, parent_lineno) self.rule_is_ignored(Rule::UnusedImport, parent_offset)
}) })
{ {
ignored ignored
@ -5241,7 +5247,7 @@ impl<'a> Checker<'a> {
self.settings.ignore_init_module_imports && self.path.ends_with("__init__.py"); self.settings.ignore_init_module_imports && self.path.ends_with("__init__.py");
for ((defined_by, defined_in, exceptions), unused_imports) in unused for ((defined_by, defined_in, exceptions), unused_imports) in unused
.into_iter() .into_iter()
.sorted_by_key(|((defined_by, ..), ..)| defined_by.location) .sorted_by_key(|((defined_by, ..), ..)| defined_by.start())
{ {
let child: &Stmt = defined_by.into(); let child: &Stmt = defined_by.into();
let parent: Option<&Stmt> = defined_in.map(Into::into); let parent: Option<&Stmt> = defined_in.map(Into::into);
@ -5291,9 +5297,9 @@ impl<'a> Checker<'a> {
*range, *range,
); );
if matches!(child.node, StmtKind::ImportFrom { .. }) { if matches!(child.node, StmtKind::ImportFrom { .. }) {
diagnostic.set_parent(child.location); diagnostic.set_parent(child.start());
} }
if let Some(fix) = fix.as_ref() { if let Some(fix) = &fix {
diagnostic.set_fix(fix.clone()); diagnostic.set_fix(fix.clone());
} }
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
@ -5301,7 +5307,7 @@ impl<'a> Checker<'a> {
} }
for ((defined_by, .., exceptions), unused_imports) in ignored for ((defined_by, .., exceptions), unused_imports) in ignored
.into_iter() .into_iter()
.sorted_by_key(|((defined_by, ..), ..)| defined_by.location) .sorted_by_key(|((defined_by, ..), ..)| defined_by.start())
{ {
let child: &Stmt = defined_by.into(); let child: &Stmt = defined_by.into();
let multiple = unused_imports.len() > 1; let multiple = unused_imports.len() > 1;
@ -5323,7 +5329,7 @@ impl<'a> Checker<'a> {
*range, *range,
); );
if matches!(child.node, StmtKind::ImportFrom { .. }) { if matches!(child.node, StmtKind::ImportFrom { .. }) {
diagnostic.set_parent(child.location); diagnostic.set_parent(child.start());
} }
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
@ -5451,30 +5457,33 @@ impl<'a> Checker<'a> {
// Extract a `Docstring` from a `Definition`. // Extract a `Docstring` from a `Definition`.
let expr = definition.docstring.unwrap(); let expr = definition.docstring.unwrap();
let contents = self.locator.slice(expr); let contents = self.locator.slice(expr.range());
let indentation = self.locator.slice(Range::new(
Location::new(expr.location.row(), 0), let indentation = self.locator.slice(TextRange::new(
Location::new(expr.location.row(), expr.location.column()), self.locator.line_start(expr.start()),
expr.start(),
)); ));
if pydocstyle::helpers::should_ignore_docstring(contents) { if pydocstyle::helpers::should_ignore_docstring(contents) {
#[allow(deprecated)]
let location = self.locator.compute_source_location(expr.start());
warn_user!( warn_user!(
"Docstring at {}:{}:{} contains implicit string concatenation; ignoring...", "Docstring at {}:{}:{} contains implicit string concatenation; ignoring...",
relativize_path(self.path), relativize_path(self.path),
expr.location.row(), location.row,
expr.location.column() + 1 location.column
); );
continue; continue;
} }
// SAFETY: Safe for docstrings that pass `should_ignore_docstring`. // SAFETY: Safe for docstrings that pass `should_ignore_docstring`.
let body = str::raw_contents(contents).unwrap(); let body_range = str::raw_contents_range(contents).unwrap();
let docstring = Docstring { let docstring = Docstring {
kind: definition.kind, kind: definition.kind,
expr, expr,
contents, contents,
indentation, indentation,
body, body_range,
}; };
if !pydocstyle::rules::not_empty(self, &docstring) { if !pydocstyle::rules::not_empty(self, &docstring) {
@ -5624,7 +5633,7 @@ pub fn check_ast(
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
indexer: &Indexer, indexer: &Indexer,
noqa_line_for: &IntMap<usize, usize>, noqa_line_for: &NoqaMapping,
settings: &Settings, settings: &Settings,
autofix: flags::Autofix, autofix: flags::Autofix,
noqa: flags::Noqa, noqa: flags::Noqa,
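The substitution repeated throughout this file is the commit's core move: `Range::from(expr)` and stored row/column `Location`s give way to `expr.range()`, a pair of byte offsets, with 1-based rows recomputed only when a message actually needs one. A minimal, self-contained sketch of that model (the types below are illustrative stand-ins, not the real `ruff_text_size`/`Locator` API):

```rust
// Stand-in byte-offset range plus an on-demand line lookup, mirroring how
// `expr.range()` replaces `Range::from(expr)` and how rows are now computed
// lazily (e.g. for `RedefinedWhileUnused` above).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct TextRange {
    start: u32, // byte offset of the first character
    end: u32,   // byte offset past the last character
}

struct Locator<'a> {
    contents: &'a str,
}

impl Locator<'_> {
    /// Compute the 1-based line number for a byte offset by scanning the
    /// source. This is the expensive step that the offset-based model defers
    /// until a diagnostic is actually rendered.
    fn compute_line_index(&self, offset: u32) -> usize {
        self.contents[..offset as usize]
            .bytes()
            .filter(|&b| b == b'\n')
            .count()
            + 1
    }
}

fn main() {
    let source = "import os\nimport os\n";
    let locator = Locator { contents: source };
    // Hypothetical range of the second `import os` statement.
    let second_import = TextRange { start: 10, end: 19 };
    assert_eq!(locator.compute_line_index(second_import.start), 2);
}
```

Deferring the newline scan to rendering time appears to be the design win here: the hot path compares and stores plain `u32` offsets, and the `#[allow(deprecated)]` escape hatches mark the few rules that still surface line numbers.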
@@ -30,13 +30,11 @@ fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) ->
     for stmt in blocks.iter().flat_map(|block| &block.imports) {
         match &stmt.node {
             StmtKind::Import { names } => {
-                module_imports.extend(names.iter().map(|name| {
-                    ModuleImport::new(
-                        name.node.name.clone(),
-                        stmt.location,
-                        stmt.end_location.unwrap(),
-                    )
-                }));
+                module_imports.extend(
+                    names
+                        .iter()
+                        .map(|name| ModuleImport::new(name.node.name.clone(), stmt.range())),
+                );
             }
             StmtKind::ImportFrom {
                 module,
@@ -61,11 +59,7 @@ fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) ->
                     Cow::Owned(module_path[..module_path.len() - level].join("."))
                 };
                 module_imports.extend(names.iter().map(|name| {
-                    ModuleImport::new(
-                        format!("{}.{}", module, name.node.name),
-                        name.location,
-                        name.end_location.unwrap(),
-                    )
+                    ModuleImport::new(format!("{}.{}", module, name.node.name), name.range())
                 }));
             }
             _ => panic!("Expected StmtKind::Import | StmtKind::ImportFrom"),
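`ModuleImport::new` now takes a single `TextRange` instead of a `location`/`end_location` pair, which is why both call sites collapse to one line. A sketch of the narrowed constructor (field names are illustrative, not the real definition):

```rust
// One range replaces the old pair of row/column `Location`s, so the import
// map stores two u32 offsets per import instead of four row/column values.
#[derive(Debug, Clone, Copy)]
struct TextRange {
    start: u32,
    end: u32,
}

#[derive(Debug)]
struct ModuleImport {
    name: String,
    range: TextRange,
}

impl ModuleImport {
    fn new(name: String, range: TextRange) -> Self {
        Self { name, range }
    }
}

fn main() {
    // `from foo import bar`, with hypothetical offsets for the `bar` alias.
    let import = ModuleImport::new("foo.bar".to_string(), TextRange { start: 16, end: 19 });
    println!("{import:?}");
}
```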
@@ -1,9 +1,9 @@
-use rustpython_parser::ast::Location;
+use ruff_text_size::TextRange;
 use rustpython_parser::lexer::LexResult;

 use ruff_diagnostics::{Diagnostic, Fix};
 use ruff_python_ast::source_code::{Locator, Stylist};
-use ruff_python_ast::types::Range;
+use ruff_python_ast::token_kind::TokenKind;

 use crate::registry::{AsRule, Rule};
 use crate::rules::pycodestyle::rules::logical_lines::{
@@ -63,8 +63,7 @@ pub fn check_logical_lines(
             if settings.rules.enabled(kind.rule()) {
                 diagnostics.push(Diagnostic {
                     kind,
-                    location,
-                    end_location: location,
+                    range: TextRange::empty(location),
                     fix: Fix::empty(),
                     parent: None,
                 });
@@ -75,8 +74,7 @@ pub fn check_logical_lines(
             if settings.rules.enabled(kind.rule()) {
                 diagnostics.push(Diagnostic {
                     kind,
-                    location,
-                    end_location: location,
+                    range: TextRange::empty(location),
                     fix: Fix::empty(),
                     parent: None,
                 });
@@ -86,8 +84,7 @@ pub fn check_logical_lines(
             if settings.rules.enabled(kind.rule()) {
                 diagnostics.push(Diagnostic {
                     kind,
-                    location,
-                    end_location: location,
+                    range: TextRange::empty(location),
                     fix: Fix::empty(),
                     parent: None,
                 });
@@ -108,8 +105,7 @@ pub fn check_logical_lines(
             if settings.rules.enabled(kind.rule()) {
                 diagnostics.push(Diagnostic {
                     kind,
-                    location,
-                    end_location: location,
+                    range: TextRange::empty(location),
                     fix: Fix::empty(),
                     parent: None,
                 });
@@ -121,8 +117,7 @@ pub fn check_logical_lines(
             if settings.rules.enabled(kind.rule()) {
                 diagnostics.push(Diagnostic {
                     kind,
-                    location,
-                    end_location: location,
+                    range: TextRange::empty(location),
                     fix: Fix::empty(),
                     parent: None,
                 });
@@ -133,8 +128,7 @@ pub fn check_logical_lines(
             if settings.rules.enabled(kind.rule()) {
                 diagnostics.push(Diagnostic {
                     kind,
-                    location,
-                    end_location: location,
+                    range: TextRange::empty(location),
                     fix: Fix::empty(),
                     parent: None,
                 });
@@ -142,12 +136,13 @@ pub fn check_logical_lines(
             }
         }
         if line.flags().contains(TokenFlags::COMMENT) {
-            for (range, kind) in whitespace_before_comment(&line.tokens(), locator) {
+            for (range, kind) in
+                whitespace_before_comment(&line.tokens(), locator, prev_line.is_none())
+            {
                 if settings.rules.enabled(kind.rule()) {
                     diagnostics.push(Diagnostic {
                         kind,
-                        location: range.location,
-                        end_location: range.end_location,
+                        range,
                         fix: Fix::empty(),
                         parent: None,
                     });
@@ -167,12 +162,21 @@ pub fn check_logical_lines(
         }

         // Extract the indentation level.
-        let Some(start_loc) = line.first_token_location() else { continue; };
-        let start_line = locator.slice(Range::new(Location::new(start_loc.row(), 0), start_loc));
-        let indent_level = expand_indent(start_line);
+        let Some(first_token) = line.first_token() else {
+            continue;
+        };
+        let range = if first_token.kind() == TokenKind::Indent {
+            first_token.range()
+        } else {
+            TextRange::new(locator.line_start(first_token.start()), first_token.start())
+        };
+        let indent_level = expand_indent(locator.slice(range));
         let indent_size = 4;
-        for (location, kind) in indentation(
+        for kind in indentation(
             &line,
             prev_line.as_ref(),
             indent_char,
@@ -183,8 +187,7 @@ pub fn check_logical_lines(
             if settings.rules.enabled(kind.rule()) {
                 diagnostics.push(Diagnostic {
                     kind,
-                    location: Location::new(start_loc.row(), 0),
-                    end_location: location,
+                    range,
                     fix: Fix::empty(),
                     parent: None,
                 });
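`TextRange::empty(location)` replaces the old idiom of setting `end_location` equal to `location` for caret-style diagnostics. A stand-in sketch of what an empty range means:

```rust
// Where the old code stored `location`/`end_location` pairs pointing at the
// same spot, the new code uses a zero-width range anchored at one offset.
// `TextRange` here is a local stand-in.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct TextRange {
    start: u32,
    end: u32,
}

impl TextRange {
    /// An empty range `[offset, offset)`: a caret position, not a span.
    fn empty(offset: u32) -> Self {
        Self { start: offset, end: offset }
    }

    fn is_empty(self) -> bool {
        self.start == self.end
    }
}

fn main() {
    let caret = TextRange::empty(42);
    assert!(caret.is_empty());
    println!("{caret:?}");
}
```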
@@ -1,15 +1,13 @@
 //! `NoQA` enforcement and validation.

-use nohash_hasher::IntMap;
-use rustpython_parser::ast::Location;
+use itertools::Itertools;
+use ruff_text_size::{TextLen, TextRange, TextSize};

 use ruff_diagnostics::{Diagnostic, Edit};
-use ruff_python_ast::newlines::StrExt;
-use ruff_python_ast::types::Range;
+use ruff_python_ast::source_code::Locator;

-use crate::codes::NoqaCode;
 use crate::noqa;
-use crate::noqa::{Directive, FileExemption};
+use crate::noqa::{Directive, FileExemption, NoqaDirectives, NoqaMapping};
 use crate::registry::{AsRule, Rule};
 use crate::rule_redirects::get_redirect_target;
 use crate::rules::ruff::rules::{UnusedCodes, UnusedNOQA};
@@ -17,37 +15,25 @@ use crate::settings::{flags, Settings};
 pub fn check_noqa(
     diagnostics: &mut Vec<Diagnostic>,
-    contents: &str,
-    commented_lines: &[usize],
-    noqa_line_for: &IntMap<usize, usize>,
+    locator: &Locator,
+    comment_ranges: &[TextRange],
+    noqa_line_for: &NoqaMapping,
     settings: &Settings,
     autofix: flags::Autofix,
 ) -> Vec<usize> {
     let enforce_noqa = settings.rules.enabled(Rule::UnusedNOQA);
-    let lines: Vec<&str> = contents.universal_newlines().collect();

     // Identify any codes that are globally exempted (within the current file).
-    let exemption = noqa::file_exemption(&lines, commented_lines);
+    let exemption = noqa::file_exemption(locator.contents(), comment_ranges);

-    // Map from line number to `noqa` directive on that line, along with any codes
-    // that were matched by the directive.
-    let mut noqa_directives: IntMap<usize, (Directive, Vec<NoqaCode>)> = IntMap::default();
-
     // Extract all `noqa` directives.
-    if enforce_noqa {
-        for lineno in commented_lines {
-            noqa_directives
-                .entry(lineno - 1)
-                .or_insert_with(|| (noqa::extract_noqa_directive(lines[lineno - 1]), vec![]));
-        }
-    }
+    let mut noqa_directives = NoqaDirectives::from_commented_ranges(comment_ranges, locator);

     // Indices of diagnostics that were ignored by a `noqa` directive.
     let mut ignored_diagnostics = vec![];

     // Remove any ignored diagnostics.
-    for (index, diagnostic) in diagnostics.iter().enumerate() {
+    'outer: for (index, diagnostic) in diagnostics.iter().enumerate() {
         if matches!(diagnostic.kind.rule(), Rule::BlanketNOQA) {
             continue;
         }
@@ -68,92 +54,65 @@ pub fn check_noqa(
             FileExemption::None => {}
         }

-        let diagnostic_lineno = diagnostic.location.row();
-
-        // Is the violation ignored by a `noqa` directive on the parent line?
-        if let Some(parent_lineno) = diagnostic.parent.map(|location| location.row()) {
-            if parent_lineno != diagnostic_lineno {
-                let noqa_lineno = noqa_line_for.get(&parent_lineno).unwrap_or(&parent_lineno);
-                if commented_lines.contains(noqa_lineno) {
-                    let noqa = noqa_directives.entry(noqa_lineno - 1).or_insert_with(|| {
-                        (noqa::extract_noqa_directive(lines[noqa_lineno - 1]), vec![])
-                    });
-                    match noqa {
-                        (Directive::All(..), matches) => {
-                            matches.push(diagnostic.kind.rule().noqa_code());
-                            ignored_diagnostics.push(index);
-                            continue;
-                        }
-                        (Directive::Codes(.., codes, _), matches) => {
-                            if noqa::includes(diagnostic.kind.rule(), codes) {
-                                matches.push(diagnostic.kind.rule().noqa_code());
-                                ignored_diagnostics.push(index);
-                                continue;
-                            }
-                        }
-                        (Directive::None, ..) => {}
-                    }
-                }
-            }
-        }
-
-        // Is the diagnostic ignored by a `noqa` directive on the same line?
-        let noqa_lineno = noqa_line_for
-            .get(&diagnostic_lineno)
-            .unwrap_or(&diagnostic_lineno);
-        if commented_lines.contains(noqa_lineno) {
-            let noqa = noqa_directives
-                .entry(noqa_lineno - 1)
-                .or_insert_with(|| (noqa::extract_noqa_directive(lines[noqa_lineno - 1]), vec![]));
-            match noqa {
-                (Directive::All(..), matches) => {
-                    matches.push(diagnostic.kind.rule().noqa_code());
-                    ignored_diagnostics.push(index);
-                    continue;
-                }
-                (Directive::Codes(.., codes, _), matches) => {
-                    if noqa::includes(diagnostic.kind.rule(), codes) {
-                        matches.push(diagnostic.kind.rule().noqa_code());
-                        ignored_diagnostics.push(index);
-                        continue;
-                    }
-                }
-                (Directive::None, ..) => {}
-            }
-        }
-    }
+        let noqa_offsets = diagnostic
+            .parent
+            .into_iter()
+            .chain(std::iter::once(diagnostic.start()))
+            .map(|position| noqa_line_for.resolve(position))
+            .unique();
+
+        for noqa_offset in noqa_offsets {
+            if let Some(directive_line) = noqa_directives.find_line_with_directive_mut(noqa_offset)
+            {
+                let suppressed = match &directive_line.directive {
+                    Directive::All(..) => {
+                        directive_line
+                            .matches
+                            .push(diagnostic.kind.rule().noqa_code());
+                        ignored_diagnostics.push(index);
+                        true
+                    }
+                    Directive::Codes(.., codes, _) => {
+                        if noqa::includes(diagnostic.kind.rule(), codes) {
+                            directive_line
+                                .matches
+                                .push(diagnostic.kind.rule().noqa_code());
+                            ignored_diagnostics.push(index);
+                            true
+                        } else {
+                            false
+                        }
+                    }
+                    Directive::None => unreachable!(),
+                };
+
+                if suppressed {
+                    continue 'outer;
+                }
+            }
+        }
+    }

     // Enforce that the noqa directive was actually used (RUF100).
     if enforce_noqa {
-        for (row, (directive, matches)) in noqa_directives {
-            match directive {
-                Directive::All(leading_spaces, start_byte, end_byte, trailing_spaces) => {
-                    if matches.is_empty() {
-                        let start_char = lines[row][..start_byte].chars().count();
-                        let end_char =
-                            start_char + lines[row][start_byte..end_byte].chars().count();
-                        let mut diagnostic = Diagnostic::new(
-                            UnusedNOQA { codes: None },
-                            Range::new(
-                                Location::new(row + 1, start_char),
-                                Location::new(row + 1, end_char),
-                            ),
-                        );
+        for line in noqa_directives.lines() {
+            match &line.directive {
+                Directive::All(leading_spaces, noqa_range, trailing_spaces) => {
+                    if line.matches.is_empty() {
+                        let mut diagnostic =
+                            Diagnostic::new(UnusedNOQA { codes: None }, *noqa_range);
                         if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
                             diagnostic.set_fix(delete_noqa(
-                                row,
-                                lines[row],
-                                leading_spaces,
-                                start_byte,
-                                end_byte,
-                                trailing_spaces,
+                                *leading_spaces,
+                                *noqa_range,
+                                *trailing_spaces,
+                                locator,
                             ));
                         }
                         diagnostics.push(diagnostic);
                     }
                 }
-                Directive::Codes(leading_spaces, start_byte, end_byte, codes, trailing_spaces) => {
+                Directive::Codes(leading_spaces, range, codes, trailing_spaces) => {
                     let mut disabled_codes = vec![];
                     let mut unknown_codes = vec![];
                     let mut unmatched_codes = vec![];
@@ -166,7 +125,9 @@ pub fn check_noqa(
                             break;
                         }

-                        if matches.iter().any(|m| *m == code) || settings.external.contains(code) {
+                        if line.matches.iter().any(|m| *m == code)
+                            || settings.external.contains(code)
+                        {
                             valid_codes.push(code);
                         } else {
                             if let Ok(rule) = Rule::from_code(code) {
@@ -189,10 +150,6 @@ pub fn check_noqa(
                         && unknown_codes.is_empty()
                         && unmatched_codes.is_empty())
                     {
-                        let start_char = lines[row][..start_byte].chars().count();
-                        let end_char =
-                            start_char + lines[row][start_byte..end_byte].chars().count();
                         let mut diagnostic = Diagnostic::new(
                             UnusedNOQA {
                                 codes: Some(UnusedCodes {
@@ -210,26 +167,20 @@ pub fn check_noqa(
                                     .collect(),
                                 }),
                             },
-                            Range::new(
-                                Location::new(row + 1, start_char),
-                                Location::new(row + 1, end_char),
-                            ),
+                            *range,
                         );
                         if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
                             if valid_codes.is_empty() {
                                 diagnostic.set_fix(delete_noqa(
-                                    row,
-                                    lines[row],
-                                    leading_spaces,
-                                    start_byte,
-                                    end_byte,
-                                    trailing_spaces,
+                                    *leading_spaces,
+                                    *range,
+                                    *trailing_spaces,
+                                    locator,
                                 ));
                             } else {
-                                diagnostic.set_fix(Edit::replacement(
+                                diagnostic.set_fix(Edit::range_replacement(
                                     format!("# noqa: {}", valid_codes.join(", ")),
-                                    Location::new(row + 1, start_char),
-                                    Location::new(row + 1, end_char),
+                                    *range,
                                 ));
                             }
                         }
@@ -247,39 +198,37 @@ pub fn check_noqa(
 /// Generate a [`Edit`] to delete a `noqa` directive.
 fn delete_noqa(
-    row: usize,
-    line: &str,
-    leading_spaces: usize,
-    start_byte: usize,
-    end_byte: usize,
-    trailing_spaces: usize,
+    leading_spaces: TextSize,
+    noqa_range: TextRange,
+    trailing_spaces: TextSize,
+    locator: &Locator,
 ) -> Edit {
-    if start_byte - leading_spaces == 0 && end_byte == line.len() {
-        // Ex) `# noqa`
-        Edit::deletion(Location::new(row + 1, 0), Location::new(row + 2, 0))
-    } else if end_byte == line.len() {
-        // Ex) `x = 1 # noqa`
-        let start_char = line[..start_byte].chars().count();
-        let end_char = start_char + line[start_byte..end_byte].chars().count();
-        Edit::deletion(
-            Location::new(row + 1, start_char - leading_spaces),
-            Location::new(row + 1, end_char + trailing_spaces),
-        )
-    } else if line[end_byte..].trim_start().starts_with('#') {
-        // Ex) `x = 1 # noqa # type: ignore`
-        let start_char = line[..start_byte].chars().count();
-        let end_char = start_char + line[start_byte..end_byte].chars().count();
-        Edit::deletion(
-            Location::new(row + 1, start_char),
-            Location::new(row + 1, end_char + trailing_spaces),
-        )
-    } else {
-        // Ex) `x = 1 # noqa here`
-        let start_char = line[..start_byte].chars().count();
-        let end_char = start_char + line[start_byte..end_byte].chars().count();
-        Edit::deletion(
-            Location::new(row + 1, start_char + 1 + 1),
-            Location::new(row + 1, end_char + trailing_spaces),
-        )
-    }
+    let line_range = locator.line_range(noqa_range.start());
+
+    // Ex) `# noqa`
+    if line_range
+        == TextRange::new(
+            noqa_range.start() - leading_spaces,
+            noqa_range.end() + trailing_spaces,
+        )
+    {
+        let full_line_end = locator.full_line_end(line_range.end());
+        Edit::deletion(line_range.start(), full_line_end)
+    }
+    // Ex) `x = 1 # noqa`
+    else if noqa_range.end() + trailing_spaces == line_range.end() {
+        Edit::deletion(noqa_range.start() - leading_spaces, line_range.end())
+    }
+    // Ex) `x = 1 # noqa # type: ignore`
+    else if locator.contents()[usize::from(noqa_range.end() + trailing_spaces)..].starts_with('#')
+    {
+        Edit::deletion(noqa_range.start(), noqa_range.end() + trailing_spaces)
+    }
+    // Ex) `x = 1 # noqa here`
+    else {
+        Edit::deletion(
+            noqa_range.start() + "# ".text_len(),
+            noqa_range.end() + trailing_spaces,
+        )
+    }
 }
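The rewritten `delete_noqa` does all of its case analysis with offset arithmetic against the enclosing line's range instead of re-counting characters per row. A simplified, self-contained re-implementation of the same decision tree (the helper below is a local stand-in for `Locator::line_range`/`full_line_end`, and only two of the four cases are exercised):

```rust
// `line_bounds` stands in for the Locator's line-range helpers; offsets are
// plain `usize` rather than `TextSize`.
fn line_bounds(source: &str, offset: usize) -> (usize, usize) {
    let start = source[..offset].rfind('\n').map_or(0, |i| i + 1);
    let end = source[offset..]
        .find('\n')
        .map_or(source.len(), |i| offset + i + 1); // past the newline
    (start, end)
}

/// Byte range to delete for a `# noqa` comment spanning `[noqa_start, noqa_end)`.
fn deletion_range(source: &str, noqa_start: usize, noqa_end: usize) -> (usize, usize) {
    let (line_start, line_end) = line_bounds(source, noqa_start);
    let before = &source[line_start..noqa_start];
    if before.trim().is_empty() {
        // Ex) `# noqa` alone on its line: remove the whole line.
        (line_start, line_end)
    } else {
        // Ex) `x = 1  # noqa`: remove the comment plus the padding before it,
        // but keep the trailing newline.
        let padding = before.len() - before.trim_end().len();
        (noqa_start - padding, line_end.saturating_sub(1).max(noqa_end))
    }
}

fn main() {
    let source = "x = 1  # noqa\n";
    let (start, end) = deletion_range(source, 7, 13);
    assert_eq!(&source[start..end], "  # noqa");
    println!("delete bytes {start}..{end}");
}
```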
@@ -1,5 +1,6 @@
 //! Lint rules based on checking physical lines.

+use ruff_text_size::TextSize;
 use std::path::Path;

 use ruff_diagnostics::Diagnostic;
@@ -25,7 +26,7 @@ pub fn check_physical_lines(
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
indexer: &Indexer, indexer: &Indexer,
doc_lines: &[usize], doc_lines: &[TextSize],
settings: &Settings, settings: &Settings,
autofix: flags::Autofix, autofix: flags::Autofix,
) -> Vec<Diagnostic> { ) -> Vec<Diagnostic> {
@@ -55,20 +56,19 @@ pub fn check_physical_lines(
let fix_shebang_whitespace = let fix_shebang_whitespace =
autofix.into() && settings.rules.should_fix(Rule::ShebangLeadingWhitespace); autofix.into() && settings.rules.should_fix(Rule::ShebangLeadingWhitespace);
let mut commented_lines_iter = indexer.commented_lines().iter().peekable(); let mut commented_lines_iter = indexer.comment_ranges().iter().peekable();
let mut doc_lines_iter = doc_lines.iter().peekable(); let mut doc_lines_iter = doc_lines.iter().peekable();
let string_lines = indexer.triple_quoted_string_ranges();
let string_lines = indexer.string_ranges();
for (index, line) in locator.contents().universal_newlines().enumerate() { for (index, line) in locator.contents().universal_newlines().enumerate() {
while commented_lines_iter while commented_lines_iter
.next_if(|lineno| &(index + 1) == *lineno) .next_if(|comment_range| line.range().contains_range(**comment_range))
.is_some() .is_some()
{ {
if enforce_unnecessary_coding_comment { if enforce_unnecessary_coding_comment {
if index < 2 { if index < 2 {
if let Some(diagnostic) = if let Some(diagnostic) =
unnecessary_coding_comment(index, line, fix_unnecessary_coding_comment) unnecessary_coding_comment(&line, fix_unnecessary_coding_comment)
{ {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
@@ -76,11 +76,11 @@ pub fn check_physical_lines(
} }
if enforce_blanket_type_ignore { if enforce_blanket_type_ignore {
blanket_type_ignore(&mut diagnostics, index, line); blanket_type_ignore(&mut diagnostics, &line);
} }
if enforce_blanket_noqa { if enforce_blanket_noqa {
blanket_noqa(&mut diagnostics, index, line); blanket_noqa(&mut diagnostics, &line);
} }
if enforce_shebang_missing if enforce_shebang_missing
@@ -89,31 +89,31 @@ pub fn check_physical_lines(
|| enforce_shebang_newline || enforce_shebang_newline
|| enforce_shebang_python || enforce_shebang_python
{ {
let shebang = extract_shebang(line); let shebang = extract_shebang(&line);
if enforce_shebang_not_executable { if enforce_shebang_not_executable {
if let Some(diagnostic) = shebang_not_executable(path, index, &shebang) { if let Some(diagnostic) = shebang_not_executable(path, line.range(), &shebang) {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
} }
if enforce_shebang_missing { if enforce_shebang_missing {
if !has_any_shebang && matches!(shebang, ShebangDirective::Match(_, _, _, _)) { if !has_any_shebang && matches!(shebang, ShebangDirective::Match(..)) {
has_any_shebang = true; has_any_shebang = true;
} }
} }
if enforce_shebang_whitespace { if enforce_shebang_whitespace {
if let Some(diagnostic) = if let Some(diagnostic) =
shebang_whitespace(index, &shebang, fix_shebang_whitespace) shebang_whitespace(line.range(), &shebang, fix_shebang_whitespace)
{ {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
} }
if enforce_shebang_newline { if enforce_shebang_newline {
if let Some(diagnostic) = shebang_newline(index, &shebang) { if let Some(diagnostic) = shebang_newline(line.range(), &shebang, index == 0) {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
} }
if enforce_shebang_python { if enforce_shebang_python {
if let Some(diagnostic) = shebang_python(index, &shebang) { if let Some(diagnostic) = shebang_python(line.range(), &shebang) {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
} }
@@ -121,40 +121,40 @@ pub fn check_physical_lines(
} }
while doc_lines_iter while doc_lines_iter
.next_if(|lineno| &(index + 1) == *lineno) .next_if(|doc_line_start| line.range().contains(**doc_line_start))
.is_some() .is_some()
{ {
if enforce_doc_line_too_long { if enforce_doc_line_too_long {
if let Some(diagnostic) = doc_line_too_long(index, line, settings) { if let Some(diagnostic) = doc_line_too_long(&line, settings) {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
} }
} }
if enforce_mixed_spaces_and_tabs { if enforce_mixed_spaces_and_tabs {
if let Some(diagnostic) = mixed_spaces_and_tabs(index, line) { if let Some(diagnostic) = mixed_spaces_and_tabs(&line) {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
} }
if enforce_line_too_long { if enforce_line_too_long {
if let Some(diagnostic) = line_too_long(index, line, settings) { if let Some(diagnostic) = line_too_long(&line, settings) {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
} }
if enforce_bidirectional_unicode { if enforce_bidirectional_unicode {
diagnostics.extend(pylint::rules::bidirectional_unicode(index, line)); diagnostics.extend(pylint::rules::bidirectional_unicode(&line));
} }
if enforce_trailing_whitespace || enforce_blank_line_contains_whitespace { if enforce_trailing_whitespace || enforce_blank_line_contains_whitespace {
if let Some(diagnostic) = trailing_whitespace(index, line, settings, autofix) { if let Some(diagnostic) = trailing_whitespace(&line, settings, autofix) {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
} }
if enforce_tab_indentation { if enforce_tab_indentation {
if let Some(diagnostic) = tab_indentation(index + 1, line, string_lines) { if let Some(diagnostic) = tab_indentation(&line, string_lines) {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
} }
@@ -197,7 +197,7 @@ mod tests {
let line = "'\u{4e9c}' * 2"; // 7 in UTF-32, 9 in UTF-8. let line = "'\u{4e9c}' * 2"; // 7 in UTF-32, 9 in UTF-8.
let locator = Locator::new(line); let locator = Locator::new(line);
let tokens: Vec<_> = lex(line, Mode::Module).collect(); let tokens: Vec<_> = lex(line, Mode::Module).collect();
let indexer: Indexer = tokens.as_slice().into(); let indexer = Indexer::from_tokens(&tokens, &locator);
let stylist = Stylist::from_tokens(&tokens, &locator); let stylist = Stylist::from_tokens(&tokens, &locator);
let check_with_max_line_length = |line_length: usize| { let check_with_max_line_length = |line_length: usize| {
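Throughout this file, `(index, line)` pairs become `&line` values that know their own byte range, and the `commented_lines: &[usize]` set of line numbers becomes `comment_ranges: &[TextRange]` tested with `contains`/`contains_range`. A stand-in sketch of lines that carry offsets (the real code gets this from `Locator` and its universal-newline iterator; `split_inclusive` plays that role here):

```rust
struct Line<'a> {
    text: &'a str,
    start: usize, // byte offset of the line within the file
}

impl Line<'_> {
    fn contains(&self, offset: usize) -> bool {
        (self.start..self.start + self.text.len()).contains(&offset)
    }
}

fn lines_with_offsets(source: &str) -> impl Iterator<Item = Line<'_>> + '_ {
    source.split_inclusive('\n').scan(0, |start, text| {
        let line = Line { text, start: *start };
        *start += text.len();
        Some(line)
    })
}

fn main() {
    let source = "x = 1\n# comment\ny = 2\n";
    let comment_start = source.find('#').unwrap();
    for (index, line) in lines_with_offsets(source).enumerate() {
        if line.contains(comment_start) {
            println!("comment starts on line {}", index + 1); // line 2
        }
    }
}
```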
@@ -64,7 +64,7 @@ pub fn check_tokens(
// RUF001, RUF002, RUF003 // RUF001, RUF002, RUF003
if enforce_ambiguous_unicode_character { if enforce_ambiguous_unicode_character {
let mut state_machine = StateMachine::default(); let mut state_machine = StateMachine::default();
for &(start, ref tok, end) in tokens.iter().flatten() { for &(ref tok, range) in tokens.iter().flatten() {
let is_docstring = if enforce_ambiguous_unicode_character { let is_docstring = if enforce_ambiguous_unicode_character {
state_machine.consume(tok) state_machine.consume(tok)
} else { } else {
@@ -74,8 +74,7 @@ pub fn check_tokens(
if matches!(tok, Tok::String { .. } | Tok::Comment(_)) { if matches!(tok, Tok::String { .. } | Tok::Comment(_)) {
diagnostics.extend(ruff::rules::ambiguous_unicode_character( diagnostics.extend(ruff::rules::ambiguous_unicode_character(
locator, locator,
start, range,
end,
if matches!(tok, Tok::String { .. }) { if matches!(tok, Tok::String { .. }) {
if is_docstring { if is_docstring {
Context::Docstring Context::Docstring
@@ -94,10 +93,10 @@ pub fn check_tokens(
// ERA001 // ERA001
if enforce_commented_out_code { if enforce_commented_out_code {
for (start, tok, end) in tokens.iter().flatten() { for (tok, range) in tokens.iter().flatten() {
if matches!(tok, Tok::Comment(_)) { if matches!(tok, Tok::Comment(_)) {
if let Some(diagnostic) = if let Some(diagnostic) =
eradicate::rules::commented_out_code(locator, *start, *end, settings, autofix) eradicate::rules::commented_out_code(locator, *range, settings, autofix)
{ {
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
@@ -107,12 +106,11 @@ pub fn check_tokens(
// W605 // W605
if enforce_invalid_escape_sequence { if enforce_invalid_escape_sequence {
for (start, tok, end) in tokens.iter().flatten() { for (tok, range) in tokens.iter().flatten() {
if matches!(tok, Tok::String { .. }) { if matches!(tok, Tok::String { .. }) {
diagnostics.extend(pycodestyle::rules::invalid_escape_sequence( diagnostics.extend(pycodestyle::rules::invalid_escape_sequence(
locator, locator,
*start, *range,
*end,
autofix.into() && settings.rules.should_fix(Rule::InvalidEscapeSequence), autofix.into() && settings.rules.should_fix(Rule::InvalidEscapeSequence),
)); ));
} }
@@ -120,10 +118,10 @@ pub fn check_tokens(
} }
// PLE2510, PLE2512, PLE2513 // PLE2510, PLE2512, PLE2513
if enforce_invalid_string_character { if enforce_invalid_string_character {
for (start, tok, end) in tokens.iter().flatten() { for (tok, range) in tokens.iter().flatten() {
if matches!(tok, Tok::String { .. }) { if matches!(tok, Tok::String { .. }) {
diagnostics.extend( diagnostics.extend(
pylint::rules::invalid_string_characters(locator, *start, *end, autofix.into()) pylint::rules::invalid_string_characters(locator, *range, autofix.into())
.into_iter() .into_iter()
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())), .filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
); );
@@ -155,6 +153,7 @@ pub fn check_tokens(
flake8_implicit_str_concat::rules::implicit( flake8_implicit_str_concat::rules::implicit(
tokens, tokens,
&settings.flake8_implicit_str_concat, &settings.flake8_implicit_str_concat,
locator,
) )
.into_iter() .into_iter()
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())), .filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
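The lexer's spanned tokens collapse from `(start, tok, end)` triples to `(tok, range)` pairs, which every token-based rule now destructures the same way. A sketch with stand-in types (not the rustpython-parser definitions):

```rust
#[derive(Debug, Clone, Copy)]
struct TextRange {
    start: u32,
    end: u32,
}

#[derive(Debug)]
enum Tok<'a> {
    Comment(&'a str),
    String { value: &'a str },
}

fn main() {
    let tokens = vec![
        (Tok::String { value: "'abc'" }, TextRange { start: 0, end: 5 }),
        (Tok::Comment("# note"), TextRange { start: 7, end: 13 }),
    ];
    // One pair to destructure instead of two positions to thread around.
    for (tok, range) in &tokens {
        match tok {
            Tok::Comment(text) => println!("comment {text} at {}..{}", range.start, range.end),
            Tok::String { value } => println!("string {value} at {}..{}", range.start, range.end),
        }
    }
}
```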
@@ -1,8 +1,15 @@
 use crate::registry::{Linter, Rule};
+use std::fmt::Formatter;

 #[derive(PartialEq, Eq, PartialOrd, Ord)]
 pub struct NoqaCode(&'static str, &'static str);

+impl std::fmt::Debug for NoqaCode {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        std::fmt::Display::fmt(self, f)
+    }
+}
+
 impl std::fmt::Display for NoqaCode {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
         write!(f, "{}{}", self.0, self.1)
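The new `Debug` impl simply forwards to `Display`, plausibly so that codes print as `E501` rather than `NoqaCode("E5", "01")` in debug output and snapshots; that motivation is inferred, but the pattern itself is reproduced below:

```rust
use std::fmt;

struct NoqaCode(&'static str, &'static str);

impl fmt::Display for NoqaCode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}{}", self.0, self.1)
    }
}

// Delegate Debug to Display so `{:?}` renders the joined code.
impl fmt::Debug for NoqaCode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

fn main() {
    assert_eq!(format!("{:?}", NoqaCode("E5", "01")), "E501");
}
```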
@@ -1,8 +1,9 @@
 //! Extract `# noqa` and `# isort: skip` directives from tokenized source.

+use crate::noqa::NoqaMapping;
 use bitflags::bitflags;
-use nohash_hasher::{IntMap, IntSet};
-use rustpython_parser::ast::Location;
+use ruff_python_ast::source_code::{Indexer, Locator};
+use ruff_text_size::{TextLen, TextRange, TextSize};
 use rustpython_parser::lexer::LexResult;
 use rustpython_parser::Tok;
@@ -11,7 +12,7 @@ use crate::settings::Settings;
bitflags! { bitflags! {
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct Flags: u8 { pub struct Flags: u8 {
const NOQA = 0b0000_0001; const NOQA = 0b0000_0001;
const ISORT = 0b0000_0010; const ISORT = 0b0000_0010;
} }
} }
@@ -30,27 +31,50 @@ impl Flags {
} }
} }
#[derive(Default)] #[derive(Default, Debug)]
pub struct IsortDirectives { pub struct IsortDirectives {
pub exclusions: IntSet<usize>, /// Ranges for which sorting is disabled
pub splits: Vec<usize>, pub exclusions: Vec<TextRange>,
/// Text positions at which splits should be inserted
pub splits: Vec<TextSize>,
pub skip_file: bool, pub skip_file: bool,
} }
impl IsortDirectives {
pub fn is_excluded(&self, offset: TextSize) -> bool {
for range in &self.exclusions {
if range.contains(offset) {
return true;
}
if range.start() > offset {
break;
}
}
false
}
}
pub struct Directives { pub struct Directives {
pub noqa_line_for: IntMap<usize, usize>, pub noqa_line_for: NoqaMapping,
pub isort: IsortDirectives, pub isort: IsortDirectives,
} }
pub fn extract_directives(lxr: &[LexResult], flags: Flags) -> Directives { pub fn extract_directives(
lxr: &[LexResult],
flags: Flags,
locator: &Locator,
indexer: &Indexer,
) -> Directives {
Directives { Directives {
noqa_line_for: if flags.contains(Flags::NOQA) { noqa_line_for: if flags.contains(Flags::NOQA) {
extract_noqa_line_for(lxr) extract_noqa_line_for(lxr, locator, indexer)
} else { } else {
IntMap::default() NoqaMapping::default()
}, },
isort: if flags.contains(Flags::ISORT) { isort: if flags.contains(Flags::ISORT) {
extract_isort_directives(lxr) extract_isort_directives(lxr, locator)
} else { } else {
IsortDirectives::default() IsortDirectives::default()
}, },
@@ -58,48 +82,92 @@ pub fn extract_directives(lxr: &[LexResult], flags: Flags) -> Directives {
} }
/// Extract a mapping from logical line to noqa line. /// Extract a mapping from logical line to noqa line.
pub fn extract_noqa_line_for(lxr: &[LexResult]) -> IntMap<usize, usize> { pub fn extract_noqa_line_for(
let mut noqa_line_for: IntMap<usize, usize> = IntMap::default(); lxr: &[LexResult],
let mut prev_non_newline: Option<(&Location, &Tok, &Location)> = None; locator: &Locator,
for (start, tok, end) in lxr.iter().flatten() { indexer: &Indexer,
if matches!(tok, Tok::EndOfFile) { ) -> NoqaMapping {
break; let mut string_mappings = Vec::new();
}
// For multi-line strings, we expect `noqa` directives on the last line of the for (tok, range) in lxr.iter().flatten() {
// string. match tok {
if matches!(tok, Tok::String { .. }) && end.row() > start.row() { Tok::EndOfFile => {
for i in start.row()..end.row() { break;
noqa_line_for.insert(i, end.row());
} }
}
// For continuations, we expect `noqa` directives on the last line of the // For multi-line strings, we expect `noqa` directives on the last line of the
// continuation. // string.
if matches!( Tok::String {
tok, triple_quoted: true,
Tok::Newline | Tok::NonLogicalNewline | Tok::Comment(..) ..
) { } => {
if let Some((.., end)) = prev_non_newline { if locator.contains_line_break(*range) {
for i in end.row()..start.row() { string_mappings.push(*range);
noqa_line_for.insert(i, start.row());
} }
} }
prev_non_newline = None;
} else if prev_non_newline.is_none() { _ => {}
prev_non_newline = Some((start, tok, end));
} }
} }
noqa_line_for
let mut continuation_mappings = Vec::new();
// For continuations, we expect `noqa` directives on the last line of the
// continuation.
let mut last: Option<TextRange> = None;
for continuation_line in indexer.continuation_line_starts() {
let line_end = locator.full_line_end(*continuation_line);
if let Some(last_range) = last.take() {
if last_range.end() == *continuation_line {
last = Some(TextRange::new(last_range.start(), line_end));
continue;
}
// new continuation
continuation_mappings.push(last_range);
}
last = Some(TextRange::new(*continuation_line, line_end));
}
if let Some(last_range) = last.take() {
continuation_mappings.push(last_range);
}
// Merge the mappings in sorted order
let mut mappings =
NoqaMapping::with_capacity(continuation_mappings.len() + string_mappings.len());
let mut continuation_mappings = continuation_mappings.into_iter().peekable();
let mut string_mappings = string_mappings.into_iter().peekable();
while let (Some(continuation), Some(string)) =
(continuation_mappings.peek(), string_mappings.peek())
{
if continuation.start() <= string.start() {
mappings.push_mapping(continuation_mappings.next().unwrap());
} else {
mappings.push_mapping(string_mappings.next().unwrap());
}
}
for mapping in continuation_mappings {
mappings.push_mapping(mapping);
}
for mapping in string_mappings {
mappings.push_mapping(mapping);
}
mappings
} }
/// Extract a set of lines over which to disable isort. /// Extract a set of ranges over which to disable isort.
pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives { pub fn extract_isort_directives(lxr: &[LexResult], locator: &Locator) -> IsortDirectives {
let mut exclusions: IntSet<usize> = IntSet::default(); let mut exclusions: Vec<TextRange> = Vec::default();
let mut splits: Vec<usize> = Vec::default(); let mut splits: Vec<TextSize> = Vec::default();
let mut off: Option<Location> = None; let mut off: Option<TextSize> = None;
let mut last: Option<Location> = None;
for &(start, ref tok, end) in lxr.iter().flatten() {
last = Some(end);
for &(ref tok, range) in lxr.iter().flatten() {
let Tok::Comment(comment_text) = tok else { let Tok::Comment(comment_text) = tok else {
continue; continue;
}; };
@@ -109,7 +177,7 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
// required to include the space, and must appear on their own lines. // required to include the space, and must appear on their own lines.
let comment_text = comment_text.trim_end(); let comment_text = comment_text.trim_end();
if matches!(comment_text, "# isort: split" | "# ruff: isort: split") { if matches!(comment_text, "# isort: split" | "# ruff: isort: split") {
splits.push(start.row()); splits.push(range.start());
} else if matches!( } else if matches!(
comment_text, comment_text,
"# isort: skip_file" "# isort: skip_file"
@@ -123,30 +191,25 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
}; };
} else if off.is_some() { } else if off.is_some() {
if comment_text == "# isort: on" || comment_text == "# ruff: isort: on" { if comment_text == "# isort: on" || comment_text == "# ruff: isort: on" {
if let Some(start) = off { if let Some(exclusion_start) = off {
for row in start.row() + 1..=end.row() { exclusions.push(TextRange::new(exclusion_start, range.start()));
exclusions.insert(row);
}
} }
off = None; off = None;
} }
} else { } else {
if comment_text.contains("isort: skip") || comment_text.contains("isort:skip") { if comment_text.contains("isort: skip") || comment_text.contains("isort:skip") {
exclusions.insert(start.row()); exclusions.push(locator.line_range(range.start()));
} else if comment_text == "# isort: off" || comment_text == "# ruff: isort: off" { } else if comment_text == "# isort: off" || comment_text == "# ruff: isort: off" {
off = Some(start); off = Some(range.start());
} }
} }
} }
if let Some(start) = off { if let Some(start) = off {
// Enforce unterminated `isort: off`. // Enforce unterminated `isort: off`.
if let Some(end) = last { exclusions.push(TextRange::new(start, locator.contents().text_len()));
for row in start.row() + 1..=end.row() {
exclusions.insert(row);
}
}
} }
IsortDirectives { IsortDirectives {
exclusions, exclusions,
splits, splits,
@@ -156,120 +219,98 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use nohash_hasher::{IntMap, IntSet}; use ruff_python_ast::source_code::{Indexer, Locator};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::lexer::LexResult; use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode}; use rustpython_parser::{lexer, Mode};
use crate::directives::{extract_isort_directives, extract_noqa_line_for}; use crate::directives::{extract_isort_directives, extract_noqa_line_for};
use crate::noqa::NoqaMapping;
fn noqa_mappings(contents: &str) -> NoqaMapping {
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let indexer = Indexer::from_tokens(&lxr, &locator);
extract_noqa_line_for(&lxr, &locator, &indexer)
}
#[test] #[test]
fn noqa_extraction() { fn noqa_extraction() {
let lxr: Vec<LexResult> = lexer::lex( let contents = "x = 1
"x = 1 y = 2 \
y = 2 + 1
z = x + 1", z = x + 1";
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::lex( assert_eq!(noqa_mappings(contents), NoqaMapping::default());
"
let contents = "
x = 1 x = 1
y = 2 y = 2
z = x + 1", z = x + 1";
Mode::Module, assert_eq!(noqa_mappings(contents), NoqaMapping::default());
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::lex( let contents = "x = 1
"x = 1
y = 2 y = 2
z = x + 1 z = x + 1
", ";
Mode::Module, assert_eq!(noqa_mappings(contents), NoqaMapping::default());
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::lex( let contents = "x = 1
"x = 1
y = 2 y = 2
z = x + 1 z = x + 1
", ";
Mode::Module, assert_eq!(noqa_mappings(contents), NoqaMapping::default());
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::lex( let contents = "x = '''abc
"x = '''abc
def def
ghi ghi
''' '''
y = 2 y = 2
z = x + 1", z = x + 1";
Mode::Module,
)
.collect();
assert_eq!( assert_eq!(
extract_noqa_line_for(&lxr), noqa_mappings(contents),
IntMap::from_iter([(1, 4), (2, 4), (3, 4)]) NoqaMapping::from_iter([TextRange::new(TextSize::from(4), TextSize::from(22)),])
); );
let lxr: Vec<LexResult> = lexer::lex( let contents = "x = 1
"x = 1
y = '''abc y = '''abc
def def
ghi ghi
''' '''
z = 2", z = 2";
Mode::Module,
)
.collect();
assert_eq!( assert_eq!(
extract_noqa_line_for(&lxr), noqa_mappings(contents),
IntMap::from_iter([(2, 5), (3, 5), (4, 5)]) NoqaMapping::from_iter([TextRange::new(TextSize::from(10), TextSize::from(28))])
); );
let lxr: Vec<LexResult> = lexer::lex( let contents = "x = 1
"x = 1
y = '''abc y = '''abc
def def
ghi ghi
'''", '''";
Mode::Module,
)
.collect();
assert_eq!( assert_eq!(
extract_noqa_line_for(&lxr), noqa_mappings(contents),
IntMap::from_iter([(2, 5), (3, 5), (4, 5)]) NoqaMapping::from_iter([TextRange::new(TextSize::from(10), TextSize::from(28))])
); );
let lxr: Vec<LexResult> = lexer::lex( let contents = r#"x = \
r#"x = \ 1"#;
1"#, assert_eq!(
Mode::Module, noqa_mappings(contents),
) NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(6))])
.collect(); );
assert_eq!(extract_noqa_line_for(&lxr), IntMap::from_iter([(1, 2)]));
let lxr: Vec<LexResult> = lexer::lex( let contents = r#"from foo import \
r#"from foo import \
bar as baz, \ bar as baz, \
qux as quux"#, qux as quux"#;
Mode::Module,
)
.collect();
assert_eq!( assert_eq!(
extract_noqa_line_for(&lxr), noqa_mappings(contents),
IntMap::from_iter([(1, 3), (2, 3)]) NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(36))])
); );
let lxr: Vec<LexResult> = lexer::lex( let contents = r#"
r#"
# Foo # Foo
from foo import \ from foo import \
bar as baz, \ bar as baz, \
@@ -277,13 +318,14 @@ from foo import \
x = \ x = \
1 1
y = \ y = \
2"#, 2"#;
Mode::Module,
)
.collect();
assert_eq!( assert_eq!(
extract_noqa_line_for(&lxr), noqa_mappings(contents),
IntMap::from_iter([(3, 5), (4, 5), (6, 7), (8, 9)]) NoqaMapping::from_iter([
TextRange::new(TextSize::from(7), TextSize::from(43)),
TextRange::new(TextSize::from(65), TextSize::from(71)),
TextRange::new(TextSize::from(77), TextSize::from(83)),
])
); );
} }
@@ -293,7 +335,10 @@ y = \
y = 2 y = 2
z = x + 1"; z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect(); let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default()); assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
let contents = "# isort: off let contents = "# isort: off
x = 1 x = 1
@@ -302,8 +347,8 @@ y = 2
z = x + 1"; z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect(); let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!( assert_eq!(
extract_isort_directives(&lxr).exclusions, extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
IntSet::from_iter([2, 3, 4]) Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(25))])
); );
let contents = "# isort: off let contents = "# isort: off
@@ -315,8 +360,8 @@ z = x + 1
# isort: on"; # isort: on";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect(); let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!( assert_eq!(
extract_isort_directives(&lxr).exclusions, extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
IntSet::from_iter([2, 3, 4, 5]) Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(38))])
); );
let contents = "# isort: off let contents = "# isort: off
@@ -325,8 +370,8 @@ y = 2
z = x + 1"; z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect(); let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!( assert_eq!(
extract_isort_directives(&lxr).exclusions, extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
IntSet::from_iter([2, 3, 4]) Vec::from_iter([TextRange::at(TextSize::from(0), contents.text_len())])
); );
let contents = "# isort: skip_file let contents = "# isort: skip_file
@@ -334,7 +379,10 @@ x = 1
y = 2 y = 2
z = x + 1"; z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect(); let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default()); assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
let contents = "# isort: off let contents = "# isort: off
x = 1 x = 1
@@ -343,7 +391,10 @@ y = 2
# isort: skip_file # isort: skip_file
z = x + 1"; z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect(); let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default()); assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
} }
#[test] #[test]
@@ -352,19 +403,28 @@ z = x + 1";
y = 2 y = 2
z = x + 1"; z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect(); let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, Vec::<usize>::new()); assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
Vec::new()
);
let contents = "x = 1 let contents = "x = 1
y = 2 y = 2
# isort: split # isort: split
z = x + 1"; z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect(); let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![3]); assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
vec![TextSize::from(12)]
);
let contents = "x = 1 let contents = "x = 1
y = 2 # isort: split y = 2 # isort: split
z = x + 1"; z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect(); let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![2]); assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
vec![TextSize::from(13)]
);
} }
} }
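The new `IsortDirectives::is_excluded` can `break` early because exclusion ranges are pushed while scanning tokens in source order, so they are sorted by start offset. A self-contained sketch of the same lookup (stand-in `TextRange`):

```rust
#[derive(Debug, Clone, Copy)]
struct TextRange {
    start: u32,
    end: u32,
}

impl TextRange {
    fn contains(&self, offset: u32) -> bool {
        self.start <= offset && offset < self.end
    }
}

fn is_excluded(exclusions: &[TextRange], offset: u32) -> bool {
    for range in exclusions {
        if range.contains(offset) {
            return true;
        }
        // Ranges are in source order: once one starts past `offset`,
        // no later range can contain it.
        if range.start > offset {
            break;
        }
    }
    false
}

fn main() {
    let exclusions = [
        TextRange { start: 0, end: 25 },
        TextRange { start: 60, end: 90 },
    ];
    assert!(is_excluded(&exclusions, 10));
    assert!(!is_excluded(&exclusions, 40));
    assert!(is_excluded(&exclusions, 75));
}
```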
@@ -1,8 +1,10 @@
 //! Doc line extraction. In this context, a doc line is a line consisting of a
 //! standalone comment or a constant string statement.

+use ruff_text_size::{TextRange, TextSize};
 use std::iter::FusedIterator;

+use ruff_python_ast::source_code::Locator;
 use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite};
 use rustpython_parser::lexer::LexResult;
 use rustpython_parser::Tok;
@@ -11,46 +13,56 @@ use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor; use ruff_python_ast::visitor::Visitor;
/// Extract doc lines (standalone comments) from a token sequence. /// Extract doc lines (standalone comments) from a token sequence.
pub fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines { pub fn doc_lines_from_tokens<'a>(lxr: &'a [LexResult], locator: &'a Locator<'a>) -> DocLines<'a> {
DocLines::new(lxr) DocLines::new(lxr, locator)
} }
pub struct DocLines<'a> { pub struct DocLines<'a> {
inner: std::iter::Flatten<core::slice::Iter<'a, LexResult>>, inner: std::iter::Flatten<core::slice::Iter<'a, LexResult>>,
prev: Option<usize>, locator: &'a Locator<'a>,
prev: TextSize,
} }
impl<'a> DocLines<'a> { impl<'a> DocLines<'a> {
fn new(lxr: &'a [LexResult]) -> Self { fn new(lxr: &'a [LexResult], locator: &'a Locator) -> Self {
Self { Self {
inner: lxr.iter().flatten(), inner: lxr.iter().flatten(),
prev: None, locator,
prev: TextSize::default(),
} }
} }
} }
impl Iterator for DocLines<'_> { impl Iterator for DocLines<'_> {
type Item = usize; type Item = TextSize;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
let mut at_start_of_line = true;
loop { loop {
let (start, tok, end) = self.inner.next()?; let (tok, range) = self.inner.next()?;
match tok { match tok {
Tok::Indent | Tok::Dedent | Tok::Newline => continue,
Tok::Comment(..) => { Tok::Comment(..) => {
if let Some(prev) = self.prev { if at_start_of_line
if start.row() > prev { || self
break Some(start.row()); .locator
} .contains_line_break(TextRange::new(self.prev, range.start()))
} else { {
break Some(start.row()); break Some(range.start());
} }
} }
_ => {} Tok::Newline => {
at_start_of_line = true;
}
Tok::Indent | Tok::Dedent => {
// ignore
}
_ => {
at_start_of_line = false;
}
} }
self.prev = Some(end.row()); self.prev = range.end();
} }
} }
} }
@@ -59,7 +71,7 @@ impl FusedIterator for DocLines<'_> {}
#[derive(Default)] #[derive(Default)]
struct StringLinesVisitor { struct StringLinesVisitor {
string_lines: Vec<usize>, string_lines: Vec<TextSize>,
} }
impl Visitor<'_> for StringLinesVisitor { impl Visitor<'_> for StringLinesVisitor {
@@ -70,16 +82,15 @@ impl Visitor<'_> for StringLinesVisitor {
.. ..
} = &value.node } = &value.node
{ {
self.string_lines self.string_lines.push(value.start());
.extend(value.location.row()..=value.end_location.unwrap().row());
} }
} }
visitor::walk_stmt(self, stmt); visitor::walk_stmt(self, stmt);
} }
} }
/// Extract doc lines (standalone strings) from an AST. /// Extract doc lines (standalone strings) start positions from an AST.
pub fn doc_lines_from_ast(python_ast: &Suite) -> Vec<usize> { pub fn doc_lines_from_ast(python_ast: &Suite) -> Vec<TextSize> {
let mut visitor = StringLinesVisitor::default(); let mut visitor = StringLinesVisitor::default();
visitor.visit_body(python_ast); visitor.visit_body(python_ast);
visitor.string_lines visitor.string_lines
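`doc_lines_from_ast` now records one `TextSize` start offset per standalone string statement, where the old code expanded every row the string covered into the result. A stand-in sketch of that reduction:

```rust
// One start offset per standalone string statement, instead of one row
// number for every line the string covers.
struct StringStmt {
    start: u32, // byte offset where the string literal begins
}

fn doc_line_starts(stmts: &[StringStmt]) -> Vec<u32> {
    stmts.iter().map(|stmt| stmt.start).collect()
}

fn main() {
    // A triple-quoted string spanning three lines still contributes one entry.
    let stmts = [StringStmt { start: 4 }];
    assert_eq!(doc_line_starts(&stmts), vec![4]);
}
```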
@@ -1,4 +1,7 @@
+use ruff_text_size::{TextRange, TextSize};
 use rustpython_parser::ast::{Expr, Stmt};
+use std::fmt::{Debug, Formatter};
+use std::ops::Deref;

 use ruff_python_semantic::analyze::visibility::{
     class_visibility, function_visibility, method_visibility, Modifier, Visibility, VisibleScope,
@@ -25,11 +28,78 @@ pub struct Definition<'a> {
 pub struct Docstring<'a> {
     pub kind: DefinitionKind<'a>,
     pub expr: &'a Expr,
+    /// The content of the docstring, including the leading and trailing quotes.
     pub contents: &'a str,
-    pub body: &'a str,
+    /// The range of the docstring body (without the quotes). The range is relative to [`Self::contents`].
+    pub body_range: TextRange,
     pub indentation: &'a str,
 }
impl<'a> Docstring<'a> {
pub fn body(&self) -> DocstringBody {
DocstringBody { docstring: self }
}
pub const fn start(&self) -> TextSize {
self.expr.start()
}
pub const fn end(&self) -> TextSize {
self.expr.end()
}
pub const fn range(&self) -> TextRange {
self.expr.range()
}
pub fn leading_quote(&self) -> &'a str {
&self.contents[TextRange::up_to(self.body_range.start())]
}
}
#[derive(Copy, Clone)]
pub struct DocstringBody<'a> {
docstring: &'a Docstring<'a>,
}
impl<'a> DocstringBody<'a> {
#[inline]
pub fn start(self) -> TextSize {
self.range().start()
}
#[inline]
pub fn end(self) -> TextSize {
self.range().end()
}
pub fn range(self) -> TextRange {
self.docstring.body_range + self.docstring.start()
}
pub fn as_str(self) -> &'a str {
&self.docstring.contents[self.docstring.body_range]
}
}
impl Deref for DocstringBody<'_> {
type Target = str;
fn deref(&self) -> &Self::Target {
self.as_str()
}
}
impl Debug for DocstringBody<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DocstringBody")
.field("text", &self.as_str())
.field("range", &self.range())
.finish()
}
}
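
The `body_range` bookkeeping is easiest to see with concrete offsets. A std-only sketch, with `TextRange` modeled as a plain start/end pair; the shift matches `body_range + docstring.start()` above:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Range { start: usize, end: usize }

    impl Range {
        // Shifting both ends converts a contents-relative range into an
        // absolute one, exactly what `DocstringBody::range` does.
        fn shift(self, offset: usize) -> Range {
            Range { start: self.start + offset, end: self.end + offset }
        }
    }

    fn main() {
        let source = "def f():\n    \"\"\"Summary.\"\"\"\n";
        let contents_start = source.find('"').unwrap();
        let contents = &source[contents_start..source.len() - 1];
        // Body range relative to `contents`: skip the triple quotes.
        let body = Range { start: 3, end: contents.len() - 3 };
        assert_eq!(&contents[body.start..body.end], "Summary.");
        // Absolute range relative to the file: shift by the docstring start.
        let absolute = body.shift(contents_start);
        assert_eq!(&source[absolute.start..absolute.end], "Summary.");
        // The leading quote is everything in `contents` before the body.
        assert_eq!(&contents[..body.start], "\"\"\"");
    }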
 #[derive(Copy, Clone)]
 pub enum Documentable {
     Class,
@@ -1,5 +1,10 @@
+use ruff_python_ast::newlines::{StrExt, UniversalNewlineIterator};
+use ruff_text_size::{TextLen, TextRange, TextSize};
+use std::fmt::{Debug, Formatter};
+use std::iter::FusedIterator;
 use strum_macros::EnumIter;

+use crate::docstrings::definition::{Docstring, DocstringBody};
 use crate::docstrings::styles::SectionStyle;
 use ruff_python_ast::whitespace;
@@ -125,17 +130,259 @@ impl SectionKind {
     }
 }

+pub(crate) struct SectionContexts<'a> {
+    contexts: Vec<SectionContextData>,
+    docstring: &'a Docstring<'a>,
+}
+
+impl<'a> SectionContexts<'a> {
+    /// Extract all `SectionContext` values from a docstring.
+    pub fn from_docstring(docstring: &'a Docstring<'a>, style: SectionStyle) -> Self {
+        let contents = docstring.body();
+
+        let mut contexts = Vec::new();
+        let mut last: Option<SectionContextData> = None;
+        let mut previous_line = None;
+
+        for line in contents.universal_newlines() {
+            if previous_line.is_none() {
+                // skip the first line
+                previous_line = Some(line.as_str());
+                continue;
+            }
+
+            if let Some(section_kind) = suspected_as_section(&line, style) {
+                let indent = whitespace::leading_space(&line);
+                let section_name = whitespace::leading_words(&line);
+
+                let section_name_range = TextRange::at(indent.text_len(), section_name.text_len());
+
+                if is_docstring_section(
+                    &line,
+                    section_name_range,
+                    previous_line.unwrap_or_default(),
+                ) {
+                    if let Some(mut last) = last.take() {
+                        last.range = TextRange::new(last.range.start(), line.start());
+                        contexts.push(last);
+                    }
+
+                    last = Some(SectionContextData {
+                        kind: section_kind,
+                        name_range: section_name_range + line.start(),
+                        range: TextRange::empty(line.start()),
+                        summary_full_end: line.full_end(),
+                    });
+                }
+            }
+
+            previous_line = Some(line.as_str());
+        }
+
+        if let Some(mut last) = last.take() {
+            last.range = TextRange::new(last.range.start(), contents.text_len());
+            contexts.push(last);
+        }
+
+        Self {
+            contexts,
+            docstring,
+        }
+    }
+
+    pub fn len(&self) -> usize {
+        self.contexts.len()
+    }
+
+    pub fn iter(&self) -> SectionContextsIter {
+        SectionContextsIter {
+            docstring_body: self.docstring.body(),
+            inner: self.contexts.iter(),
+        }
+    }
+}
+
+impl<'a> IntoIterator for &'a SectionContexts<'a> {
+    type Item = SectionContext<'a>;
+    type IntoIter = SectionContextsIter<'a>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl Debug for SectionContexts<'_> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_list().entries(self.iter()).finish()
+    }
+}
+
+pub struct SectionContextsIter<'a> {
+    docstring_body: DocstringBody<'a>,
+    inner: std::slice::Iter<'a, SectionContextData>,
+}
+
+impl<'a> Iterator for SectionContextsIter<'a> {
+    type Item = SectionContext<'a>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let next = self.inner.next()?;
+
+        Some(SectionContext {
+            data: next,
+            docstring_body: self.docstring_body,
+        })
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl<'a> DoubleEndedIterator for SectionContextsIter<'a> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        let back = self.inner.next_back()?;
+
+        Some(SectionContext {
+            data: back,
+            docstring_body: self.docstring_body,
+        })
+    }
+}
+
+impl FusedIterator for SectionContextsIter<'_> {}
+impl ExactSizeIterator for SectionContextsIter<'_> {}
+
 #[derive(Debug)]
-pub(crate) struct SectionContext<'a> {
-    /// The "kind" of the section, e.g. "SectionKind::Args" or "SectionKind::Returns".
-    pub(crate) kind: SectionKind,
+struct SectionContextData {
+    kind: SectionKind,
+
+    /// Range of the section name, relative to the [`Docstring::body`]
+    name_range: TextRange,
+
+    /// Range from the start to the end of the section, relative to the [`Docstring::body`]
+    range: TextRange,
+
+    /// End of the summary, relative to the [`Docstring::body`]
+    summary_full_end: TextSize,
+}
+
+pub struct SectionContext<'a> {
+    data: &'a SectionContextData,
+    docstring_body: DocstringBody<'a>,
+}
+
+impl<'a> SectionContext<'a> {
+    pub fn is_last(&self) -> bool {
+        self.range().end() == self.docstring_body.end()
+    }
+
+    /// The `kind` of the section, e.g. [`SectionKind::Args`] or [`SectionKind::Returns`].
+    pub const fn kind(&self) -> SectionKind {
+        self.data.kind
+    }
+
     /// The name of the section as it appears in the docstring, e.g. "Args" or "Returns".
-    pub(crate) section_name: &'a str,
-    pub(crate) previous_line: &'a str,
-    pub(crate) line: &'a str,
-    pub(crate) following_lines: &'a [&'a str],
-    pub(crate) is_last_section: bool,
-    pub(crate) original_index: usize,
+    pub fn section_name(&self) -> &'a str {
+        &self.docstring_body.as_str()[self.data.name_range]
+    }
+
+    /// Returns the rest of the summary line after the section name.
+    pub fn summary_after_section_name(&self) -> &'a str {
+        &self.summary_line()[usize::from(self.data.name_range.end() - self.data.range.start())..]
+    }
+
+    fn offset(&self) -> TextSize {
+        self.docstring_body.start()
+    }
+
+    /// The absolute range of the section name
+    pub fn section_name_range(&self) -> TextRange {
+        self.data.name_range + self.offset()
+    }
+
+    /// Summary range relative to the start of the document. Includes the trailing newline.
+    pub fn summary_full_range(&self) -> TextRange {
+        self.summary_full_range_relative() + self.offset()
+    }
+
+    /// The absolute range of the summary line, excluding any trailing newline character.
+    pub fn summary_range(&self) -> TextRange {
+        TextRange::at(self.range().start(), self.summary_line().text_len())
+    }
+
+    /// Range of the summary line relative to [`Docstring::body`], including the trailing newline character.
+    fn summary_full_range_relative(&self) -> TextRange {
+        TextRange::new(self.range_relative().start(), self.data.summary_full_end)
+    }
+
+    /// Returns the range of this section relative to [`Docstring::body`]
+    const fn range_relative(&self) -> TextRange {
+        self.data.range
+    }
+
+    /// The absolute range of the full-section.
+    pub fn range(&self) -> TextRange {
+        self.range_relative() + self.offset()
+    }
+
+    /// Summary line without the trailing newline characters
+    pub fn summary_line(&self) -> &'a str {
+        let full_summary = &self.docstring_body.as_str()[self.summary_full_range_relative()];
+
+        let mut bytes = full_summary.bytes().rev();
+
+        let newline_width = match bytes.next() {
+            Some(b'\n') => {
+                if bytes.next() == Some(b'\r') {
+                    2
+                } else {
+                    1
+                }
+            }
+            Some(b'\r') => 1,
+            _ => 0,
+        };
+
+        &full_summary[..full_summary.len() - newline_width]
+    }
+
+    /// Returns the text of the last line of the previous section or an empty string if it is the first section.
+    pub fn previous_line(&self) -> Option<&'a str> {
+        let previous =
+            &self.docstring_body.as_str()[TextRange::up_to(self.range_relative().start())];
+        previous.universal_newlines().last().map(|l| l.as_str())
+    }
+
+    /// Returns the lines belonging to this section after the summary line.
+    pub fn following_lines(&self) -> UniversalNewlineIterator<'a> {
+        let lines = self.following_lines_str();
+        UniversalNewlineIterator::with_offset(lines, self.offset() + self.data.summary_full_end)
+    }
+
+    fn following_lines_str(&self) -> &'a str {
+        &self.docstring_body.as_str()[self.following_range_relative()]
+    }
+
+    /// Returns the range to the following lines relative to [`Docstring::body`].
+    const fn following_range_relative(&self) -> TextRange {
+        TextRange::new(self.data.summary_full_end, self.range_relative().end())
+    }
+
+    /// Returns the absolute range of the following lines.
+    pub fn following_range(&self) -> TextRange {
+        self.following_range_relative() + self.offset()
+    }
+}
+
+impl Debug for SectionContext<'_> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SectionContext")
+            .field("kind", &self.kind())
+            .field("section_name", &self.section_name())
+            .field("summary_line", &self.summary_line())
+            .field("following_lines", &&self.following_lines_str())
+            .finish()
+    }
 }
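
`summary_line` trims at most one trailing universal newline by inspecting the final bytes instead of re-scanning the string. A std-only sketch of that trim:

    // Strip one trailing "\r\n", "\n", or "\r", as the byte-wise match above does.
    fn strip_line_ending(line: &str) -> &str {
        let mut bytes = line.bytes().rev();
        let width = match bytes.next() {
            Some(b'\n') => {
                if bytes.next() == Some(b'\r') { 2 } else { 1 }
            }
            Some(b'\r') => 1,
            _ => 0,
        };
        &line[..line.len() - width]
    }

    fn main() {
        assert_eq!(strip_line_ending("Args:\r\n"), "Args:");
        assert_eq!(strip_line_ending("Args:\n"), "Args:");
        assert_eq!(strip_line_ending("Args:"), "Args:");
    }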
 fn suspected_as_section(line: &str, style: SectionStyle) -> Option<SectionKind> {
@@ -148,20 +395,15 @@ fn suspected_as_section(line: &str, style: SectionStyle) -> Option<SectionKind>
 }

 /// Check if the suspected context is really a section header.
-fn is_docstring_section(context: &SectionContext) -> bool {
-    let section_name_suffix = context
-        .line
-        .trim()
-        .strip_prefix(context.section_name)
-        .unwrap()
-        .trim();
+fn is_docstring_section(line: &str, section_name_range: TextRange, previous_lines: &str) -> bool {
+    let section_name_suffix = line[usize::from(section_name_range.end())..].trim();
     let this_looks_like_a_section_name =
         section_name_suffix == ":" || section_name_suffix.is_empty();
     if !this_looks_like_a_section_name {
         return false;
     }

-    let prev_line = context.previous_line.trim();
+    let prev_line = previous_lines.trim();
     let prev_line_ends_with_punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
         .into_iter()
         .any(|char| prev_line.ends_with(char));
@@ -173,50 +415,3 @@ fn is_docstring_section(context: &SectionContext) -> bool {
     true
 }
-
-/// Extract all `SectionContext` values from a docstring.
-pub(crate) fn section_contexts<'a>(
-    lines: &'a [&'a str],
-    style: SectionStyle,
-) -> Vec<SectionContext<'a>> {
-    let mut contexts = vec![];
-    for (kind, lineno) in lines
-        .iter()
-        .enumerate()
-        .skip(1)
-        .filter_map(|(lineno, line)| suspected_as_section(line, style).map(|kind| (kind, lineno)))
-    {
-        let context = SectionContext {
-            kind,
-            section_name: whitespace::leading_words(lines[lineno]),
-            previous_line: lines[lineno - 1],
-            line: lines[lineno],
-            following_lines: &lines[lineno + 1..],
-            original_index: lineno,
-            is_last_section: false,
-        };
-        if is_docstring_section(&context) {
-            contexts.push(context);
-        }
-    }
-
-    let mut truncated_contexts = Vec::with_capacity(contexts.len());
-    let mut end: Option<usize> = None;
-    for context in contexts.into_iter().rev() {
-        let next_end = context.original_index;
-        truncated_contexts.push(SectionContext {
-            kind: context.kind,
-            section_name: context.section_name,
-            previous_line: context.previous_line,
-            line: context.line,
-            following_lines: end.map_or(context.following_lines, |end| {
-                &lines[context.original_index + 1..end]
-            }),
-            original_index: context.original_index,
-            is_last_section: end.is_none(),
-        });
-        end = Some(next_end);
-    }
-
-    truncated_contexts.reverse();
-    truncated_contexts
-}
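
The replacement code walks docstring lines with `universal_newlines()`, which yields each line together with its position so section ranges can be built without row bookkeeping. A std-only approximation ('\n'-only for brevity; the real iterator also handles '\r' and '\r\n'):

    // Yield (start offset, line without its terminator) for every line.
    fn lines_with_offsets(text: &str) -> impl Iterator<Item = (usize, &str)> + '_ {
        let mut offset = 0;
        text.split_inclusive('\n').map(move |line| {
            let start = offset;
            offset += line.len();
            (start, line.trim_end_matches(|c| c == '\r' || c == '\n'))
        })
    }

    fn main() {
        let body = "Summary.\n\nArgs:\n    x: int\n";
        let args = lines_with_offsets(body).find(|(_, line)| *line == "Args:");
        // The section header starts at byte 10 of the docstring body.
        assert_eq!(args, Some((10, "Args:")));
    }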
@@ -2,8 +2,9 @@
 use anyhow::Result;
 use libcst_native::{Codegen, CodegenState, ImportAlias, Name, NameOrAttribute};
+use ruff_text_size::TextSize;
 use rustc_hash::FxHashMap;
-use rustpython_parser::ast::{Location, Stmt, StmtKind, Suite};
+use rustpython_parser::ast::{Stmt, StmtKind, Suite};
 use rustpython_parser::{lexer, Mode, Tok};

 use ruff_diagnostics::Edit;
@@ -95,7 +96,7 @@ impl<'a> Importer<'a> {
     /// Add the given member to an existing `StmtKind::ImportFrom` statement.
     pub fn add_member(&self, stmt: &Stmt, member: &str) -> Result<Edit> {
-        let mut tree = match_module(self.locator.slice(stmt))?;
+        let mut tree = match_module(self.locator.slice(stmt.range()))?;
         let import_from = match_import_from(&mut tree)?;
         let aliases = match_aliases(import_from)?;
         aliases.push(ImportAlias {
@@ -113,11 +114,7 @@ impl<'a> Importer<'a> {
             ..CodegenState::default()
         };
         tree.codegen(&mut state);
-        Ok(Edit::replacement(
-            state.to_string(),
-            stmt.location,
-            stmt.end_location.unwrap(),
-        ))
+        Ok(Edit::range_replacement(state.to_string(), stmt.range()))
     }
 }

@@ -126,13 +123,13 @@ struct Insertion {
     /// The content to add before the insertion.
     prefix: &'static str,
     /// The location at which to insert.
-    location: Location,
+    location: TextSize,
     /// The content to add after the insertion.
     suffix: &'static str,
 }

 impl Insertion {
-    fn new(prefix: &'static str, location: Location, suffix: &'static str) -> Self {
+    fn new(prefix: &'static str, location: TextSize, suffix: &'static str) -> Self {
         Self {
             prefix,
             location,
@@ -142,7 +139,7 @@ impl Insertion {
 }

 /// Find the end of the last docstring.
-fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
+fn match_docstring_end(body: &[Stmt]) -> Option<TextSize> {
     let mut iter = body.iter();
     let Some(mut stmt) = iter.next() else {
         return None;
@@ -156,7 +153,7 @@ fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
         }
         stmt = next;
     }
-    Some(stmt.end_location.unwrap())
+    Some(stmt.end())
 }
 /// Find the location at which a "top-of-file" import should be inserted,
@@ -173,17 +170,17 @@ fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
 /// The location returned will be the start of the `import os` statement,
 /// along with a trailing newline suffix.
 fn end_of_statement_insertion(stmt: &Stmt, locator: &Locator, stylist: &Stylist) -> Insertion {
-    let location = stmt.end_location.unwrap();
+    let location = stmt.end();
     let mut tokens = lexer::lex_located(locator.after(location), Mode::Module, location).flatten();
-    if let Some((.., Tok::Semi, end)) = tokens.next() {
+    if let Some((Tok::Semi, range)) = tokens.next() {
         // If the first token after the docstring is a semicolon, insert after the semicolon as an
         // inline statement;
-        Insertion::new(" ", end, ";")
+        Insertion::new(" ", range.end(), ";")
     } else {
         // Otherwise, insert on the next line.
         Insertion::new(
             "",
-            Location::new(location.row() + 1, 0),
+            locator.line_end(location),
             stylist.line_ending().as_str(),
         )
     }
@@ -210,22 +207,22 @@ fn top_of_file_insertion(body: &[Stmt], locator: &Locator, stylist: &Stylist) ->
         let first_token = lexer::lex_located(locator.after(location), Mode::Module, location)
             .flatten()
             .next();
-        if let Some((.., Tok::Semi, end)) = first_token {
-            return Insertion::new(" ", end, ";");
+        if let Some((Tok::Semi, range)) = first_token {
+            return Insertion::new(" ", range.end(), ";");
         }
         // Otherwise, advance to the next row.
-        Location::new(location.row() + 1, 0)
+        locator.full_line_end(location)
     } else {
-        Location::default()
+        TextSize::default()
     };

     // Skip over any comments and empty lines.
-    for (.., tok, end) in
+    for (tok, range) in
         lexer::lex_located(locator.after(location), Mode::Module, location).flatten()
     {
         if matches!(tok, Tok::Comment(..) | Tok::Newline) {
-            location = Location::new(end.row() + 1, 0);
+            location = locator.full_line_end(range.end());
         } else {
             break;
         }
@@ -237,8 +234,8 @@ fn top_of_file_insertion(body: &[Stmt], locator: &Locator, stylist: &Stylist) ->
 #[cfg(test)]
 mod tests {
     use anyhow::Result;
+    use ruff_text_size::TextSize;
     use rustpython_parser as parser;
-    use rustpython_parser::ast::Location;
     use rustpython_parser::lexer::LexResult;

     use ruff_python_ast::source_code::{LineEnding, Locator, Stylist};
@@ -258,7 +255,7 @@ mod tests {
         let contents = "";
         assert_eq!(
             insert(contents)?,
-            Insertion::new("", Location::new(1, 0), LineEnding::default().as_str())
+            Insertion::new("", TextSize::from(0), LineEnding::default().as_str())
         );

         let contents = r#"
@@ -266,7 +263,7 @@ mod tests {
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new("", Location::new(2, 0), LineEnding::default().as_str())
+            Insertion::new("", TextSize::from(19), LineEnding::default().as_str())
         );

         let contents = r#"
@@ -275,7 +272,7 @@ mod tests {
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new("", Location::new(2, 0), "\n")
+            Insertion::new("", TextSize::from(20), "\n")
         );

         let contents = r#"
@@ -285,7 +282,7 @@ mod tests {
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new("", Location::new(3, 0), "\n")
+            Insertion::new("", TextSize::from(40), "\n")
         );

         let contents = r#"
@@ -294,7 +291,7 @@ x = 1
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new("", Location::new(1, 0), "\n")
+            Insertion::new("", TextSize::from(0), "\n")
         );

         let contents = r#"
@@ -303,7 +300,7 @@ x = 1
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new("", Location::new(2, 0), "\n")
+            Insertion::new("", TextSize::from(23), "\n")
         );

         let contents = r#"
@@ -313,7 +310,7 @@ x = 1
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new("", Location::new(3, 0), "\n")
+            Insertion::new("", TextSize::from(43), "\n")
         );

         let contents = r#"
@@ -323,7 +320,7 @@ x = 1
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new("", Location::new(3, 0), "\n")
+            Insertion::new("", TextSize::from(43), "\n")
         );

         let contents = r#"
@@ -332,7 +329,7 @@ x = 1
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new("", Location::new(1, 0), "\n")
+            Insertion::new("", TextSize::from(0), "\n")
         );

         let contents = r#"
@@ -341,7 +338,7 @@ x = 1
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new(" ", Location::new(1, 20), ";")
+            Insertion::new(" ", TextSize::from(20), ";")
         );

         let contents = r#"
@@ -351,7 +348,7 @@ x = 1
         .trim_start();
         assert_eq!(
             insert(contents)?,
-            Insertion::new(" ", Location::new(1, 20), ";")
+            Insertion::new(" ", TextSize::from(20), ";")
         );

         Ok(())
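
Several call sites replace `Location::new(location.row() + 1, 0)` with `locator.full_line_end(location)`. A std-only sketch of that primitive: given an offset, return the offset just past the containing line's terminator (searching for '\n' also covers "\r\n"):

    fn full_line_end(source: &str, offset: usize) -> usize {
        source[offset..]
            .find('\n')
            .map_or(source.len(), |i| offset + i + 1)
    }

    fn main() {
        let source = "\"\"\"Docstring.\"\"\"\nimport os\n";
        // An insertion "on the next line" after the docstring lands here:
        assert!(source[full_line_end(source, 0)..].starts_with("import os"));
        // At the last line, the offset saturates to the end of the file.
        assert_eq!(full_line_end(source, full_line_end(source, 0)), source.len());
    }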
@@ -1,3 +1,4 @@
+use ruff_text_size::TextRange;
 use std::fs::File;
 use std::io::{BufReader, BufWriter};
 use std::iter;
@@ -7,7 +8,6 @@ use serde::Serialize;
 use serde_json::error::Category;

 use ruff_diagnostics::Diagnostic;
-use ruff_python_ast::types::Range;

 use crate::jupyter::{CellType, JupyterNotebook, SourceValue};
 use crate::rules::pycodestyle::rules::SyntaxError;
@@ -18,7 +18,7 @@ pub const JUPYTER_NOTEBOOK_EXT: &str = "ipynb";
 /// Jupyter Notebook indexing table
 ///
 /// When we lint a jupyter notebook, we have to translate the row/column based on
-/// [`crate::message::Location`]
+/// [`ruff_text_size::TextSize`]
 /// to jupyter notebook cell/row/column.
 #[derive(Debug, Eq, PartialEq)]
 pub struct JupyterIndex {
@@ -46,7 +46,7 @@ impl JupyterNotebook {
                 IOError {
                     message: format!("{err}"),
                 },
-                Range::default(),
+                TextRange::default(),
             )
         })?);
         let notebook: JupyterNotebook = match serde_json::from_reader(reader) {
@@ -59,7 +59,7 @@ impl JupyterNotebook {
                     IOError {
                         message: format!("{err}"),
                     },
-                    Range::default(),
+                    TextRange::default(),
                 ),
                 Category::Syntax | Category::Eof => {
                     // Maybe someone saved the python sources (those with the `# %%` separator)
@@ -69,7 +69,7 @@ impl JupyterNotebook {
                             IOError {
                                 message: format!("{err}"),
                             },
-                            Range::default(),
+                            TextRange::default(),
                         )
                     })?;
                     // Check if tokenizing was successful and the file is non-empty
@@ -84,7 +84,7 @@ impl JupyterNotebook {
                                     but this file isn't valid JSON: {err}"
                                 ),
                             },
-                            Range::default(),
+                            TextRange::default(),
                         )
                     } else {
                         Diagnostic::new(
@@ -95,7 +95,7 @@ impl JupyterNotebook {
                                     but found a Python source file: {err}"
                                 ),
                             },
-                            Range::default(),
+                            TextRange::default(),
                         )
                     }
                 }
@@ -108,7 +108,7 @@ impl JupyterNotebook {
                         "This file does not match the schema expected of Jupyter Notebooks: {err}"
                     ),
                 },
-                Range::default(),
+                TextRange::default(),
             )
         }
     }
@@ -126,7 +126,7 @@ impl JupyterNotebook {
                     notebook.nbformat
                 ),
             },
-            Range::default(),
+            TextRange::default(),
        )));
    }
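
The `JupyterIndex` doc comment describes translating a flat position into cell coordinates. A std-only sketch of the two lookup tables, indexed the way the grouped emitter later uses them (`row_to_cell[row.get()]`); the one-based layout with a padding slot at index 0 is an assumption for illustration:

    fn main() {
        // Concatenated notebook source: cell 1 holds rows 1-2, cell 2 holds row 3.
        // Index 0 is padding so the tables can be indexed by one-based rows.
        let row_to_cell = [0, 1, 1, 2];
        let row_to_row_in_cell = [0, 1, 2, 1];

        let global_row = 3; // e.g. derived from a TextSize via a line index
        assert_eq!(row_to_cell[global_row], 2); // third row lives in cell 2 ...
        assert_eq!(row_to_row_in_cell[global_row], 1); // ... as that cell's first row
    }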
@@ -6,7 +6,6 @@
 //! [Ruff]: https://github.com/charliermarsh/ruff

 pub use ruff_python_ast::source_code::round_trip;
-pub use ruff_python_ast::types::Range;
 pub use rule_selector::RuleSelector;
 pub use rules::pycodestyle::rules::IOError;
@@ -23,6 +23,7 @@ use crate::checkers::physical_lines::check_physical_lines;
 use crate::checkers::tokens::check_tokens;
 use crate::directives::Directives;
 use crate::doc_lines::{doc_lines_from_ast, doc_lines_from_tokens};
+use crate::logging::DisplayParseError;
 use crate::message::Message;
 use crate::noqa::add_noqa;
 use crate::registry::{AsRule, Rule};
@@ -68,7 +69,6 @@ pub struct FixerResult<'a> {
 pub fn check_path(
     path: &Path,
     package: Option<&Path>,
-    contents: &str,
     tokens: Vec<LexResult>,
     locator: &Locator,
     stylist: &Stylist,
@@ -88,7 +88,7 @@ pub fn check_path(
     let use_doc_lines = settings.rules.enabled(Rule::DocLineTooLong);
     let mut doc_lines = vec![];
     if use_doc_lines {
-        doc_lines.extend(doc_lines_from_tokens(&tokens));
+        doc_lines.extend(doc_lines_from_tokens(&tokens, locator));
     }

     // Run the token-based rules.
@@ -178,7 +178,7 @@ pub fn check_path(
             // if it's disabled via any of the usual mechanisms (e.g., `noqa`,
             // `per-file-ignores`), and the easiest way to detect that suppression is
             // to see if the diagnostic persists to the end of the function.
-            pycodestyle::rules::syntax_error(&mut diagnostics, &parse_error);
+            pycodestyle::rules::syntax_error(&mut diagnostics, &parse_error, locator);
             error = Some(parse_error);
         }
     }
@@ -218,8 +218,8 @@ pub fn check_path(
     {
         let ignored = check_noqa(
             &mut diagnostics,
-            contents,
-            indexer.commented_lines(),
+            locator,
+            indexer.comment_ranges(),
             &directives.noqa_line_for,
             settings,
             error.as_ref().map_or(autofix, |_| flags::Autofix::Disabled),
@@ -268,11 +268,15 @@ pub fn add_noqa_to_path(path: &Path, package: Option<&Path>, settings: &Settings
     let stylist = Stylist::from_tokens(&tokens, &locator);

     // Extra indices from the code.
-    let indexer: Indexer = tokens.as_slice().into();
+    let indexer = Indexer::from_tokens(&tokens, &locator);

     // Extract the `# noqa` and `# isort: skip` directives from the source.
-    let directives =
-        directives::extract_directives(&tokens, directives::Flags::from_settings(settings));
+    let directives = directives::extract_directives(
+        &tokens,
+        directives::Flags::from_settings(settings),
+        &locator,
+        &indexer,
+    );

     // Generate diagnostics, ignoring any existing `noqa` directives.
     let LinterResult {
@@ -281,7 +285,6 @@ pub fn add_noqa_to_path(path: &Path, package: Option<&Path>, settings: &Settings
     } = check_path(
         path,
         package,
-        &contents,
         tokens,
         &locator,
         &stylist,
@@ -294,20 +297,15 @@ pub fn add_noqa_to_path(path: &Path, package: Option<&Path>, settings: &Settings

     // Log any parse errors.
     if let Some(err) = error {
-        error!(
-            "{}{}{} {err:?}",
-            "Failed to parse ".bold(),
-            fs::relativize_path(path).bold(),
-            ":".bold()
-        );
+        error!("{}", DisplayParseError::new(err, locator.to_source_code()));
     }

     // Add any missing `# noqa` pragmas.
     add_noqa(
         path,
         &diagnostics.0,
-        &contents,
-        indexer.commented_lines(),
+        &locator,
+        indexer.comment_ranges(),
         &directives.noqa_line_for,
         stylist.line_ending(),
     )
@@ -333,17 +331,20 @@ pub fn lint_only(
     let stylist = Stylist::from_tokens(&tokens, &locator);

     // Extra indices from the code.
-    let indexer: Indexer = tokens.as_slice().into();
+    let indexer = Indexer::from_tokens(&tokens, &locator);

     // Extract the `# noqa` and `# isort: skip` directives from the source.
-    let directives =
-        directives::extract_directives(&tokens, directives::Flags::from_settings(settings));
+    let directives = directives::extract_directives(
+        &tokens,
+        directives::Flags::from_settings(settings),
+        &locator,
+        &indexer,
+    );

     // Generate diagnostics.
     let result = check_path(
         path,
         package,
-        contents,
         tokens,
         &locator,
         &stylist,
@@ -356,7 +357,7 @@ pub fn lint_only(
     result.map(|(diagnostics, imports)| {
         (
-            diagnostics_to_messages(diagnostics, path, settings, &locator, &directives),
+            diagnostics_to_messages(diagnostics, path, &locator, &directives),
             imports,
         )
     })
@@ -366,14 +367,15 @@ pub fn lint_only(
 fn diagnostics_to_messages(
     diagnostics: Vec<Diagnostic>,
     path: &Path,
-    settings: &Settings,
     locator: &Locator,
     directives: &Directives,
 ) -> Vec<Message> {
     let file = once_cell::unsync::Lazy::new(|| {
-        let mut builder = SourceFileBuilder::new(&path.to_string_lossy());
-        if settings.show_source {
-            builder.set_source_code(&locator.to_source_code());
+        let mut builder =
+            SourceFileBuilder::new(path.to_string_lossy().as_ref(), locator.contents());
+
+        if let Some(line_index) = locator.line_index() {
+            builder.set_line_index(line_index.clone());
         }

         builder.finish()
@@ -382,9 +384,8 @@ fn diagnostics_to_messages(
     diagnostics
         .into_iter()
         .map(|diagnostic| {
-            let lineno = diagnostic.location.row();
-            let noqa_row = *directives.noqa_line_for.get(&lineno).unwrap_or(&lineno);
-            Message::from_diagnostic(diagnostic, file.deref().clone(), noqa_row)
+            let noqa_offset = directives.noqa_line_for.resolve(diagnostic.start());
+            Message::from_diagnostic(diagnostic, file.deref().clone(), noqa_offset)
         })
         .collect()
 }
@@ -421,17 +422,20 @@ pub fn lint_fix<'a>(
     let stylist = Stylist::from_tokens(&tokens, &locator);

     // Extra indices from the code.
-    let indexer: Indexer = tokens.as_slice().into();
+    let indexer = Indexer::from_tokens(&tokens, &locator);

     // Extract the `# noqa` and `# isort: skip` directives from the source.
-    let directives =
-        directives::extract_directives(&tokens, directives::Flags::from_settings(settings));
+    let directives = directives::extract_directives(
+        &tokens,
+        directives::Flags::from_settings(settings),
+        &locator,
+        &indexer,
+    );

     // Generate diagnostics.
     let result = check_path(
         path,
         package,
-        &transformed,
         tokens,
         &locator,
         &stylist,
@@ -513,7 +517,7 @@ This indicates a bug in `{}`. If you could open an issue at:
     return Ok(FixerResult {
         result: result.map(|(diagnostics, imports)| {
             (
-                diagnostics_to_messages(diagnostics, path, settings, &locator, &directives),
+                diagnostics_to_messages(diagnostics, path, &locator, &directives),
                 imports,
             )
         }),
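
`directives.noqa_line_for` changes from a per-row `HashMap` lookup to `resolve(diagnostic.start())`. A std-only sketch of the resolve contract, with the mapping modeled as a vector of (statement range, noqa offset) pairs; the layout is an assumption, only the fallback-to-identity behavior is taken from the call site:

    struct NoqaMapping {
        // Ranges of multi-line statements, each paired with the offset of the
        // line that must carry their `# noqa` comment.
        ranges: Vec<(std::ops::Range<usize>, usize)>,
    }

    impl NoqaMapping {
        fn resolve(&self, offset: usize) -> usize {
            self.ranges
                .iter()
                .find(|(range, _)| range.contains(&offset))
                .map_or(offset, |(_, noqa)| *noqa)
        }
    }

    fn main() {
        let mapping = NoqaMapping { ranges: vec![(10..40, 35)] };
        assert_eq!(mapping.resolve(12), 35); // inside a multi-line statement
        assert_eq!(mapping.resolve(50), 50); // everywhere else: identity
    }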
@@ -1,10 +1,15 @@
+use std::fmt::{Display, Formatter};
+use std::path::Path;
 use std::sync::Mutex;

+use crate::fs;
 use anyhow::Result;
 use colored::Colorize;
 use fern;
 use log::Level;
 use once_cell::sync::Lazy;
+use ruff_python_ast::source_code::SourceCode;
+use rustpython_parser::ParseError;

 pub(crate) static WARNINGS: Lazy<Mutex<Vec<&'static str>>> = Lazy::new(Mutex::default);
@@ -42,13 +47,13 @@ macro_rules! warn_user_once {
 #[macro_export]
 macro_rules! warn_user {
-    ($($arg:tt)*) => {
+    ($($arg:tt)*) => {{
         use colored::Colorize;
         use log::warn;

         let message = format!("{}", format_args!($($arg)*));
         warn!("{}", message.bold());
-    };
+    }};
 }

 #[macro_export]
@@ -127,6 +132,34 @@ pub fn set_up_logging(level: &LogLevel) -> Result<()> {
     Ok(())
 }

+pub struct DisplayParseError<'a> {
+    error: ParseError,
+    source_code: SourceCode<'a, 'a>,
+}
+
+impl<'a> DisplayParseError<'a> {
+    pub fn new(error: ParseError, source_code: SourceCode<'a, 'a>) -> Self {
+        Self { error, source_code }
+    }
+}
+
+impl Display for DisplayParseError<'_> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        let source_location = self.source_code.source_location(self.error.location);
+
+        write!(
+            f,
+            "{header} {path}{colon}{row}{colon}{column}{colon} {inner}",
+            header = "Failed to parse ".bold(),
+            path = fs::relativize_path(Path::new(&self.error.source_path)).bold(),
+            row = source_location.row,
+            column = source_location.column,
+            colon = ":".cyan(),
+            inner = &self.error.error
+        )
+    }
+}
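
`source_code.source_location(self.error.location)` is what turns the parse error's byte offset back into the row/column printed above. A std-only sketch of the line-index lookup behind it (columns counted in bytes here; the real implementation counts characters):

    fn line_starts(source: &str) -> Vec<usize> {
        std::iter::once(0)
            .chain(source.match_indices('\n').map(|(i, _)| i + 1))
            .collect()
    }

    fn source_location(starts: &[usize], offset: usize) -> (usize, usize) {
        // Index of the last line start <= offset, via binary search.
        let row = starts.partition_point(|&start| start <= offset);
        let column = offset - starts[row - 1] + 1;
        (row, column) // both one-indexed
    }

    fn main() {
        let source = "import os\nimport sys\n";
        let starts = line_starts(source);
        let offset = source.find("sys").unwrap();
        assert_eq!(source_location(&starts, offset), (2, 8));
    }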
 #[cfg(test)]
 mod tests {
     use crate::logging::LogLevel;
@@ -1,5 +1,6 @@
 use crate::message::{Emitter, EmitterContext, Message};
 use crate::registry::AsRule;
+use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
 use std::io::Write;

 /// Generate error logging commands for Azure Pipelines format.
@@ -15,12 +16,15 @@ impl Emitter for AzureEmitter {
         context: &EmitterContext,
     ) -> anyhow::Result<()> {
         for message in messages {
-            let (line, col) = if context.is_jupyter_notebook(message.filename()) {
+            let location = if context.is_jupyter_notebook(message.filename()) {
                 // We can't give a reasonable location for the structured formats,
                 // so we show one that's clearly a fallback
-                (1, 0)
+                SourceLocation {
+                    row: OneIndexed::from_zero_indexed(0),
+                    column: OneIndexed::from_zero_indexed(0),
+                }
             } else {
-                (message.location.row(), message.location.column())
+                message.compute_start_location()
             };

             writeln!(
@@ -28,6 +32,8 @@ impl Emitter for AzureEmitter {
                 "##vso[task.logissue type=error\
                 ;sourcepath={filename};linenumber={line};columnnumber={col};code={code};]{body}",
                 filename = message.filename(),
+                line = location.row,
+                col = location.column,
                 code = message.kind.rule().noqa_code(),
                 body = message.kind.body,
             )?;
@@ -1,8 +1,7 @@
 use crate::message::Message;
 use colored::{Color, ColoredString, Colorize, Styles};
 use ruff_diagnostics::Fix;
-use ruff_python_ast::source_code::{OneIndexed, SourceCode};
-use ruff_python_ast::types::Range;
+use ruff_python_ast::source_code::{OneIndexed, SourceFile};
 use ruff_text_size::{TextRange, TextSize};
 use similar::{ChangeTag, TextDiff};
 use std::fmt::{Display, Formatter};
@@ -18,38 +17,39 @@ use std::num::NonZeroUsize;
 /// * Compute the diff from the [`Edit`] because diff calculation is expensive.
 pub(super) struct Diff<'a> {
     fix: &'a Fix,
-    source_code: SourceCode<'a, 'a>,
+    source_code: &'a SourceFile,
 }

 impl<'a> Diff<'a> {
     pub fn from_message(message: &'a Message) -> Option<Diff> {
-        match message.file.source_code() {
-            Some(source_code) if !message.fix.is_empty() => Some(Diff {
-                source_code,
+        if message.fix.is_empty() {
+            None
+        } else {
+            Some(Diff {
+                source_code: &message.file,
                 fix: &message.fix,
-            }),
-            _ => None,
+            })
         }
     }
 }

 impl Display for Diff<'_> {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        let mut output = String::with_capacity(self.source_code.text().len());
+        let mut output = String::with_capacity(self.source_code.source_text().len());
         let mut last_end = TextSize::default();

         for edit in self.fix.edits() {
-            let edit_range = self
-                .source_code
-                .text_range(Range::new(edit.location(), edit.end_location()));
-            output.push_str(&self.source_code.text()[TextRange::new(last_end, edit_range.start())]);
+            output.push_str(
+                self.source_code
+                    .slice(TextRange::new(last_end, edit.start())),
+            );
             output.push_str(edit.content().unwrap_or_default());
-            last_end = edit_range.end();
+            last_end = edit.end();
         }

-        output.push_str(&self.source_code.text()[usize::from(last_end)..]);
+        output.push_str(&self.source_code.source_text()[usize::from(last_end)..]);

-        let diff = TextDiff::from_lines(self.source_code.text(), &output);
+        let diff = TextDiff::from_lines(self.source_code.source_text(), &output);

         writeln!(f, "{}", " Suggested fix".blue())?;
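
The rewritten `Display` impl stitches the fixed output together from byte ranges alone. A std-only sketch of that loop, assuming sorted, non-overlapping edits as the real code does:

    struct Edit {
        range: std::ops::Range<usize>,
        content: String,
    }

    fn apply_edits(source: &str, edits: &[Edit]) -> String {
        let mut output = String::with_capacity(source.len());
        let mut last_end = 0;
        for edit in edits {
            // Copy the untouched slice, then the replacement text.
            output.push_str(&source[last_end..edit.range.start]);
            output.push_str(&edit.content);
            last_end = edit.range.end;
        }
        output.push_str(&source[last_end..]);
        output
    }

    fn main() {
        let source = "x = 1\ny = 2\n";
        let edits = [Edit { range: 6..11, content: String::new() }]; // delete `y = 2`
        assert_eq!(apply_edits(source, &edits), "x = 1\n\n");
    }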
@@ -1,6 +1,7 @@
 use crate::fs::relativize_path;
 use crate::message::{Emitter, EmitterContext, Message};
 use crate::registry::AsRule;
+use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
 use std::io::Write;

 /// Generate error workflow command in GitHub Actions format.
@@ -16,30 +17,38 @@ impl Emitter for GithubEmitter {
         context: &EmitterContext,
     ) -> anyhow::Result<()> {
         for message in messages {
-            let (row, column) = if context.is_jupyter_notebook(message.filename()) {
+            let source_location = message.compute_start_location();
+            let location = if context.is_jupyter_notebook(message.filename()) {
                 // We can't give a reasonable location for the structured formats,
                 // so we show one that's clearly a fallback
-                (1, 0)
+                SourceLocation {
+                    row: OneIndexed::from_zero_indexed(0),
+                    column: OneIndexed::from_zero_indexed(0),
+                }
             } else {
-                (message.location.row(), message.location.column())
+                source_location.clone()
             };

+            let end_location = message.compute_end_location();
+
             write!(
                 writer,
                 "::error title=Ruff \
                 ({code}),file={file},line={row},col={column},endLine={end_row},endColumn={end_column}::",
                 code = message.kind.rule().noqa_code(),
                 file = message.filename(),
-                row = message.location.row(),
-                column = message.location.column(),
-                end_row = message.end_location.row(),
-                end_column = message.end_location.column(),
+                row = source_location.row,
+                column = source_location.column,
+                end_row = end_location.row,
+                end_column = end_location.column,
             )?;

             writeln!(
                 writer,
                 "{path}:{row}:{column}: {code} {body}",
                 path = relativize_path(message.filename()),
+                row = location.row,
+                column = location.column,
                 code = message.kind.rule().noqa_code(),
                 body = message.kind.body,
             )?;
@@ -64,9 +64,11 @@ impl Serialize for SerializedMessages<'_> {
                     "end": 1
                 })
             } else {
+                let start_location = message.compute_start_location();
+                let end_location = message.compute_end_location();
                 json!({
-                    "begin": message.location.row(),
-                    "end": message.end_location.row()
+                    "begin": start_location.row,
+                    "end": end_location.row
                 })
             };
@@ -96,20 +98,16 @@ impl Serialize for SerializedMessages<'_> {
 fn fingerprint(message: &Message) -> String {
     let Message {
         kind,
-        location,
-        end_location,
+        range,
         fix: _fix,
         file,
-        noqa_row: _noqa_row,
+        noqa_offset: _,
     } = message;

     let mut hasher = DefaultHasher::new();

     kind.rule().hash(&mut hasher);
-    location.row().hash(&mut hasher);
-    location.column().hash(&mut hasher);
-    end_location.row().hash(&mut hasher);
-    end_location.column().hash(&mut hasher);
+    range.hash(&mut hasher);
     file.name().hash(&mut hasher);

     format!("{:x}", hasher.finish())
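
Hashing the `TextRange` as one value replaces the four separate row/column hashes. A std-only sketch of the simplified fingerprint, with the range modeled as a (start, end) tuple:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn fingerprint(rule: &str, range: (u32, u32), file: &str) -> String {
        let mut hasher = DefaultHasher::new();
        rule.hash(&mut hasher);
        range.hash(&mut hasher); // one hash call instead of four
        file.hash(&mut hasher);
        format!("{:x}", hasher.finish())
    }

    fn main() {
        let a = fingerprint("F401", (7, 9), "fib.py");
        assert_eq!(a, fingerprint("F401", (7, 9), "fib.py")); // stable
        assert_ne!(a, fingerprint("F401", (8, 9), "fib.py")); // range-sensitive
    }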
@@ -1,14 +1,20 @@
 use crate::fs::relativize_path;
 use crate::jupyter::JupyterIndex;
+use crate::message::diff::calculate_print_width;
 use crate::message::text::{MessageCodeFrame, RuleCodeAndBody};
-use crate::message::{group_messages_by_filename, Emitter, EmitterContext, Message};
+use crate::message::{
+    group_messages_by_filename, Emitter, EmitterContext, Message, MessageWithLocation,
+};
 use colored::Colorize;
+use ruff_python_ast::source_code::OneIndexed;
 use std::fmt::{Display, Formatter};
 use std::io::Write;
+use std::num::NonZeroUsize;

 #[derive(Default)]
 pub struct GroupedEmitter {
     show_fix_status: bool,
+    show_source: bool,
 }

 impl GroupedEmitter {
@@ -17,6 +23,12 @@ impl GroupedEmitter {
         self.show_fix_status = show_fix_status;
         self
     }
+
+    #[must_use]
+    pub fn with_show_source(mut self, show_source: bool) -> Self {
+        self.show_source = show_source;
+        self
+    }
 }

 impl Emitter for GroupedEmitter {
@@ -29,20 +41,17 @@ impl Emitter for GroupedEmitter {
         for (filename, messages) in group_messages_by_filename(messages) {
             // Compute the maximum number of digits in the row and column, for messages in
             // this file.
-            let row_length = num_digits(
-                messages
-                    .iter()
-                    .map(|message| message.location.row())
-                    .max()
-                    .unwrap(),
-            );
-            let column_length = num_digits(
-                messages
-                    .iter()
-                    .map(|message| message.location.column())
-                    .max()
-                    .unwrap(),
-            );
+
+            let mut max_row_length = OneIndexed::MIN;
+            let mut max_column_length = OneIndexed::MIN;
+
+            for message in &messages {
+                max_row_length = max_row_length.max(message.start_location.row);
+                max_column_length = max_column_length.max(message.start_location.column);
+            }
+
+            let row_length = calculate_print_width(max_row_length);
+            let column_length = calculate_print_width(max_column_length);

             // Print the filename.
             writeln!(writer, "{}:", relativize_path(filename).underline())?;
@@ -53,11 +62,12 @@ impl Emitter for GroupedEmitter {
                     writer,
                     "{}",
                     DisplayGroupedMessage {
+                        jupyter_index: context.jupyter_index(message.filename()),
                         message,
                         show_fix_status: self.show_fix_status,
+                        show_source: self.show_source,
                         row_length,
                         column_length,
-                        jupyter_index: context.jupyter_index(message.filename()),
                     }
                 )?;
             }
@@ -69,21 +79,26 @@ impl Emitter for GroupedEmitter {
 }

 struct DisplayGroupedMessage<'a> {
-    message: &'a Message,
+    message: MessageWithLocation<'a>,
     show_fix_status: bool,
-    row_length: usize,
-    column_length: usize,
+    show_source: bool,
+    row_length: NonZeroUsize,
+    column_length: NonZeroUsize,
     jupyter_index: Option<&'a JupyterIndex>,
 }

 impl Display for DisplayGroupedMessage<'_> {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        let message = self.message;
+        let MessageWithLocation {
+            message,
+            start_location,
+        } = &self.message;
+
         write!(
             f,
             "  {row_padding}",
-            row_padding = " ".repeat(self.row_length - num_digits(message.location.row()))
+            row_padding =
+                " ".repeat(self.row_length.get() - calculate_print_width(start_location.row).get())
         )?;

         // Check if we're working on a jupyter notebook and translate positions with cell accordingly
@@ -91,29 +106,31 @@ impl Display for DisplayGroupedMessage<'_> {
             write!(
                 f,
                 "cell {cell}{sep}",
-                cell = jupyter_index.row_to_cell[message.location.row()],
+                cell = jupyter_index.row_to_cell[start_location.row.get()],
                 sep = ":".cyan()
             )?;
             (
-                jupyter_index.row_to_row_in_cell[message.location.row()] as usize,
-                message.location.column(),
+                jupyter_index.row_to_row_in_cell[start_location.row.get()] as usize,
+                start_location.column.get(),
             )
         } else {
-            (message.location.row(), message.location.column())
+            (start_location.row.get(), start_location.column.get())
        };

         writeln!(
             f,
             "{row}{sep}{col}{col_padding} {code_and_body}",
             sep = ":".cyan(),
-            col_padding = " ".repeat(self.column_length - num_digits(message.location.column())),
+            col_padding = " ".repeat(
+                self.column_length.get() - calculate_print_width(start_location.column).get()
+            ),
             code_and_body = RuleCodeAndBody {
                 message_kind: &message.kind,
                 show_fix_status: self.show_fix_status
             },
         )?;

-        {
+        if self.show_source {
             use std::fmt::Write;
             let mut padded = PadAdapter::new(f);
             write!(padded, "{}", MessageCodeFrame { message })?;
@@ -125,16 +142,6 @@ impl Display for DisplayGroupedMessage<'_> {
     }
 }

-fn num_digits(n: usize) -> usize {
-    std::iter::successors(Some(n), |n| {
-        let next = n / 10;
-        (next > 0).then_some(next)
-    })
-    .count()
-    .max(1)
-}
-
 /// Adapter that adds a ' ' at the start of every line without the need to copy the string.
 /// Inspired by Rust's `debug_struct()` internal implementation that also uses a `PadAdapter`.
 struct PadAdapter<'buf> {
@@ -174,7 +181,7 @@ mod tests {

     #[test]
     fn default() {
-        let mut emitter = GroupedEmitter::default();
+        let mut emitter = GroupedEmitter::default().with_show_source(true);
         let content = capture_emitter_output(&mut emitter, &create_messages());

         assert_snapshot!(content);
@@ -182,7 +189,9 @@ mod tests {

     #[test]
     fn fix_status() {
-        let mut emitter = GroupedEmitter::default().with_show_fix_status(true);
+        let mut emitter = GroupedEmitter::default()
+            .with_show_fix_status(true)
+            .with_show_source(true);
         let content = capture_emitter_output(&mut emitter, &create_messages());

         assert_snapshot!(content);
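
`calculate_print_width` takes over from the removed `num_digits` for right-aligning rows and columns. A std-only sketch of the same computation on a plain usize:

    fn print_width(mut n: usize) -> usize {
        let mut width = 1;
        while n >= 10 {
            n /= 10;
            width += 1;
        }
        width
    }

    fn main() {
        assert_eq!(print_width(1), 1);
        assert_eq!(print_width(42), 2);
        assert_eq!(print_width(100), 3);
        // Padding for row 7 when the widest row in the file needs 3 digits:
        assert_eq!(" ".repeat(3 - print_width(7)), "  ");
    }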
@@ -1,9 +1,10 @@
 use crate::message::{Emitter, EmitterContext, Message};
 use crate::registry::AsRule;
 use ruff_diagnostics::Edit;
+use ruff_python_ast::source_code::{SourceCode, SourceLocation};
 use serde::ser::SerializeSeq;
 use serde::{Serialize, Serializer};
-use serde_json::json;
+use serde_json::{json, Value};
 use std::io::Write;

 #[derive(Default)]
@@ -34,23 +35,29 @@ impl Serialize for ExpandedMessages<'_> {
         let mut s = serializer.serialize_seq(Some(self.messages.len()))?;

         for message in self.messages {
+            let source_code = message.file.to_source_code();
+
             let fix = if message.fix.is_empty() {
                 None
             } else {
                 Some(json!({
                     "message": message.kind.suggestion.as_deref(),
-                    "edits": &ExpandedEdits { edits: message.fix.edits() },
+                    "edits": &ExpandedEdits { edits: message.fix.edits(), source_code: &source_code },
                 }))
             };

+            let start_location = source_code.source_location(message.start());
+            let end_location = source_code.source_location(message.end());
+            let noqa_location = source_code.source_location(message.noqa_offset);
+
             let value = json!({
                 "code": message.kind.rule().noqa_code().to_string(),
                 "message": message.kind.body,
                 "fix": fix,
-                "location": message.location,
-                "end_location": message.end_location,
+                "location": start_location,
+                "end_location": end_location,
                 "filename": message.filename(),
-                "noqa_row": message.noqa_row
+                "noqa_row": noqa_location.row
             });

             s.serialize_element(&value)?;
@@ -62,6 +69,7 @@ impl Serialize for ExpandedMessages<'_> {

 struct ExpandedEdits<'a> {
     edits: &'a [Edit],
+    source_code: &'a SourceCode<'a, 'a>,
 }

 impl Serialize for ExpandedEdits<'_> {
@@ -72,10 +80,12 @@ impl Serialize for ExpandedEdits<'_> {
         let mut s = serializer.serialize_seq(Some(self.edits.len()))?;

         for edit in self.edits {
+            let start_location = self.source_code.source_location(edit.start());
+            let end_location = self.source_code.source_location(edit.end());
+
             let value = json!({
                 "content": edit.content().unwrap_or_default(),
-                "location": edit.location(),
-                "end_location": edit.end_location()
+                "location": to_zero_indexed_column(&start_location),
+                "end_location": to_zero_indexed_column(&end_location)
             });

             s.serialize_element(&value)?;
@@ -85,6 +95,13 @@ impl Serialize for ExpandedEdits<'_> {
     }
 }

+fn to_zero_indexed_column(location: &SourceLocation) -> Value {
+    json!({
+        "row": location.row,
+        "column": location.column.to_zero_indexed()
+    })
+}
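
`to_zero_indexed_column` keeps the serialized JSON shape compatible: rows stay one-indexed while edit columns go back to zero-indexed. A std-only sketch of the `OneIndexed` conversion it leans on:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct OneIndexed(usize);

    impl OneIndexed {
        fn from_zero_indexed(value: usize) -> Self {
            Self(value + 1)
        }
        fn to_zero_indexed(self) -> usize {
            self.0 - 1
        }
    }

    fn main() {
        let column = OneIndexed::from_zero_indexed(0);
        assert_eq!(column.0, 1); // displayed as column 1
        assert_eq!(column.to_zero_indexed(), 0); // serialized as 0
    }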
 #[cfg(test)]
 mod tests {
     use crate::message::tests::{capture_emitter_output, create_messages};
@@ -1,6 +1,9 @@
-use crate::message::{group_messages_by_filename, Emitter, EmitterContext, Message};
+use crate::message::{
+    group_messages_by_filename, Emitter, EmitterContext, Message, MessageWithLocation,
+};
 use crate::registry::AsRule;
 use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite};
+use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
 use std::io::Write;
 use std::path::Path;
@@ -23,17 +26,29 @@ impl Emitter for JunitEmitter {
                 .insert("package".to_string(), "org.ruff".to_string());

             for message in messages {
+                let MessageWithLocation {
+                    message,
+                    start_location,
+                } = message;
                 let mut status = TestCaseStatus::non_success(NonSuccessKind::Failure);
                 status.set_message(message.kind.body.clone());
-                let (row, col) = if context.is_jupyter_notebook(message.filename()) {
+                let location = if context.is_jupyter_notebook(message.filename()) {
                     // We can't give a reasonable location for the structured formats,
                     // so we show one that's clearly a fallback
-                    (1, 0)
+                    SourceLocation {
+                        row: OneIndexed::from_zero_indexed(0),
+                        column: OneIndexed::from_zero_indexed(0),
+                    }
                 } else {
-                    (message.location.row(), message.location.column())
+                    start_location
                 };
-                status.set_description(format!("line {row}, col {col}, {}", message.kind.body));
+                status.set_description(format!(
+                    "line {row}, col {col}, {body}",
+                    row = location.row,
+                    col = location.column,
+                    body = message.kind.body
+                ));
                 let mut case = TestCase::new(
                     format!("org.ruff.{}", message.kind.rule().noqa_code()),
                     status,
@@ -43,9 +58,9 @@ impl Emitter for JunitEmitter {
                 let classname = file_path.parent().unwrap().join(file_stem);
                 case.set_classname(classname.to_str().unwrap());
                 case.extra
-                    .insert("line".to_string(), message.location.row().to_string());
+                    .insert("line".to_string(), location.row.to_string());
                 case.extra
-                    .insert("column".to_string(), message.location.column().to_string());
+                    .insert("column".to_string(), location.column.to_string());

                 test_suite.add_test_case(case);
             }
@ -8,10 +8,12 @@ mod junit;
mod pylint; mod pylint;
mod text; mod text;
use ruff_text_size::{TextRange, TextSize};
use rustc_hash::FxHashMap; use rustc_hash::FxHashMap;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::io::Write; use std::io::Write;
use std::ops::Deref;
pub use azure::AzureEmitter; pub use azure::AzureEmitter;
pub use github::GithubEmitter; pub use github::GithubEmitter;
@ -20,49 +22,64 @@ pub use grouped::GroupedEmitter;
pub use json::JsonEmitter; pub use json::JsonEmitter;
pub use junit::JunitEmitter; pub use junit::JunitEmitter;
pub use pylint::PylintEmitter; pub use pylint::PylintEmitter;
pub use rustpython_parser::ast::Location;
pub use text::TextEmitter; pub use text::TextEmitter;
use crate::jupyter::JupyterIndex; use crate::jupyter::JupyterIndex;
use crate::registry::AsRule;
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix}; use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix};
use ruff_python_ast::source_code::SourceFile; use ruff_python_ast::source_code::{SourceFile, SourceLocation};
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub struct Message { pub struct Message {
pub kind: DiagnosticKind, pub kind: DiagnosticKind,
pub location: Location, pub range: TextRange,
pub end_location: Location,
pub fix: Fix, pub fix: Fix,
pub file: SourceFile, pub file: SourceFile,
pub noqa_row: usize, pub noqa_offset: TextSize,
} }
 impl Message {
-    pub fn from_diagnostic(diagnostic: Diagnostic, file: SourceFile, noqa_row: usize) -> Self {
+    pub fn from_diagnostic(
+        diagnostic: Diagnostic,
+        file: SourceFile,
+        noqa_offset: TextSize,
+    ) -> Self {
         Self {
+            range: diagnostic.range(),
             kind: diagnostic.kind,
-            location: Location::new(diagnostic.location.row(), diagnostic.location.column() + 1),
-            end_location: Location::new(
-                diagnostic.end_location.row(),
-                diagnostic.end_location.column() + 1,
-            ),
             fix: diagnostic.fix,
             file,
-            noqa_row,
+            noqa_offset,
         }
     }

     pub fn filename(&self) -> &str {
         self.file.name()
     }
+
+    pub fn compute_start_location(&self) -> SourceLocation {
+        self.file.to_source_code().source_location(self.start())
+    }
+
+    pub fn compute_end_location(&self) -> SourceLocation {
+        self.file.to_source_code().source_location(self.end())
+    }
+
+    pub const fn start(&self) -> TextSize {
+        self.range.start()
+    }
+
+    pub const fn end(&self) -> TextSize {
+        self.range.end()
+    }
 }
 impl Ord for Message {
     fn cmp(&self, other: &Self) -> Ordering {
-        (self.filename(), self.location.row(), self.location.column()).cmp(&(
-            other.filename(),
-            other.location.row(),
-            other.location.column(),
-        ))
+        (self.filename(), self.start(), self.kind.rule()).cmp(&(
+            other.filename(),
+            other.start(),
+            other.kind.rule(),
+        ))
     }
 }
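Rows and columns are no longer stored on `Message`; they are derived on demand from the byte range via `compute_start_location`/`compute_end_location` above. A minimal, self-contained sketch of that derivation (hypothetical helper; byte-based columns for brevity, whereas the real `SourceLocation` column is character-aware):

/// Sketch: recover a one-indexed (row, column) from a byte offset, given the
/// byte offsets of every line start (the first entry is always 0).
fn source_location(line_starts: &[usize], offset: usize) -> (usize, usize) {
    // The number of line starts at or before `offset` is the one-indexed row.
    let row = line_starts.partition_point(|&start| start <= offset);
    let column = offset - line_starts[row - 1] + 1;
    (row, column)
}

fn main() {
    let text = "import os\nimport sys\n";
    let mut line_starts = vec![0];
    line_starts.extend(text.match_indices('\n').map(|(i, _)| i + 1));
    // Offset 17 points at the "s" of "sys" on line 2.
    assert_eq!(source_location(&line_starts, 17), (2, 8));
}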
@@ -73,13 +90,28 @@ impl PartialOrd for Message {
     }
 }

-fn group_messages_by_filename(messages: &[Message]) -> BTreeMap<&str, Vec<&Message>> {
+struct MessageWithLocation<'a> {
+    message: &'a Message,
+    start_location: SourceLocation,
+}
+
+impl Deref for MessageWithLocation<'_> {
+    type Target = Message;
+
+    fn deref(&self) -> &Self::Target {
+        self.message
+    }
+}
+
+fn group_messages_by_filename(messages: &[Message]) -> BTreeMap<&str, Vec<MessageWithLocation>> {
     let mut grouped_messages = BTreeMap::default();
     for message in messages {
         grouped_messages
             .entry(message.filename())
             .or_insert_with(Vec::new)
-            .push(message);
+            .push(MessageWithLocation {
+                message,
+                start_location: message.compute_start_location(),
+            });
     }
     grouped_messages
 }
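`MessageWithLocation` computes the expensive location lookup once per message and lets everything else on `Message` resolve through `Deref`. A self-contained sketch of the same pattern with simplified stand-in types:

use std::ops::Deref;

struct Message {
    filename: String,
}

impl Message {
    fn filename(&self) -> &str {
        &self.filename
    }
}

/// A message bundled with a start location that was computed once, up front.
struct MessageWithLocation<'a> {
    message: &'a Message,
    start_location: (usize, usize), // stand-in for SourceLocation
}

impl Deref for MessageWithLocation<'_> {
    type Target = Message;

    fn deref(&self) -> &Self::Target {
        self.message
    }
}

fn main() {
    let message = Message {
        filename: "fib.py".to_string(),
    };
    let with_location = MessageWithLocation {
        message: &message,
        start_location: (1, 8),
    };
    // Methods resolve through Deref; the cached location is a field access.
    println!("{}:{:?}", with_location.filename(), with_location.start_location);
}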
@@ -120,11 +152,11 @@ impl<'a> EmitterContext<'a> {
 #[cfg(test)]
 mod tests {
-    use crate::message::{Emitter, EmitterContext, Location, Message};
+    use crate::message::{Emitter, EmitterContext, Message};
     use crate::rules::pyflakes::rules::{UndefinedName, UnusedImport, UnusedVariable};
     use ruff_diagnostics::{Diagnostic, Edit, Fix};
     use ruff_python_ast::source_code::SourceFileBuilder;
-    use ruff_python_ast::types::Range;
+    use ruff_text_size::{TextRange, TextSize};
     use rustc_hash::FxHashMap;

     pub(super) fn create_messages() -> Vec<Message> {
@@ -148,20 +180,20 @@ def fibonacci(n):
                 context: None,
                 multiple: false,
             },
-            Range::new(Location::new(1, 7), Location::new(1, 9)),
+            TextRange::new(TextSize::from(7), TextSize::from(9)),
         );
-        let fib_source = SourceFileBuilder::new("fib.py").source_text(fib).finish();
+        let fib_source = SourceFileBuilder::new("fib.py", fib).finish();

         let unused_variable = Diagnostic::new(
             UnusedVariable {
                 name: "x".to_string(),
             },
-            Range::new(Location::new(6, 4), Location::new(6, 5)),
+            TextRange::new(TextSize::from(94), TextSize::from(95)),
         )
         .with_fix(Fix::new(vec![Edit::deletion(
-            Location::new(6, 4),
-            Location::new(6, 9),
+            TextSize::from(94),
+            TextSize::from(99),
         )]));

         let file_2 = r#"if a == 1: pass"#;
@@ -170,17 +202,18 @@ def fibonacci(n):
             UndefinedName {
                 name: "a".to_string(),
             },
-            Range::new(Location::new(1, 3), Location::new(1, 4)),
+            TextRange::new(TextSize::from(3), TextSize::from(4)),
         );
-        let file_2_source = SourceFileBuilder::new("undef.py")
-            .source_text(file_2)
-            .finish();
+        let file_2_source = SourceFileBuilder::new("undef.py", file_2).finish();
+
+        let unused_import_start = unused_import.start();
+        let unused_variable_start = unused_variable.start();
+        let undefined_name_start = undefined_name.start();

         vec![
-            Message::from_diagnostic(unused_import, fib_source.clone(), 1),
-            Message::from_diagnostic(unused_variable, fib_source, 1),
-            Message::from_diagnostic(undefined_name, file_2_source, 1),
+            Message::from_diagnostic(unused_import, fib_source.clone(), unused_import_start),
+            Message::from_diagnostic(unused_variable, fib_source, unused_variable_start),
+            Message::from_diagnostic(undefined_name, file_2_source, undefined_name_start),
         ]
     }


@@ -1,6 +1,7 @@
 use crate::fs::relativize_path;
 use crate::message::{Emitter, EmitterContext, Message};
 use crate::registry::AsRule;
+use ruff_python_ast::source_code::OneIndexed;
 use std::io::Write;

 /// Generate violations in Pylint format.
@@ -19,9 +20,9 @@ impl Emitter for PylintEmitter {
             let row = if context.is_jupyter_notebook(message.filename()) {
                 // We can't give a reasonable location for the structured formats,
                 // so we show one that's clearly a fallback
-                1
+                OneIndexed::from_zero_indexed(0)
             } else {
-                message.location.row()
+                message.compute_start_location().row
             };

             writeln!(


@@ -46,7 +46,7 @@ expression: content
       "column": 6
     },
     "filename": "fib.py",
-    "noqa_row": 1
+    "noqa_row": 6
   },
   {
     "code": "F821",


@@ -4,30 +4,45 @@ use crate::message::{Emitter, EmitterContext, Message};
 use crate::registry::AsRule;
 use annotate_snippets::display_list::{DisplayList, FormatOptions};
 use annotate_snippets::snippet::{Annotation, AnnotationType, Slice, Snippet, SourceAnnotation};
+use bitflags::bitflags;
 use colored::Colorize;
 use ruff_diagnostics::DiagnosticKind;
-use ruff_python_ast::source_code::OneIndexed;
+use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
 use ruff_text_size::TextRange;
-use std::cmp;
 use std::fmt::{Display, Formatter};
 use std::io::Write;

+bitflags! {
+    #[derive(Default)]
+    struct EmitterFlags: u8 {
+        const SHOW_FIX_STATUS = 0b0000_0001;
+        const SHOW_FIX = 0b0000_0010;
+        const SHOW_SOURCE = 0b0000_0100;
+    }
+}
+
 #[derive(Default)]
 pub struct TextEmitter {
-    show_fix_status: bool,
-    show_fix: bool,
+    flags: EmitterFlags,
 }

 impl TextEmitter {
     #[must_use]
     pub fn with_show_fix_status(mut self, show_fix_status: bool) -> Self {
-        self.show_fix_status = show_fix_status;
+        self.flags
+            .set(EmitterFlags::SHOW_FIX_STATUS, show_fix_status);
         self
     }

     #[must_use]
     pub fn with_show_fix(mut self, show_fix: bool) -> Self {
-        self.show_fix = show_fix;
+        self.flags.set(EmitterFlags::SHOW_FIX, show_fix);
         self
     }
+
+    #[must_use]
+    pub fn with_show_source(mut self, show_source: bool) -> Self {
+        self.flags.set(EmitterFlags::SHOW_SOURCE, show_source);
+        self
+    }
 }
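The three booleans collapse into one `EmitterFlags` value. For reference, `set` and `contains` behave like this (a sketch assuming bitflags 1.x, as pinned in the lockfile):

use bitflags::bitflags;

bitflags! {
    #[derive(Default)]
    struct EmitterFlags: u8 {
        const SHOW_FIX_STATUS = 0b0000_0001;
        const SHOW_FIX = 0b0000_0010;
        const SHOW_SOURCE = 0b0000_0100;
    }
}

fn main() {
    let mut flags = EmitterFlags::default(); // all bits cleared
    flags.set(EmitterFlags::SHOW_SOURCE, true);
    assert!(flags.contains(EmitterFlags::SHOW_SOURCE));
    assert!(!flags.contains(EmitterFlags::SHOW_FIX));
}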
@@ -47,41 +62,48 @@ impl Emitter for TextEmitter {
                 sep = ":".cyan(),
             )?;

-            // Check if we're working on a jupyter notebook and translate positions with cell accordingly
-            let (row, col) = if let Some(jupyter_index) = context.jupyter_index(message.filename())
-            {
-                write!(
-                    writer,
-                    "cell {cell}{sep}",
-                    cell = jupyter_index.row_to_cell[message.location.row()],
-                    sep = ":".cyan(),
-                )?;
-                (
-                    jupyter_index.row_to_row_in_cell[message.location.row()] as usize,
-                    message.location.column(),
-                )
-            } else {
-                (message.location.row(), message.location.column())
-            };
+            let start_location = message.compute_start_location();
+
+            // Check if we're working on a jupyter notebook and translate positions with cell accordingly
+            let diagnostic_location =
+                if let Some(jupyter_index) = context.jupyter_index(message.filename()) {
+                    write!(
+                        writer,
+                        "cell {cell}{sep}",
+                        cell = jupyter_index.row_to_cell[start_location.row.get()],
+                        sep = ":".cyan(),
+                    )?;
+
+                    SourceLocation {
+                        row: OneIndexed::new(
+                            jupyter_index.row_to_row_in_cell[start_location.row.get()] as usize,
+                        )
+                        .unwrap(),
+                        column: start_location.column,
+                    }
+                } else {
+                    start_location
+                };

             writeln!(
                 writer,
                 "{row}{sep}{col}{sep} {code_and_body}",
+                row = diagnostic_location.row,
+                col = diagnostic_location.column,
                 sep = ":".cyan(),
                 code_and_body = RuleCodeAndBody {
                     message_kind: &message.kind,
-                    show_fix_status: self.show_fix_status
+                    show_fix_status: self.flags.contains(EmitterFlags::SHOW_FIX_STATUS)
                 }
             )?;

-            if message.file.source_code().is_some() {
+            if self.flags.contains(EmitterFlags::SHOW_SOURCE) {
                 writeln!(writer, "{}", MessageCodeFrame { message })?;
             }

-            if self.show_fix {
+            if self.flags.contains(EmitterFlags::SHOW_FIX) {
                 if let Some(diff) = Diff::from_message(message) {
                     writeln!(writer, "{diff}")?;
                 }
             }
         }
     }
@@ -135,105 +157,91 @@ pub(super) struct MessageCodeFrame<'a> {
 impl Display for MessageCodeFrame<'_> {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
         let Message {
-            kind,
-            file,
-            location,
-            end_location,
-            ..
+            kind, file, range, ..
         } = self.message;

-        if let Some(source_code) = file.source_code() {
-            let suggestion = kind.suggestion.as_deref();
-            let footer = if suggestion.is_some() {
-                vec![Annotation {
-                    id: None,
-                    label: suggestion,
-                    annotation_type: AnnotationType::Help,
-                }]
-            } else {
-                Vec::new()
-            };
+        let suggestion = kind.suggestion.as_deref();
+        let footer = if suggestion.is_some() {
+            vec![Annotation {
+                id: None,
+                label: suggestion,
+                annotation_type: AnnotationType::Help,
+            }]
+        } else {
+            Vec::new()
+        };

-            let mut start_index =
-                OneIndexed::new(cmp::max(1, location.row().saturating_sub(2))).unwrap();
-            let content_start_index = OneIndexed::new(location.row()).unwrap();
+        let source_code = file.to_source_code();
+        let content_start_index = source_code.line_index(range.start());
+        let mut start_index = content_start_index.saturating_sub(2);

-            // Trim leading empty lines.
-            while start_index < content_start_index {
-                if !source_code.line_text(start_index).trim().is_empty() {
-                    break;
-                }
-                start_index = start_index.saturating_add(1);
-            }
+        // Trim leading empty lines.
+        while start_index < content_start_index {
+            if !source_code.line_text(start_index).trim().is_empty() {
+                break;
+            }
+            start_index = start_index.saturating_add(1);
+        }

-            let mut end_index = OneIndexed::new(cmp::min(
-                end_location.row().saturating_add(2),
-                source_code.line_count() + 1,
-            ))
-            .unwrap();
-            let content_end_index = OneIndexed::new(end_location.row()).unwrap();
+        let content_end_index = source_code.line_index(range.end());
+        let mut end_index = content_end_index
+            .saturating_add(2)
+            .min(OneIndexed::from_zero_indexed(source_code.line_count()));

-            // Trim trailing empty lines
-            while end_index > content_end_index {
-                if !source_code.line_text(end_index).trim().is_empty() {
-                    break;
-                }
-                end_index = end_index.saturating_sub(1);
-            }
+        // Trim trailing empty lines
+        while end_index > content_end_index {
+            if !source_code.line_text(end_index).trim().is_empty() {
+                break;
+            }
+            end_index = end_index.saturating_sub(1);
+        }

-            let start_offset = source_code.line_start(start_index);
-            let end_offset = source_code.line_end(end_index);
-
-            let source_text = &source_code.text()[TextRange::new(start_offset, end_offset)];
+        let start_offset = source_code.line_start(start_index);
+        let end_offset = source_code.line_end(end_index);
+        let source_text = source_code.slice(TextRange::new(start_offset, end_offset));

-            let annotation_start_offset =
-                // Message columns are one indexed
-                source_code.offset(location.with_col_offset(-1)) - start_offset;
-            let annotation_end_offset =
-                source_code.offset(end_location.with_col_offset(-1)) - start_offset;
+        let annotation_start_offset = range.start() - start_offset;
+        let annotation_end_offset = range.end() - start_offset;

-            let start_char = source_text[TextRange::up_to(annotation_start_offset)]
-                .chars()
-                .count();
-            let char_length = source_text
-                [TextRange::new(annotation_start_offset, annotation_end_offset)]
-                .chars()
-                .count();
+        let start_char = source_text[TextRange::up_to(annotation_start_offset)]
+            .chars()
+            .count();
+        let char_length = source_text
+            [TextRange::new(annotation_start_offset, annotation_end_offset)]
+            .chars()
+            .count();

-            let label = kind.rule().noqa_code().to_string();
+        let label = kind.rule().noqa_code().to_string();

-            let snippet = Snippet {
-                title: None,
-                slices: vec![Slice {
-                    source: source_text,
-                    line_start: location.row(),
-                    annotations: vec![SourceAnnotation {
-                        label: &label,
-                        annotation_type: AnnotationType::Error,
-                        range: (start_char, start_char + char_length),
-                    }],
-                    // The origin (file name, line number, and column number) is already encoded
-                    // in the `label`.
-                    origin: None,
-                    fold: false,
-                }],
-                footer,
-                opt: FormatOptions {
-                    #[cfg(test)]
-                    color: false,
-                    #[cfg(not(test))]
-                    color: colored::control::SHOULD_COLORIZE.should_colorize(),
-                    ..FormatOptions::default()
-                },
-            };
+        let snippet = Snippet {
+            title: None,
+            slices: vec![Slice {
+                source: source_text,
+                line_start: content_start_index.get(),
+                annotations: vec![SourceAnnotation {
+                    label: &label,
+                    annotation_type: AnnotationType::Error,
+                    range: (start_char, start_char + char_length),
+                }],
+                // The origin (file name, line number, and column number) is already encoded
+                // in the `label`.
+                origin: None,
+                fold: false,
+            }],
+            footer,
+            opt: FormatOptions {
+                #[cfg(test)]
+                color: false,
+                #[cfg(not(test))]
+                color: colored::control::SHOULD_COLORIZE.should_colorize(),
+                ..FormatOptions::default()
+            },
+        };

-            writeln!(f, "{message}", message = DisplayList::from(snippet))?;
-        }
-
-        Ok(())
+        writeln!(f, "{message}", message = DisplayList::from(snippet))
     }
 }
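annotate-snippets expects char-based annotation ranges, while `range` is byte-based, so the frame rebases the byte range against the snippet start and then counts chars. The conversion in isolation (hypothetical helper, not part of the commit):

/// Sketch: turn a byte span, relative to the snippet text, into the
/// char-based (start, end) pair that annotate-snippets expects.
fn char_span(source_text: &str, start: usize, end: usize) -> (usize, usize) {
    let start_char = source_text[..start].chars().count();
    let char_length = source_text[start..end].chars().count();
    (start_char, start_char + char_length)
}

fn main() {
    // Multi-byte characters make byte and char indices diverge.
    let text = "x = 'é' + y";
    let start = text.find('y').unwrap(); // byte index 11
    assert_eq!(char_span(text, start, start + 1), (10, 11));
}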
@@ -245,7 +253,7 @@ mod tests {
     #[test]
     fn default() {
-        let mut emitter = TextEmitter::default();
+        let mut emitter = TextEmitter::default().with_show_source(true);
         let content = capture_emitter_output(&mut emitter, &create_messages());

         assert_snapshot!(content);
@@ -253,7 +261,9 @@ mod tests {
     #[test]
     fn fix_status() {
-        let mut emitter = TextEmitter::default().with_show_fix_status(true);
+        let mut emitter = TextEmitter::default()
+            .with_show_fix_status(true)
+            .with_show_source(true);
         let content = capture_emitter_output(&mut emitter, &create_messages());

         assert_snapshot!(content);


@@ -1,3 +1,4 @@
+use std::collections::BTreeMap;
 use std::fmt::{Display, Write};
 use std::fs;
 use std::path::Path;
@@ -5,16 +6,12 @@ use std::path::Path;
 use anyhow::Result;
 use itertools::Itertools;
 use log::warn;
-use nohash_hasher::IntMap;
 use once_cell::sync::Lazy;
 use regex::Regex;
-use rustc_hash::FxHashMap;
-use rustpython_parser::ast::Location;
+use ruff_text_size::{TextLen, TextRange, TextSize};

 use ruff_diagnostics::Diagnostic;
-use ruff_python_ast::newlines::StrExt;
 use ruff_python_ast::source_code::{LineEnding, Locator};
-use ruff_python_ast::types::Range;

 use crate::codes::NoqaCode;
 use crate::registry::{AsRule, Rule, RuleSet};
@@ -31,46 +28,52 @@ static SPLIT_COMMA_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"[,\s]").unwrap
 #[derive(Debug)]
 pub enum Directive<'a> {
     None,
-    All(usize, usize, usize, usize),
-    Codes(usize, usize, usize, Vec<&'a str>, usize),
+    // (leading spaces, noqa_range, trailing_spaces)
+    All(TextSize, TextRange, TextSize),
+    // (leading spaces, start_offset, end_offset, codes, trailing_spaces)
+    Codes(TextSize, TextRange, Vec<&'a str>, TextSize),
 }

 /// Extract the noqa `Directive` from a line of Python source code.
-pub fn extract_noqa_directive(line: &str) -> Directive {
-    match NOQA_LINE_REGEX.captures(line) {
-        Some(caps) => match caps.name("leading_spaces") {
-            Some(leading_spaces) => match caps.name("trailing_spaces") {
-                Some(trailing_spaces) => match caps.name("noqa") {
-                    Some(noqa) => match caps.name("codes") {
-                        Some(codes) => {
-                            let codes: Vec<&str> = SPLIT_COMMA_REGEX
-                                .split(codes.as_str().trim())
-                                .map(str::trim)
-                                .filter(|code| !code.is_empty())
-                                .collect();
-                            if codes.is_empty() {
-                                warn!("Expected rule codes on `noqa` directive: \"{line}\"");
-                            }
-                            Directive::Codes(
-                                leading_spaces.as_str().chars().count(),
-                                noqa.start(),
-                                noqa.end(),
-                                codes,
-                                trailing_spaces.as_str().chars().count(),
-                            )
-                        }
-                        None => Directive::All(
-                            leading_spaces.as_str().chars().count(),
-                            noqa.start(),
-                            noqa.end(),
-                            trailing_spaces.as_str().chars().count(),
-                        ),
-                    },
-                    None => Directive::None,
-                },
-                None => Directive::None,
-            },
-            None => Directive::None,
+pub fn extract_noqa_directive<'a>(range: TextRange, locator: &'a Locator) -> Directive<'a> {
+    let text = &locator.contents()[range];
+    match NOQA_LINE_REGEX.captures(text) {
+        Some(caps) => match (
+            caps.name("leading_spaces"),
+            caps.name("noqa"),
+            caps.name("codes"),
+            caps.name("trailing_spaces"),
+        ) {
+            (Some(leading_spaces), Some(noqa), Some(codes), Some(trailing_spaces)) => {
+                let codes: Vec<&str> = SPLIT_COMMA_REGEX
+                    .split(codes.as_str().trim())
+                    .map(str::trim)
+                    .filter(|code| !code.is_empty())
+                    .collect();
+
+                let start = range.start() + TextSize::try_from(noqa.start()).unwrap();
+                if codes.is_empty() {
+                    #[allow(deprecated)]
+                    let line = locator.compute_line_index(start);
+                    warn!("Expected rule codes on `noqa` directive: \"{line}\"");
+                }
+                Directive::Codes(
+                    leading_spaces.as_str().text_len(),
+                    TextRange::at(start, noqa.as_str().text_len()),
+                    codes,
+                    trailing_spaces.as_str().text_len(),
+                )
+            }
+
+            (Some(leading_spaces), Some(noqa), None, Some(trailing_spaces)) => Directive::All(
+                leading_spaces.as_str().text_len(),
+                TextRange::at(
+                    range.start() + TextSize::try_from(noqa.start()).unwrap(),
+                    noqa.as_str().text_len(),
+                ),
+                trailing_spaces.as_str().text_len(),
+            ),
+            _ => Directive::None,
         },
         None => Directive::None,
     }
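The old four-deep `match` ladder over optional capture groups becomes a single tuple match. The shape of that refactor, reduced to its essentials (illustrative only; not the real capture types):

fn classify(
    leading: Option<&str>,
    noqa: Option<&str>,
    codes: Option<&str>,
    trailing: Option<&str>,
) -> &'static str {
    match (leading, noqa, codes, trailing) {
        (Some(_), Some(_), Some(_), Some(_)) => "codes",
        (Some(_), Some(_), None, Some(_)) => "all",
        _ => "none",
    }
}

fn main() {
    assert_eq!(classify(Some(""), Some("# noqa"), None, Some("")), "all");
    assert_eq!(classify(None, None, None, None), "none");
}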
@@ -129,16 +132,13 @@ pub fn includes(needle: Rule, haystack: &[&str]) -> bool {
 /// Returns `true` if the given [`Rule`] is ignored at the specified `lineno`.
 pub fn rule_is_ignored(
     code: Rule,
-    lineno: usize,
-    noqa_line_for: &IntMap<usize, usize>,
+    offset: TextSize,
+    noqa_line_for: &NoqaMapping,
     locator: &Locator,
 ) -> bool {
-    let noqa_lineno = noqa_line_for.get(&lineno).unwrap_or(&lineno);
-    let line = locator.slice(Range::new(
-        Location::new(*noqa_lineno, 0),
-        Location::new(noqa_lineno + 1, 0),
-    ));
-    match extract_noqa_directive(line) {
+    let offset = noqa_line_for.resolve(offset);
+    let line_range = locator.line_range(offset);
+    match extract_noqa_directive(line_range, locator) {
         Directive::None => false,
         Directive::All(..) => true,
         Directive::Codes(.., codes, _) => includes(code, &codes),
@@ -153,11 +153,11 @@ pub enum FileExemption {
 /// Extract the [`FileExemption`] for a given Python source file, enumerating any rules that are
 /// globally ignored within the file.
-pub fn file_exemption(lines: &[&str], commented_lines: &[usize]) -> FileExemption {
+pub fn file_exemption(contents: &str, comment_ranges: &[TextRange]) -> FileExemption {
     let mut exempt_codes: Vec<NoqaCode> = vec![];

-    for lineno in commented_lines {
-        match parse_file_exemption(lines[lineno - 1]) {
+    for range in comment_ranges {
+        match parse_file_exemption(&contents[*range]) {
             ParsedExemption::All => {
                 return FileExemption::All;
             }
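With comment ranges in hand, the exemption check slices each comment straight out of the source text instead of indexing a pre-split line table. A tiny sketch of the idea (plain byte ranges standing in for `TextRange`):

fn main() {
    let contents = "# ruff: noqa\nx = 1\n";
    // Hypothetical comment range produced upstream: bytes 0..12.
    let comment_range = 0..12;
    assert_eq!(&contents[comment_range], "# ruff: noqa");
}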
@@ -182,17 +182,18 @@ pub fn file_exemption(lines: &[&str], commented_lines: &[usize]) -> FileExemptio
     }
 }

+/// Adds noqa comments to suppress all diagnostics of a file.
 pub fn add_noqa(
     path: &Path,
     diagnostics: &[Diagnostic],
-    contents: &str,
-    commented_lines: &[usize],
-    noqa_line_for: &IntMap<usize, usize>,
+    locator: &Locator,
+    commented_lines: &[TextRange],
+    noqa_line_for: &NoqaMapping,
     line_ending: LineEnding,
 ) -> Result<usize> {
     let (count, output) = add_noqa_inner(
         diagnostics,
-        contents,
+        locator,
         commented_lines,
         noqa_line_for,
         line_ending,
fn add_noqa_inner( fn add_noqa_inner(
diagnostics: &[Diagnostic], diagnostics: &[Diagnostic],
contents: &str, locator: &Locator,
commented_lines: &[usize], commented_ranges: &[TextRange],
noqa_line_for: &IntMap<usize, usize>, noqa_line_for: &NoqaMapping,
line_ending: LineEnding, line_ending: LineEnding,
) -> (usize, String) { ) -> (usize, String) {
// Map of line number to set of (non-ignored) diagnostic codes that are triggered on that line. // Map of line start offset to set of (non-ignored) diagnostic codes that are triggered on that line.
let mut matches_by_line: FxHashMap<usize, RuleSet> = FxHashMap::default(); let mut matches_by_line: BTreeMap<TextSize, (RuleSet, Option<&Directive>)> =
BTreeMap::default();
let lines: Vec<&str> = contents.universal_newlines().collect();
// Whether the file is exempted from all checks. // Whether the file is exempted from all checks.
// Codes that are globally exempted (within the current file). // Codes that are globally exempted (within the current file).
let exemption = file_exemption(&lines, commented_lines); let exemption = file_exemption(locator.contents(), commented_ranges);
let directives = NoqaDirectives::from_commented_ranges(commented_ranges, locator);
// Mark any non-ignored diagnostics. // Mark any non-ignored diagnostics.
for diagnostic in diagnostics { for diagnostic in diagnostics {
@@ -233,116 +234,122 @@ fn add_noqa_inner(
             FileExemption::None => {}
         }

-        let diagnostic_lineno = diagnostic.location.row();
-
         // Is the violation ignored by a `noqa` directive on the parent line?
-        if let Some(parent_lineno) = diagnostic.parent.map(|location| location.row()) {
-            if parent_lineno != diagnostic_lineno {
-                let noqa_lineno = noqa_line_for.get(&parent_lineno).unwrap_or(&parent_lineno);
-                if commented_lines.contains(noqa_lineno) {
-                    match extract_noqa_directive(lines[noqa_lineno - 1]) {
-                        Directive::All(..) => {
-                            continue;
-                        }
-                        Directive::Codes(.., codes, _) => {
-                            if includes(diagnostic.kind.rule(), codes) {
-                                continue;
-                            }
-                        }
-                        Directive::None => {}
-                    }
-                }
-            }
-        }
+        if let Some(parent) = diagnostic.parent {
+            if let Some(directive_line) =
+                directives.find_line_with_directive(noqa_line_for.resolve(parent))
+            {
+                match &directive_line.directive {
+                    Directive::All(..) => {
+                        continue;
+                    }
+                    Directive::Codes(.., codes, _) => {
+                        if includes(diagnostic.kind.rule(), &codes) {
+                            continue;
+                        }
+                    }
+                    Directive::None => {}
+                }
+            }
+        }

-        // Is the diagnostic ignored by a `noqa` directive on the same line?
-        let noqa_lineno = noqa_line_for
-            .get(&diagnostic_lineno)
-            .unwrap_or(&diagnostic_lineno);
-        if commented_lines.contains(noqa_lineno) {
-            match extract_noqa_directive(lines[noqa_lineno - 1]) {
-                Directive::All(..) => {
-                    continue;
-                }
-                Directive::Codes(.., codes, _) => {
-                    if includes(diagnostic.kind.rule(), &codes) {
-                        continue;
-                    }
-                }
-                Directive::None => {}
-            }
-        }
+        let noqa_offset = noqa_line_for.resolve(diagnostic.start());
+
+        // Or ignored by the directive itself
+        if let Some(directive_line) = directives.find_line_with_directive(noqa_offset) {
+            match &directive_line.directive {
+                Directive::All(..) => {
+                    continue;
+                }
+                Directive::Codes(.., codes, _) => {
+                    let rule = diagnostic.kind.rule();
+                    if !includes(rule, codes) {
+                        matches_by_line
+                            .entry(directive_line.range.start())
+                            .or_insert_with(|| {
+                                (RuleSet::default(), Some(&directive_line.directive))
+                            })
+                            .0
+                            .insert(rule);
+                    }
+                    continue;
+                }
+                Directive::None => {}
+            }
+        }

-        // The diagnostic is not ignored by any `noqa` directive; add it to the list.
-        let lineno = diagnostic.location.row() - 1;
-        let noqa_lineno = noqa_line_for.get(&(lineno + 1)).unwrap_or(&(lineno + 1)) - 1;
+        // There's no existing noqa directive that suppresses the diagnostic.
         matches_by_line
-            .entry(noqa_lineno)
-            .or_default()
+            .entry(locator.line_start(noqa_offset))
+            .or_insert_with(|| (RuleSet::default(), None))
+            .0
             .insert(diagnostic.kind.rule());
     }
-    let mut count: usize = 0;
-    let mut output = String::new();
-    for (lineno, line) in lines.into_iter().enumerate() {
-        match matches_by_line.get(&lineno) {
-            None => {
-                output.push_str(line);
-                output.push_str(&line_ending);
-            }
-            Some(rules) => {
-                match extract_noqa_directive(line) {
-                    Directive::None => {
-                        // Add existing content.
-                        output.push_str(line.trim_end());
-
-                        // Add `noqa` directive.
-                        output.push_str("  # noqa: ");
-
-                        // Add codes.
-                        push_codes(&mut output, rules.iter().map(|rule| rule.noqa_code()));
-                        output.push_str(&line_ending);
-                        count += 1;
-                    }
-                    Directive::All(..) => {
-                        // Leave the line as-is.
-                        output.push_str(line);
-                        output.push_str(&line_ending);
-                    }
-                    Directive::Codes(_, start_byte, _, existing, _) => {
-                        // Reconstruct the line based on the preserved rule codes.
-                        // This enables us to tally the number of edits.
-                        let mut formatted = String::with_capacity(line.len());
-
-                        // Add existing content.
-                        formatted.push_str(line[..start_byte].trim_end());
-
-                        // Add `noqa` directive.
-                        formatted.push_str("  # noqa: ");
-
-                        // Add codes.
-                        push_codes(
-                            &mut formatted,
-                            rules
-                                .iter()
-                                .map(|r| r.noqa_code().to_string())
-                                .chain(existing.into_iter().map(ToString::to_string))
-                                .sorted_unstable(),
-                        );
-
-                        output.push_str(&formatted);
-                        output.push_str(&line_ending);
-
-                        // Only count if the new line is an actual edit.
-                        if formatted != line {
-                            count += 1;
-                        }
-                    }
-                };
-            }
-        }
-    }
+    let mut count = 0;
+    let mut output = String::with_capacity(locator.len());
+    let mut prev_end = TextSize::default();
+
+    for (offset, (rules, directive)) in matches_by_line {
+        output.push_str(&locator.contents()[TextRange::new(prev_end, offset)]);
+
+        let line = locator.full_line(offset);
+
+        match directive {
+            None | Some(Directive::None) => {
+                // Add existing content.
+                output.push_str(line.trim_end());
+
+                // Add `noqa` directive.
+                output.push_str("  # noqa: ");
+
+                // Add codes.
+                push_codes(&mut output, rules.iter().map(|rule| rule.noqa_code()));
+                output.push_str(&line_ending);
+                count += 1;
+            }
+            Some(Directive::All(..)) => {
+                // Does not get inserted into the map.
+            }
+            Some(Directive::Codes(_, noqa_range, existing, _)) => {
+                // Reconstruct the line based on the preserved rule codes.
+                // This enables us to tally the number of edits.
+                let output_start = output.len();
+
+                // Add existing content.
+                output.push_str(
+                    locator
+                        .slice(TextRange::new(offset, noqa_range.start()))
+                        .trim_end(),
+                );
+
+                // Add `noqa` directive.
+                output.push_str("  # noqa: ");
+
+                // Add codes.
+                push_codes(
+                    &mut output,
+                    rules
+                        .iter()
+                        .map(|r| r.noqa_code().to_string())
+                        .chain(existing.iter().map(ToString::to_string))
+                        .sorted_unstable(),
+                );
+
+                // Only count if the new line is an actual edit.
+                if &output[output_start..] != line.trim_end() {
+                    count += 1;
+                }
+
+                output.push_str(&line_ending);
+            }
+        }
+
+        prev_end = offset + line.text_len();
+    }
+
+    output.push_str(&locator.contents()[usize::from(prev_end)..]);

     (count, output)
 }
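The output loop is now the classic splice pattern: walk the edits in offset order, copy the untouched gap before each edited line verbatim, then append the remainder. The same pattern in isolation (hypothetical `splice` helper over plain byte indices):

fn splice(source: &str, edits: &[(usize, usize, &str)]) -> String {
    let mut output = String::with_capacity(source.len());
    let mut prev_end = 0;
    for &(start, end, replacement) in edits {
        // Copy everything between the previous edit and this one.
        output.push_str(&source[prev_end..start]);
        output.push_str(replacement);
        prev_end = end;
    }
    // Copy the tail after the last edit.
    output.push_str(&source[prev_end..]);
    output
}

fn main() {
    let src = "x = 1\ny = 2\n";
    let out = splice(src, &[(0, 5, "x = 1  # noqa: F841")]);
    assert_eq!(out, "x = 1  # noqa: F841\ny = 2\n");
}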
@@ -352,21 +359,161 @@ fn push_codes<I: Display>(str: &mut String, codes: impl Iterator<Item = I>) {
         if !first {
             str.push_str(", ");
         }
-        _ = write!(str, "{code}");
+        write!(str, "{code}").unwrap();
         first = false;
     }
 }
+
+#[derive(Debug)]
+pub(crate) struct NoqaDirectiveLine<'a> {
+    // The range of the text line for which the noqa directive applies.
+    pub range: TextRange,
+    pub directive: Directive<'a>,
+    pub matches: Vec<NoqaCode>,
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct NoqaDirectives<'a> {
+    inner: Vec<NoqaDirectiveLine<'a>>,
+}
+
+impl<'a> NoqaDirectives<'a> {
+    pub fn from_commented_ranges(comment_ranges: &[TextRange], locator: &'a Locator<'a>) -> Self {
+        let mut directives = Vec::new();
+
+        for comment_range in comment_ranges {
+            let line_range = locator.line_range(comment_range.start());
+
+            let directive = match extract_noqa_directive(line_range, locator) {
+                Directive::None => {
+                    continue;
+                }
+                directive @ (Directive::All(..) | Directive::Codes(..)) => directive,
+            };
+
+            // noqa comments are guaranteed to be single line.
+            directives.push(NoqaDirectiveLine {
+                range: line_range,
+                directive,
+                matches: Vec::new(),
+            });
+        }
+
+        // Extend a mapping at the end of the file to also include the EOF token.
+        if let Some(last) = directives.last_mut() {
+            if last.range.end() == locator.contents().text_len() {
+                last.range = last.range.add_end(TextSize::from(1));
+            }
+        }
+
+        Self { inner: directives }
+    }
+
+    pub fn find_line_with_directive(&self, offset: TextSize) -> Option<&NoqaDirectiveLine> {
+        self.find_line_index(offset).map(|index| &self.inner[index])
+    }
+
+    pub fn find_line_with_directive_mut(
+        &mut self,
+        offset: TextSize,
+    ) -> Option<&mut NoqaDirectiveLine<'a>> {
+        if let Some(index) = self.find_line_index(offset) {
+            Some(&mut self.inner[index])
+        } else {
+            None
+        }
+    }
+
+    fn find_line_index(&self, offset: TextSize) -> Option<usize> {
+        self.inner
+            .binary_search_by(|directive| {
+                if directive.range.end() < offset {
+                    std::cmp::Ordering::Less
+                } else if directive.range.contains(offset) {
+                    std::cmp::Ordering::Equal
+                } else {
+                    std::cmp::Ordering::Greater
+                }
+            })
+            .ok()
+    }
+
+    pub fn lines(&self) -> &[NoqaDirectiveLine] {
+        &self.inner
+    }
+}
+
+/// Remaps offsets falling into one of the ranges to instead check for a noqa comment on the
+/// line specified by the offset.
+#[derive(Debug, Default, PartialEq, Eq)]
+pub struct NoqaMapping {
+    ranges: Vec<TextRange>,
+}
+
+impl NoqaMapping {
+    pub(crate) fn with_capacity(capacity: usize) -> Self {
+        Self {
+            ranges: Vec::with_capacity(capacity),
+        }
+    }
+
+    /// Returns the re-mapped position or `position` if no mapping exists.
+    pub fn resolve(&self, offset: TextSize) -> TextSize {
+        let index = self.ranges.binary_search_by(|range| {
+            if range.end() < offset {
+                std::cmp::Ordering::Less
+            } else if range.contains(offset) {
+                std::cmp::Ordering::Equal
+            } else {
+                std::cmp::Ordering::Greater
+            }
+        });
+
+        if let Ok(index) = index {
+            self.ranges[index].end()
+        } else {
+            offset
+        }
+    }
+
+    pub fn push_mapping(&mut self, range: TextRange) {
+        if let Some(last_range) = self.ranges.last_mut() {
+            // Strictly sorted insertion
+            if last_range.end() <= range.start() {
+                // OK
+            }
+            // Try merging with the last inserted range
+            else if let Some(intersected) = last_range.intersect(range) {
+                *last_range = intersected;
+                return;
+            } else {
+                panic!("Ranges must be inserted in sorted order")
+            }
+        }
+
+        self.ranges.push(range);
+    }
+}
+
+impl FromIterator<TextRange> for NoqaMapping {
+    fn from_iter<T: IntoIterator<Item = TextRange>>(iter: T) -> Self {
+        let mut mappings = NoqaMapping::default();
+
+        for range in iter {
+            mappings.push_mapping(range);
+        }
+
+        mappings
+    }
+}
 #[cfg(test)]
 mod tests {
-    use nohash_hasher::IntMap;
-    use rustpython_parser::ast::Location;
+    use ruff_text_size::{TextRange, TextSize};

     use ruff_diagnostics::Diagnostic;
-    use ruff_python_ast::source_code::LineEnding;
-    use ruff_python_ast::types::Range;
+    use ruff_python_ast::source_code::{LineEnding, Locator};

-    use crate::noqa::{add_noqa_inner, NOQA_LINE_REGEX};
+    use crate::noqa::{add_noqa_inner, NoqaMapping, NOQA_LINE_REGEX};
     use crate::rules::pycodestyle::rules::AmbiguousVariableName;
     use crate::rules::pyflakes;
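Because directive lines are collected in source order, `find_line_index` above is a binary search with a containment comparator rather than a hash lookup keyed by line number. The comparator, exercised standalone (a sketch using the repo's ruff_text_size types; note the half-open ranges, which is why the last directive line is extended by one byte to cover an offset at EOF):

use ruff_text_size::{TextRange, TextSize};

fn find(ranges: &[TextRange], offset: TextSize) -> Option<usize> {
    ranges
        .binary_search_by(|range| {
            if range.end() < offset {
                std::cmp::Ordering::Less
            } else if range.contains(offset) {
                std::cmp::Ordering::Equal
            } else {
                std::cmp::Ordering::Greater
            }
        })
        .ok()
}

fn main() {
    let ranges = [
        TextRange::new(TextSize::from(0), TextSize::from(10)),
        TextRange::new(TextSize::from(10), TextSize::from(25)),
    ];
    assert_eq!(find(&ranges, TextSize::from(12)), Some(1));
    assert_eq!(find(&ranges, TextSize::from(30)), None);
}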
@@ -386,87 +533,83 @@ mod tests {
     #[test]
     fn modification() {
-        let diagnostics = vec![];
         let contents = "x = 1";
-        let commented_lines = vec![];
-        let noqa_line_for = IntMap::default();
+        let noqa_line_for = NoqaMapping::default();
         let (count, output) = add_noqa_inner(
-            &diagnostics,
-            contents,
-            &commented_lines,
+            &[],
+            &Locator::new(contents),
+            &[],
             &noqa_line_for,
             LineEnding::Lf,
         );
         assert_eq!(count, 0);
-        assert_eq!(output, format!("{contents}\n"));
+        assert_eq!(output, format!("{contents}"));

-        let diagnostics = vec![Diagnostic::new(
+        let diagnostics = [Diagnostic::new(
             pyflakes::rules::UnusedVariable {
                 name: "x".to_string(),
             },
-            Range::new(Location::new(1, 0), Location::new(1, 0)),
+            TextRange::new(TextSize::from(0), TextSize::from(0)),
         )];
         let contents = "x = 1";
-        let commented_lines = vec![1];
-        let noqa_line_for = IntMap::default();
+        let noqa_line_for = NoqaMapping::default();
         let (count, output) = add_noqa_inner(
             &diagnostics,
-            contents,
-            &commented_lines,
+            &Locator::new(contents),
+            &[],
             &noqa_line_for,
             LineEnding::Lf,
         );
         assert_eq!(count, 1);
         assert_eq!(output, "x = 1  # noqa: F841\n");

-        let diagnostics = vec![
+        let diagnostics = [
             Diagnostic::new(
                 AmbiguousVariableName("x".to_string()),
-                Range::new(Location::new(1, 0), Location::new(1, 0)),
+                TextRange::new(TextSize::from(0), TextSize::from(0)),
             ),
             Diagnostic::new(
                 pyflakes::rules::UnusedVariable {
                     name: "x".to_string(),
                 },
-                Range::new(Location::new(1, 0), Location::new(1, 0)),
+                TextRange::new(TextSize::from(0), TextSize::from(0)),
             ),
         ];
         let contents = "x = 1  # noqa: E741\n";
-        let commented_lines = vec![1];
-        let noqa_line_for = IntMap::default();
+        let noqa_line_for = NoqaMapping::default();
         let (count, output) = add_noqa_inner(
             &diagnostics,
-            contents,
-            &commented_lines,
+            &Locator::new(contents),
+            &[TextRange::new(TextSize::from(7), TextSize::from(19))],
             &noqa_line_for,
             LineEnding::Lf,
         );
         assert_eq!(count, 1);
         assert_eq!(output, "x = 1  # noqa: E741, F841\n");

-        let diagnostics = vec![
+        let diagnostics = [
             Diagnostic::new(
                 AmbiguousVariableName("x".to_string()),
-                Range::new(Location::new(1, 0), Location::new(1, 0)),
+                TextRange::new(TextSize::from(0), TextSize::from(0)),
             ),
             Diagnostic::new(
                 pyflakes::rules::UnusedVariable {
                     name: "x".to_string(),
                 },
-                Range::new(Location::new(1, 0), Location::new(1, 0)),
+                TextRange::new(TextSize::from(0), TextSize::from(0)),
             ),
         ];
         let contents = "x = 1  # noqa";
-        let commented_lines = vec![1];
-        let noqa_line_for = IntMap::default();
+        let noqa_line_for = NoqaMapping::default();
         let (count, output) = add_noqa_inner(
             &diagnostics,
-            contents,
-            &commented_lines,
+            &Locator::new(contents),
+            &[TextRange::new(TextSize::from(7), TextSize::from(13))],
             &noqa_line_for,
             LineEnding::Lf,
         );
         assert_eq!(count, 0);
-        assert_eq!(output, "x = 1  # noqa\n");
+        assert_eq!(output, "x = 1  # noqa");
     }
 }
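`NoqaMapping` replaces the old `IntMap<usize, usize>` that mapped every line of a multi-line statement to its last line: it stores the statement ranges and resolves any offset inside one to the range end, the line that can carry the `noqa` comment. Expected behavior, written as a hypothetical test next to the type:

#[test]
fn resolve_maps_offsets_to_range_ends() {
    use ruff_text_size::{TextRange, TextSize};

    // Suppose a multi-line statement spans bytes 10..40.
    let mapping = NoqaMapping::from_iter([TextRange::new(
        TextSize::from(10),
        TextSize::from(40),
    )]);

    // Offsets inside the range resolve to its end...
    assert_eq!(mapping.resolve(TextSize::from(25)), TextSize::from(40));
    // ...and offsets outside any range pass through unchanged.
    assert_eq!(mapping.resolve(TextSize::from(5)), TextSize::from(5));
}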


@@ -1,9 +1,8 @@
-use rustpython_parser::ast::Location;
+use ruff_text_size::{TextLen, TextRange};

 use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::source_code::Locator;
-use ruff_python_ast::types::Range;

 use crate::registry::Rule;
 use crate::settings::{flags, Settings};
@@ -49,20 +48,20 @@ fn is_standalone_comment(line: &str) -> bool {
 /// ERA001
 pub fn commented_out_code(
     locator: &Locator,
-    start: Location,
-    end: Location,
+    range: TextRange,
     settings: &Settings,
     autofix: flags::Autofix,
 ) -> Option<Diagnostic> {
-    let location = Location::new(start.row(), 0);
-    let end_location = Location::new(end.row() + 1, 0);
-    let line = locator.slice(Range::new(location, end_location));
+    let line = locator.full_lines(range);

     // Verify that the comment is on its own line, and that it contains code.
     if is_standalone_comment(line) && comment_contains_code(line, &settings.task_tags[..]) {
-        let mut diagnostic = Diagnostic::new(CommentedOutCode, Range::new(start, end));
+        let mut diagnostic = Diagnostic::new(CommentedOutCode, range);

         if autofix.into() && settings.rules.should_fix(Rule::CommentedOutCode) {
-            diagnostic.set_fix(Edit::deletion(location, end_location));
+            diagnostic.set_fix(Edit::range_deletion(TextRange::at(
+                range.start(),
+                line.text_len(),
+            )));
         }
         Some(diagnostic)
     } else {


@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located};
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;

 use crate::checkers::ast::Checker;
 use crate::registry::Rule;
@@ -141,13 +140,13 @@ pub fn subscript(checker: &mut Checker, value: &Expr, slice: &Expr) {
                 {
                     checker
                         .diagnostics
-                        .push(Diagnostic::new(SysVersionSlice1, Range::from(value)));
+                        .push(Diagnostic::new(SysVersionSlice1, value.range()));
                 } else if *i == BigInt::from(3)
                     && checker.settings.rules.enabled(Rule::SysVersionSlice3)
                 {
                     checker
                         .diagnostics
-                        .push(Diagnostic::new(SysVersionSlice3, Range::from(value)));
+                        .push(Diagnostic::new(SysVersionSlice3, value.range()));
                 }
             }
         }
@@ -159,12 +158,12 @@ pub fn subscript(checker: &mut Checker, value: &Expr, slice: &Expr) {
             if *i == BigInt::from(2) && checker.settings.rules.enabled(Rule::SysVersion2) {
                 checker
                     .diagnostics
-                    .push(Diagnostic::new(SysVersion2, Range::from(value)));
+                    .push(Diagnostic::new(SysVersion2, value.range()));
             } else if *i == BigInt::from(0) && checker.settings.rules.enabled(Rule::SysVersion0)
             {
                 checker
                     .diagnostics
-                    .push(Diagnostic::new(SysVersion0, Range::from(value)));
+                    .push(Diagnostic::new(SysVersion0, value.range()));
             }
         }
@@ -200,7 +199,7 @@ pub fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &
                 {
                     checker
                         .diagnostics
-                        .push(Diagnostic::new(SysVersionInfo0Eq3, Range::from(left)));
+                        .push(Diagnostic::new(SysVersionInfo0Eq3, left.range()));
                 }
             }
         } else if *i == BigInt::from(1) {
@@ -219,7 +218,7 @@ pub fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &
             if checker.settings.rules.enabled(Rule::SysVersionInfo1CmpInt) {
                 checker
                     .diagnostics
-                    .push(Diagnostic::new(SysVersionInfo1CmpInt, Range::from(left)));
+                    .push(Diagnostic::new(SysVersionInfo1CmpInt, left.range()));
             }
         }
     }
@@ -246,10 +245,9 @@ pub fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &
                 .rules
                 .enabled(Rule::SysVersionInfoMinorCmpInt)
             {
-                checker.diagnostics.push(Diagnostic::new(
-                    SysVersionInfoMinorCmpInt,
-                    Range::from(left),
-                ));
+                checker
+                    .diagnostics
+                    .push(Diagnostic::new(SysVersionInfoMinorCmpInt, left.range()));
             }
         }
     }
@@ -274,12 +272,12 @@ pub fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &
             if checker.settings.rules.enabled(Rule::SysVersionCmpStr10) {
                 checker
                     .diagnostics
-                    .push(Diagnostic::new(SysVersionCmpStr10, Range::from(left)));
+                    .push(Diagnostic::new(SysVersionCmpStr10, left.range()));
             }
         } else if checker.settings.rules.enabled(Rule::SysVersionCmpStr3) {
             checker
                 .diagnostics
-                .push(Diagnostic::new(SysVersionCmpStr3, Range::from(left)));
+                .push(Diagnostic::new(SysVersionCmpStr3, left.range()));
         }
     }
 }
@@ -294,6 +292,6 @@ pub fn name_or_attribute(checker: &mut Checker, expr: &Expr) {
     {
         checker
             .diagnostics
-            .push(Diagnostic::new(SixPY3, Range::from(expr)));
+            .push(Diagnostic::new(SixPY3, expr.range()));
     }
 }


@@ -4,21 +4,19 @@ use rustpython_parser::{lexer, Mode, Tok};

 use ruff_diagnostics::Edit;
 use ruff_python_ast::source_code::Locator;
-use ruff_python_ast::types::Range;

 /// ANN204
 pub fn add_return_annotation(locator: &Locator, stmt: &Stmt, annotation: &str) -> Result<Edit> {
-    let range = Range::from(stmt);
-    let contents = locator.slice(range);
+    let contents = &locator.contents()[stmt.range()];

     // Find the colon (following the `def` keyword).
     let mut seen_lpar = false;
     let mut seen_rpar = false;
     let mut count: usize = 0;
-    for (start, tok, ..) in lexer::lex_located(contents, Mode::Module, range.location).flatten() {
+    for (tok, range) in lexer::lex_located(contents, Mode::Module, stmt.start()).flatten() {
         if seen_lpar && seen_rpar {
             if matches!(tok, Tok::Colon) {
-                return Ok(Edit::insertion(format!(" -> {annotation}"), start));
+                return Ok(Edit::insertion(format!(" -> {annotation}"), range.start()));
             }
         }


@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Stmt};
 use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::helpers::ReturnStatementVisitor;
-use ruff_python_ast::types::Range;
 use ruff_python_ast::visitor::Visitor;
 use ruff_python_ast::{cast, helpers};
 use ruff_python_semantic::analyze::visibility;
@@ -446,7 +445,7 @@ fn check_dynamically_typed<F>(
     if checker.ctx.match_typing_expr(annotation, "Any") {
         diagnostics.push(Diagnostic::new(
             AnyType { name: func() },
-            Range::from(annotation),
+            annotation.range(),
         ));
     };
 }
@@ -513,7 +512,7 @@ pub fn definition(
                         MissingTypeFunctionArgument {
                             name: arg.node.arg.to_string(),
                         },
-                        Range::from(arg),
+                        arg.range(),
                     ));
                 }
             }
@@ -544,7 +543,7 @@ pub fn definition(
                     MissingTypeArgs {
                         name: arg.node.arg.to_string(),
                     },
-                    Range::from(arg),
+                    arg.range(),
                 ));
             }
         }
@@ -575,7 +574,7 @@ pub fn definition(
                     MissingTypeKwargs {
                         name: arg.node.arg.to_string(),
                     },
-                    Range::from(arg),
+                    arg.range(),
                 ));
             }
         }
@@ -592,7 +591,7 @@ pub fn definition(
                         MissingTypeCls {
                             name: arg.node.arg.to_string(),
                         },
-                        Range::from(arg),
+                        arg.range(),
                     ));
                 }
             } else {
@@ -601,7 +600,7 @@ pub fn definition(
                         MissingTypeSelf {
                             name: arg.node.arg.to_string(),
                         },
-                        Range::from(arg),
+                        arg.range(),
                     ));
                 }
             }


@@ -1,8 +1,8 @@
+use ruff_text_size::{TextLen, TextRange};
 use rustpython_parser::ast::Stmt;

 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;

 /// ## What it does
 /// Checks for uses of the `assert` keyword.
@@ -37,8 +37,5 @@ impl Violation for Assert {

 /// S101
 pub fn assert_used(stmt: &Stmt) -> Diagnostic {
-    Diagnostic::new(
-        Assert,
-        Range::new(stmt.location, stmt.location.with_col_offset("assert".len())),
-    )
+    Diagnostic::new(Assert, TextRange::at(stmt.start(), "assert".text_len()))
 }


@@ -7,7 +7,6 @@ use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::call_path::compose_call_path;
 use ruff_python_ast::helpers::SimpleCallArgs;
-use ruff_python_ast::types::Range;

 use crate::checkers::ast::Checker;
@@ -114,7 +113,7 @@ pub fn bad_file_permissions(
             if (int_value & WRITE_WORLD > 0) || (int_value & EXECUTE_GROUP > 0) {
                 checker.diagnostics.push(Diagnostic::new(
                     BadFilePermissions { mask: int_value },
-                    Range::from(mode_arg),
+                    mode_arg.range(),
                 ));
             }
         }


@@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};

 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;

 #[violation]
 pub struct ExecBuiltin;
@@ -22,5 +21,5 @@ pub fn exec_used(expr: &Expr, func: &Expr) -> Option<Diagnostic> {
     if id != "exec" {
         return None;
     }
-    Some(Diagnostic::new(ExecBuiltin, Range::from(expr)))
+    Some(Diagnostic::new(ExecBuiltin, expr.range()))
 }


@@ -1,6 +1,6 @@
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;
+use ruff_text_size::TextRange;

 #[violation]
 pub struct HardcodedBindAllInterfaces;
@@ -13,9 +13,9 @@ impl Violation for HardcodedBindAllInterfaces {
 }

 /// S104
-pub fn hardcoded_bind_all_interfaces(value: &str, range: &Range) -> Option<Diagnostic> {
+pub fn hardcoded_bind_all_interfaces(value: &str, range: TextRange) -> Option<Diagnostic> {
     if value == "0.0.0.0" {
-        Some(Diagnostic::new(HardcodedBindAllInterfaces, *range))
+        Some(Diagnostic::new(HardcodedBindAllInterfaces, range))
     } else {
         None
     }


@@ -2,7 +2,6 @@ use rustpython_parser::ast::{Arg, Arguments, Expr};

 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;

 use super::super::helpers::{matches_password_name, string_literal};
@@ -29,7 +28,7 @@ fn check_password_kwarg(arg: &Arg, default: &Expr) -> Option<Diagnostic> {
         HardcodedPasswordDefault {
             string: string.to_string(),
         },
-        Range::from(default),
+        default.range(),
     ))
 }


@@ -2,7 +2,6 @@ use rustpython_parser::ast::Keyword;

 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;

 use super::super::helpers::{matches_password_name, string_literal};
@@ -33,7 +32,7 @@ pub fn hardcoded_password_func_arg(keywords: &[Keyword]) -> Vec<Diagnostic> {
                 HardcodedPasswordFuncArg {
                     string: string.to_string(),
                 },
-                Range::from(keyword),
+                keyword.range(),
             ))
         })
         .collect()


@@ -2,7 +2,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind};

 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;

 use super::super::helpers::{matches_password_name, string_literal};
@@ -52,7 +51,7 @@ pub fn compare_to_hardcoded_password_string(left: &Expr, comparators: &[Expr]) -
                 HardcodedPasswordString {
                     string: string.to_string(),
                 },
-                Range::from(comp),
+                comp.range(),
             ))
         })
         .collect()
@@ -67,7 +66,7 @@ pub fn assign_hardcoded_password_string(value: &Expr, targets: &[Expr]) -> Optio
                 HardcodedPasswordString {
                     string: string.to_string(),
                 },
-                Range::from(value),
+                value.range(),
             ));
         }
     }


@@ -5,7 +5,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Operator};
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::helpers::{any_over_expr, unparse_expr};
-use ruff_python_ast::types::Range;

 use crate::checkers::ast::Checker;
@@ -98,7 +97,7 @@ pub fn hardcoded_sql_expression(checker: &mut Checker, expr: &Expr) {
         Some(string) if matches_sql_statement(&string) => {
             checker
                 .diagnostics
-                .push(Diagnostic::new(HardcodedSQLExpression, Range::from(expr)));
+                .push(Diagnostic::new(HardcodedSQLExpression, expr.range()));
         }
         _ => (),
     }


@@ -2,7 +2,6 @@ use rustpython_parser::ast::Expr;

 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;

 #[violation]
 pub struct HardcodedTempFile {
@@ -31,7 +30,7 @@ pub fn hardcoded_tmp_directory(
             HardcodedTempFile {
                 string: value.to_string(),
             },
-            Range::from(expr),
+            expr.range(),
         ))
     } else {
         None


@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::helpers::SimpleCallArgs;
-use ruff_python_ast::types::Range;

 use crate::checkers::ast::Checker;
@@ -74,7 +73,7 @@ pub fn hashlib_insecure_hash_functions(
                         HashlibInsecureHashFunction {
                             string: hash_func_name.to_string(),
                         },
-                        Range::from(name_arg),
+                        name_arg.range(),
                     ));
                 }
             }
@@ -91,7 +90,7 @@ pub fn hashlib_insecure_hash_functions(
                     HashlibInsecureHashFunction {
                         string: (*func_name).to_string(),
                     },
-                    Range::from(func),
+                    func.range(),
                 ));
             }
         }


@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::helpers::SimpleCallArgs;
-use ruff_python_ast::types::Range;

 use crate::checkers::ast::Checker;
@@ -57,20 +56,20 @@ pub fn jinja2_autoescape_false(
                     if id.as_str() != "select_autoescape" {
                         checker.diagnostics.push(Diagnostic::new(
                             Jinja2AutoescapeFalse { value: true },
-                            Range::from(autoescape_arg),
+                            autoescape_arg.range(),
                         ));
                     }
                 }
             }
             _ => checker.diagnostics.push(Diagnostic::new(
                 Jinja2AutoescapeFalse { value: true },
-                Range::from(autoescape_arg),
+                autoescape_arg.range(),
             )),
         }
     } else {
         checker.diagnostics.push(Diagnostic::new(
             Jinja2AutoescapeFalse { value: false },
-            Range::from(func),
+            func.range(),
         ));
     }
 }


@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, Keyword};
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::helpers::SimpleCallArgs;
-use ruff_python_ast::types::Range;

 use crate::checkers::ast::Checker;
@@ -34,10 +33,9 @@ pub fn logging_config_insecure_listen(
         let call_args = SimpleCallArgs::new(args, keywords);

         if call_args.keyword_argument("verify").is_none() {
-            checker.diagnostics.push(Diagnostic::new(
-                LoggingConfigInsecureListen,
-                Range::from(func),
-            ));
+            checker
+                .diagnostics
+                .push(Diagnostic::new(LoggingConfigInsecureListen, func.range()));
         }
     }
 }


@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::helpers::SimpleCallArgs;
-use ruff_python_ast::types::Range;

 use crate::checkers::ast::Checker;
@@ -66,7 +65,7 @@ pub fn request_with_no_cert_validation(
                     RequestWithNoCertValidation {
                         string: target.to_string(),
                     },
-                    Range::from(verify_arg),
+                    verify_arg.range(),
                 ));
             }
         }


@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::helpers::{unparse_constant, SimpleCallArgs};
-use ruff_python_ast::types::Range;

 use crate::checkers::ast::Checker;
@@ -56,13 +55,13 @@ pub fn request_without_timeout(
                         RequestWithoutTimeout {
                             timeout: Some(timeout),
                         },
-                        Range::from(timeout_arg),
+                        timeout_arg.range(),
                     ));
                 }
             } else {
                 checker.diagnostics.push(Diagnostic::new(
                     RequestWithoutTimeout { timeout: None },
-                    Range::from(func),
+                    func.range(),
                 ));
             }
         }


@ -7,7 +7,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::Truthiness; use ruff_python_ast::helpers::Truthiness;
use ruff_python_ast::types::Range;
use ruff_python_semantic::context::Context; use ruff_python_semantic::context::Context;
use crate::{ use crate::{
@ -202,7 +201,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
SubprocessPopenWithShellEqualsTrue { SubprocessPopenWithShellEqualsTrue {
seems_safe: shell_call_seems_safe(arg), seems_safe: shell_call_seems_safe(arg),
}, },
Range::from(keyword), keyword.range(),
)); ));
} }
} }
@ -218,7 +217,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
{ {
checker.diagnostics.push(Diagnostic::new( checker.diagnostics.push(Diagnostic::new(
SubprocessWithoutShellEqualsTrue, SubprocessWithoutShellEqualsTrue,
Range::from(keyword), keyword.range(),
)); ));
} }
} }
@ -231,7 +230,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
{ {
checker.diagnostics.push(Diagnostic::new( checker.diagnostics.push(Diagnostic::new(
SubprocessWithoutShellEqualsTrue, SubprocessWithoutShellEqualsTrue,
Range::from(arg), arg.range(),
)); ));
} }
} }
@ -248,10 +247,9 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
.rules .rules
.enabled(Rule::CallWithShellEqualsTrue) .enabled(Rule::CallWithShellEqualsTrue)
{ {
checker.diagnostics.push(Diagnostic::new( checker
CallWithShellEqualsTrue, .diagnostics
Range::from(keyword), .push(Diagnostic::new(CallWithShellEqualsTrue, keyword.range()));
));
} }
} }
@ -263,7 +261,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
StartProcessWithAShell { StartProcessWithAShell {
seems_safe: shell_call_seems_safe(arg), seems_safe: shell_call_seems_safe(arg),
}, },
Range::from(arg), arg.range(),
)); ));
} }
} }
@ -278,7 +276,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
{ {
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(StartProcessWithNoShell, Range::from(func))); .push(Diagnostic::new(StartProcessWithNoShell, func.range()));
} }
} }
@ -292,10 +290,9 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
{ {
if let Some(value) = try_string_literal(arg) { if let Some(value) = try_string_literal(arg) {
if FULL_PATH_REGEX.find(value).is_none() { if FULL_PATH_REGEX.find(value).is_none() {
checker.diagnostics.push(Diagnostic::new( checker
StartProcessWithPartialPath, .diagnostics
Range::from(arg), .push(Diagnostic::new(StartProcessWithPartialPath, arg.range()));
));
} }
} }
} }
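
These shell-injection hunks also show why many call sites collapse to a one-liner: `Diagnostic::new(kind, node.range())` now fits on one line because the second argument is a single `TextRange` value rather than a `Range` built on the spot. A sketch of a diagnostic type with that shape; the struct layout and the `Into<DiagnosticKind>` bound are assumptions for illustration, not the exact `ruff_diagnostics` definitions:

    // Sketch of a diagnostic constructor taking a byte range by value.
    // Local stand-ins; the kind is reduced to a plain string.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TextSize(u32);

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TextRange {
        start: TextSize,
        end: TextSize,
    }

    #[derive(Debug)]
    struct DiagnosticKind {
        name: String,
    }

    #[derive(Debug)]
    struct Diagnostic {
        kind: DiagnosticKind,
        range: TextRange,
    }

    impl Diagnostic {
        fn new<K: Into<DiagnosticKind>>(kind: K, range: TextRange) -> Self {
            Self { kind: kind.into(), range }
        }
        /// The range stays on the diagnostic, so later fix-up code can
        /// reuse it (e.g. to delete exactly the flagged span).
        fn range(&self) -> TextRange {
            self.range
        }
    }

    struct SubprocessWithoutShellEqualsTrue;

    impl From<SubprocessWithoutShellEqualsTrue> for DiagnosticKind {
        fn from(_: SubprocessWithoutShellEqualsTrue) -> Self {
            DiagnosticKind {
                name: "subprocess-without-shell-equals-true".to_string(),
            }
        }
    }

    fn main() {
        let range = TextRange { start: TextSize(100), end: TextSize(120) };
        let diagnostic = Diagnostic::new(SubprocessWithoutShellEqualsTrue, range);
        assert_eq!(diagnostic.range().start, TextSize(100));
    }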


@ -4,7 +4,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs; use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -40,10 +39,9 @@ pub fn snmp_insecure_version(
} = &mp_model_arg.node } = &mp_model_arg.node
{ {
if value.is_zero() || value.is_one() { if value.is_zero() || value.is_one() {
checker.diagnostics.push(Diagnostic::new( checker
SnmpInsecureVersion, .diagnostics
Range::from(mp_model_arg), .push(Diagnostic::new(SnmpInsecureVersion, mp_model_arg.range()));
));
} }
} }
} }


@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, Keyword};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs; use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -38,7 +37,7 @@ pub fn snmp_weak_cryptography(
if call_args.len() < 3 { if call_args.len() < 3 {
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(SnmpWeakCryptography, Range::from(func))); .push(Diagnostic::new(SnmpWeakCryptography, func.range()));
} }
} }
} }


@ -5,7 +5,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Violation}; use ruff_diagnostics::{Diagnostic, DiagnosticKind, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
use crate::registry::AsRule; use crate::registry::AsRule;
@ -512,7 +511,7 @@ pub fn suspicious_function_call(checker: &mut Checker, expr: &Expr) {
Reason::Telnet => SuspiciousTelnetUsage.into(), Reason::Telnet => SuspiciousTelnetUsage.into(),
Reason::FTPLib => SuspiciousFTPLibUsage.into(), Reason::FTPLib => SuspiciousFTPLibUsage.into(),
}; };
let diagnostic = Diagnostic::new::<DiagnosticKind>(diagnostic_kind, Range::from(expr)); let diagnostic = Diagnostic::new::<DiagnosticKind>(diagnostic_kind, expr.range());
if checker.settings.rules.enabled(diagnostic.kind.rule()) { if checker.settings.rules.enabled(diagnostic.kind.rule()) {
checker.diagnostics.push(diagnostic); checker.diagnostics.push(diagnostic);
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Excepthandler, Expr, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
use crate::rules::flake8_bandit::helpers::is_untyped_exception; use crate::rules::flake8_bandit::helpers::is_untyped_exception;
@ -30,9 +29,8 @@ pub fn try_except_continue(
&& body[0].node == StmtKind::Continue && body[0].node == StmtKind::Continue
&& (check_typed_exception || is_untyped_exception(type_, checker)) && (check_typed_exception || is_untyped_exception(type_, checker))
{ {
checker.diagnostics.push(Diagnostic::new( checker
TryExceptContinue, .diagnostics
Range::from(excepthandler), .push(Diagnostic::new(TryExceptContinue, excepthandler.range()));
));
} }
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Excepthandler, Expr, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
use crate::rules::flake8_bandit::helpers::is_untyped_exception; use crate::rules::flake8_bandit::helpers::is_untyped_exception;
@ -32,6 +31,6 @@ pub fn try_except_pass(
{ {
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(TryExceptPass, Range::from(excepthandler))); .push(Diagnostic::new(TryExceptPass, excepthandler.range()));
} }
} }


@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs; use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -55,13 +54,13 @@ pub fn unsafe_yaml_load(checker: &mut Checker, func: &Expr, args: &[Expr], keywo
}; };
checker.diagnostics.push(Diagnostic::new( checker.diagnostics.push(Diagnostic::new(
UnsafeYAMLLoad { loader }, UnsafeYAMLLoad { loader },
Range::from(loader_arg), loader_arg.range(),
)); ));
} }
} else { } else {
checker.diagnostics.push(Diagnostic::new( checker.diagnostics.push(Diagnostic::new(
UnsafeYAMLLoad { loader: None }, UnsafeYAMLLoad { loader: None },
Range::from(func), func.range(),
)); ));
} }
} }


@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::{find_keyword, is_const_true}; use ruff_python_ast::helpers::{find_keyword, is_const_true};
use ruff_python_ast::types::Range;
use ruff_python_semantic::analyze::logging; use ruff_python_semantic::analyze::logging;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -84,7 +83,7 @@ pub fn blind_except(
BlindExcept { BlindExcept {
name: id.to_string(), name: id.to_string(),
}, },
Range::from(type_), type_.range(),
)); ));
} }
} }


@ -4,7 +4,6 @@ use ruff_diagnostics::Violation;
use ruff_diagnostics::{Diagnostic, DiagnosticKind}; use ruff_diagnostics::{Diagnostic, DiagnosticKind};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::call_path::collect_call_path; use ruff_python_ast::call_path::collect_call_path;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -90,9 +89,7 @@ const fn is_boolean_arg(arg: &Expr) -> bool {
fn add_if_boolean(checker: &mut Checker, arg: &Expr, kind: DiagnosticKind) { fn add_if_boolean(checker: &mut Checker, arg: &Expr, kind: DiagnosticKind) {
if is_boolean_arg(arg) { if is_boolean_arg(arg) {
checker checker.diagnostics.push(Diagnostic::new(kind, arg.range()));
.diagnostics
.push(Diagnostic::new(kind, Range::from(arg)));
} }
} }
@ -134,7 +131,7 @@ pub fn check_positional_boolean_in_def(
} }
checker.diagnostics.push(Diagnostic::new( checker.diagnostics.push(Diagnostic::new(
BooleanPositionalArgInFunctionDefinition, BooleanPositionalArgInFunctionDefinition,
Range::from(arg), arg.range(),
)); ));
} }
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_semantic::analyze::visibility::{is_abstract, is_overload}; use ruff_python_semantic::analyze::visibility::{is_abstract, is_overload};
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -128,7 +127,7 @@ pub fn abstract_base_class(
EmptyMethodWithoutAbstractDecorator { EmptyMethodWithoutAbstractDecorator {
name: format!("{name}.{method_name}"), name: format!("{name}.{method_name}"),
}, },
Range::from(stmt), stmt.range(),
)); ));
} }
} }
@ -142,7 +141,7 @@ pub fn abstract_base_class(
AbstractBaseClassWithoutAbstractMethod { AbstractBaseClassWithoutAbstractMethod {
name: name.to_string(), name: name.to_string(),
}, },
Range::from(stmt), stmt.range(),
)); ));
} }
} }


@ -1,9 +1,9 @@
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Location, Stmt, StmtKind}; use ruff_text_size::TextSize;
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::unparse_stmt; use ruff_python_ast::helpers::unparse_stmt;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
use crate::registry::AsRule; use crate::registry::AsRule;
@ -24,16 +24,16 @@ impl AlwaysAutofixableViolation for AssertFalse {
fn assertion_error(msg: Option<&Expr>) -> Stmt { fn assertion_error(msg: Option<&Expr>) -> Stmt {
Stmt::new( Stmt::new(
Location::default(), TextSize::default(),
Location::default(), TextSize::default(),
StmtKind::Raise { StmtKind::Raise {
exc: Some(Box::new(Expr::new( exc: Some(Box::new(Expr::new(
Location::default(), TextSize::default(),
Location::default(), TextSize::default(),
ExprKind::Call { ExprKind::Call {
func: Box::new(Expr::new( func: Box::new(Expr::new(
Location::default(), TextSize::default(),
Location::default(), TextSize::default(),
ExprKind::Name { ExprKind::Name {
id: "AssertionError".to_string(), id: "AssertionError".to_string(),
ctx: ExprContext::Load, ctx: ExprContext::Load,
@ -61,12 +61,11 @@ pub fn assert_false(checker: &mut Checker, stmt: &Stmt, test: &Expr, msg: Option
return; return;
}; };
let mut diagnostic = Diagnostic::new(AssertFalse, Range::from(test)); let mut diagnostic = Diagnostic::new(AssertFalse, test.range());
if checker.patch(diagnostic.kind.rule()) { if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement( diagnostic.set_fix(Edit::range_replacement(
unparse_stmt(&assertion_error(msg), checker.stylist), unparse_stmt(&assertion_error(msg), checker.stylist),
stmt.location, stmt.range(),
stmt.end_location.unwrap(),
)); ));
} }
checker.diagnostics.push(diagnostic); checker.diagnostics.push(diagnostic);
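
The `assert_false` hunk introduces the second recurring change: fix construction. `Edit::replacement(content, start, end)` took two `Location`s, with the end pulled out of an `Option` via `stmt.end_location.unwrap()`, while `Edit::range_replacement(content, range)` takes the node's range as one value, so the `.unwrap()` disappears. A hedged sketch of both constructor shapes, with locally defined stand-in types rather than the real `ruff_diagnostics` ones:

    // Sketch of the Edit constructor change. Local stand-in types.
    #[derive(Debug, Clone, Copy)]
    struct TextSize(u32);

    #[derive(Debug, Clone, Copy)]
    struct TextRange {
        start: TextSize,
        end: TextSize,
    }

    #[derive(Debug)]
    struct Edit {
        range: TextRange, // span of source text to replace
        content: String,  // replacement text (empty for a deletion)
    }

    impl Edit {
        // Old shape: callers threaded two positions through, typically
        // `stmt.location` and `stmt.end_location.unwrap()`.
        fn replacement(content: String, start: TextSize, end: TextSize) -> Self {
            Self { range: TextRange { start, end }, content }
        }

        // New shape: the node's `range()` is passed through as one value.
        fn range_replacement(content: String, range: TextRange) -> Self {
            Self { range, content }
        }

        fn range_deletion(range: TextRange) -> Self {
            Self { range, content: String::new() }
        }
    }

    fn main() {
        let range = TextRange { start: TextSize(10), end: TextSize(16) };
        let old = Edit::replacement("raise AssertionError".to_string(), range.start, range.end);
        let new = Edit::range_replacement("raise AssertionError".to_string(), range);
        assert_eq!(old.content, new.content);
        let _delete = Edit::range_deletion(range);
    }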


@ -2,7 +2,6 @@ use rustpython_parser::ast::{ExprKind, Stmt, Withitem};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -99,6 +98,6 @@ pub fn assert_raises_exception(checker: &mut Checker, stmt: &Stmt, items: &[With
checker.diagnostics.push(Diagnostic::new( checker.diagnostics.push(Diagnostic::new(
AssertRaisesException { kind }, AssertRaisesException { kind },
Range::from(stmt), stmt.range(),
)); ));
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -35,5 +34,5 @@ pub fn assignment_to_os_environ(checker: &mut Checker, targets: &[Expr]) {
} }
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(AssignmentToOsEnviron, Range::from(target))); .push(Diagnostic::new(AssignmentToOsEnviron, target.range()));
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_semantic::scope::ScopeKind; use ruff_python_semantic::scope::ScopeKind;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -51,10 +50,9 @@ pub fn cached_instance_method(checker: &mut Checker, decorator_list: &[Expr]) {
_ => decorator, _ => decorator,
}, },
) { ) {
checker.diagnostics.push(Diagnostic::new( checker
CachedInstanceMethod, .diagnostics
Range::from(decorator), .push(Diagnostic::new(CachedInstanceMethod, decorator.range()));
));
} }
} }
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -23,5 +22,5 @@ pub fn cannot_raise_literal(checker: &mut Checker, expr: &Expr) {
}; };
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(CannotRaiseLiteral, Range::from(expr))); .push(Diagnostic::new(CannotRaiseLiteral, expr.range()));
} }


@ -1,8 +1,7 @@
use itertools::Itertools; use itertools::Itertools;
use ruff_text_size::TextSize;
use rustc_hash::{FxHashMap, FxHashSet}; use rustc_hash::{FxHashMap, FxHashSet};
use rustpython_parser::ast::{ use rustpython_parser::ast::{Excepthandler, ExcepthandlerKind, Expr, ExprContext, ExprKind};
Excepthandler, ExcepthandlerKind, Expr, ExprContext, ExprKind, Location,
};
use ruff_diagnostics::{AlwaysAutofixableViolation, Violation}; use ruff_diagnostics::{AlwaysAutofixableViolation, Violation};
use ruff_diagnostics::{Diagnostic, Edit}; use ruff_diagnostics::{Diagnostic, Edit};
@ -10,7 +9,6 @@ use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::call_path; use ruff_python_ast::call_path;
use ruff_python_ast::call_path::CallPath; use ruff_python_ast::call_path::CallPath;
use ruff_python_ast::helpers::unparse_expr; use ruff_python_ast::helpers::unparse_expr;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
use crate::registry::{AsRule, Rule}; use crate::registry::{AsRule, Rule};
@ -52,8 +50,8 @@ impl AlwaysAutofixableViolation for DuplicateHandlerException {
fn type_pattern(elts: Vec<&Expr>) -> Expr { fn type_pattern(elts: Vec<&Expr>) -> Expr {
Expr::new( Expr::new(
Location::default(), TextSize::default(),
Location::default(), TextSize::default(),
ExprKind::Tuple { ExprKind::Tuple {
elts: elts.into_iter().cloned().collect(), elts: elts.into_iter().cloned().collect(),
ctx: ExprContext::Load, ctx: ExprContext::Load,
@ -95,17 +93,16 @@ fn duplicate_handler_exceptions<'a>(
.sorted() .sorted()
.collect::<Vec<String>>(), .collect::<Vec<String>>(),
}, },
Range::from(expr), expr.range(),
); );
if checker.patch(diagnostic.kind.rule()) { if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement( diagnostic.set_fix(Edit::range_replacement(
if unique_elts.len() == 1 { if unique_elts.len() == 1 {
unparse_expr(unique_elts[0], checker.stylist) unparse_expr(unique_elts[0], checker.stylist)
} else { } else {
unparse_expr(&type_pattern(unique_elts), checker.stylist) unparse_expr(&type_pattern(unique_elts), checker.stylist)
}, },
expr.location, expr.range(),
expr.end_location.unwrap(),
)); ));
} }
checker.diagnostics.push(diagnostic); checker.diagnostics.push(diagnostic);
@ -156,7 +153,7 @@ pub fn duplicate_exceptions(checker: &mut Checker, handlers: &[Excepthandler]) {
DuplicateTryBlockException { DuplicateTryBlockException {
name: name.join("."), name: name.join("."),
}, },
Range::from(expr), expr.range(),
)); ));
} }
} }


@ -3,7 +3,6 @@ use rustpython_parser::ast::{ExcepthandlerKind, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -27,9 +26,8 @@ pub fn except_with_empty_tuple(checker: &mut Checker, excepthandler: &Excepthand
return; return;
}; };
if elts.is_empty() { if elts.is_empty() {
checker.diagnostics.push(Diagnostic::new( checker
ExceptWithEmptyTuple, .diagnostics
Range::from(excepthandler), .push(Diagnostic::new(ExceptWithEmptyTuple, excepthandler.range()));
));
} }
} }


@ -4,7 +4,6 @@ use rustpython_parser::ast::{Excepthandler, ExcepthandlerKind, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -55,10 +54,9 @@ pub fn except_with_non_exception_classes(checker: &mut Checker, excepthandler: &
| ExprKind::Name { .. } | ExprKind::Name { .. }
| ExprKind::Call { .. }, | ExprKind::Call { .. },
) { ) {
checker.diagnostics.push(Diagnostic::new( checker
ExceptWithNonExceptionClasses, .diagnostics
Range::from(expr), .push(Diagnostic::new(ExceptWithNonExceptionClasses, expr.range()));
));
} }
} }
} }


@ -1,3 +1,4 @@
use ruff_text_size::TextRange;
use rustpython_parser::ast::{Arguments, Constant, Expr, ExprKind}; use rustpython_parser::ast::{Arguments, Constant, Expr, ExprKind};
use ruff_diagnostics::Violation; use ruff_diagnostics::Violation;
@ -5,7 +6,6 @@ use ruff_diagnostics::{Diagnostic, DiagnosticKind};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::call_path::from_qualified_name; use ruff_python_ast::call_path::from_qualified_name;
use ruff_python_ast::call_path::{compose_call_path, CallPath}; use ruff_python_ast::call_path::{compose_call_path, CallPath};
use ruff_python_ast::types::Range;
use ruff_python_ast::visitor; use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor; use ruff_python_ast::visitor::Visitor;
@ -61,7 +61,7 @@ fn is_immutable_func(checker: &Checker, func: &Expr, extend_immutable_calls: &[C
struct ArgumentDefaultVisitor<'a> { struct ArgumentDefaultVisitor<'a> {
checker: &'a Checker<'a>, checker: &'a Checker<'a>,
diagnostics: Vec<(DiagnosticKind, Range)>, diagnostics: Vec<(DiagnosticKind, TextRange)>,
extend_immutable_calls: Vec<CallPath<'a>>, extend_immutable_calls: Vec<CallPath<'a>>,
} }
@ -81,7 +81,7 @@ where
name: compose_call_path(func), name: compose_call_path(func),
} }
.into(), .into(),
Range::from(expr), expr.range(),
)); ));
} }
visitor::walk_expr(self, expr); visitor::walk_expr(self, expr);


@ -1,10 +1,11 @@
use ruff_text_size::TextRange;
use rustc_hash::FxHashSet; use rustc_hash::FxHashSet;
use rustpython_parser::ast::{Comprehension, Expr, ExprContext, ExprKind, Stmt, StmtKind}; use rustpython_parser::ast::{Comprehension, Expr, ExprContext, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::collect_arg_names; use ruff_python_ast::helpers::collect_arg_names;
use ruff_python_ast::types::{Node, Range}; use ruff_python_ast::types::Node;
use ruff_python_ast::visitor; use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor; use ruff_python_ast::visitor::Visitor;
@ -26,9 +27,9 @@ impl Violation for FunctionUsesLoopVariable {
#[derive(Default)] #[derive(Default)]
struct LoadedNamesVisitor<'a> { struct LoadedNamesVisitor<'a> {
// Tuple of: name, defining expression, and defining range. // Tuple of: name, defining expression, and defining range.
loaded: Vec<(&'a str, &'a Expr, Range)>, loaded: Vec<(&'a str, &'a Expr, TextRange)>,
// Tuple of: name, defining expression, and defining range. // Tuple of: name, defining expression, and defining range.
stored: Vec<(&'a str, &'a Expr, Range)>, stored: Vec<(&'a str, &'a Expr, TextRange)>,
} }
/// `Visitor` to collect all used identifiers in a statement. /// `Visitor` to collect all used identifiers in a statement.
@ -39,8 +40,8 @@ where
fn visit_expr(&mut self, expr: &'b Expr) { fn visit_expr(&mut self, expr: &'b Expr) {
match &expr.node { match &expr.node {
ExprKind::Name { id, ctx } => match ctx { ExprKind::Name { id, ctx } => match ctx {
ExprContext::Load => self.loaded.push((id, expr, Range::from(expr))), ExprContext::Load => self.loaded.push((id, expr, expr.range())),
ExprContext::Store => self.stored.push((id, expr, Range::from(expr))), ExprContext::Store => self.stored.push((id, expr, expr.range())),
ExprContext::Del => {} ExprContext::Del => {}
}, },
_ => visitor::walk_expr(self, expr), _ => visitor::walk_expr(self, expr),
@ -50,7 +51,7 @@ where
#[derive(Default)] #[derive(Default)]
struct SuspiciousVariablesVisitor<'a> { struct SuspiciousVariablesVisitor<'a> {
names: Vec<(&'a str, &'a Expr, Range)>, names: Vec<(&'a str, &'a Expr, TextRange)>,
safe_functions: Vec<&'a Expr>, safe_functions: Vec<&'a Expr>,
} }
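
The hunk above changes a type rather than a call site: visitor buffers that stored the old two-`Location` `Range` now store `TextRange`, a small `Copy` value (two `u32` offsets), so the collected tuples keep working by value. A minimal sketch of such a collecting visitor, with all types defined locally as stand-ins for the ruff AST:

    // Sketch: a visitor buffering (name, range) pairs by value, mirroring
    // the `loaded`/`stored` vectors in the diff. Local stand-in types.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TextSize(u32);

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TextRange {
        start: TextSize,
        end: TextSize,
    }

    struct Name {
        id: String,
        range: TextRange,
    }

    #[derive(Default)]
    struct LoadedNamesVisitor<'a> {
        // Tuple of: name and defining range.
        loaded: Vec<(&'a str, TextRange)>,
    }

    impl<'a> LoadedNamesVisitor<'a> {
        fn visit_name(&mut self, name: &'a Name) {
            // `TextRange` is Copy, so it is stored directly in the tuple.
            self.loaded.push((name.id.as_str(), name.range));
        }
    }

    fn main() {
        let name = Name {
            id: "i".to_string(),
            range: TextRange { start: TextSize(0), end: TextSize(1) },
        };
        let mut visitor = LoadedNamesVisitor::default();
        visitor.visit_name(&name);
        assert_eq!(visitor.loaded.len(), 1);
    }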


@ -1,9 +1,9 @@
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Location}; use ruff_text_size::TextSize;
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::unparse_expr; use ruff_python_ast::helpers::unparse_expr;
use ruff_python_ast::types::Range;
use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private}; use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private};
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -27,8 +27,8 @@ impl AlwaysAutofixableViolation for GetAttrWithConstant {
} }
fn attribute(value: &Expr, attr: &str) -> Expr { fn attribute(value: &Expr, attr: &str) -> Expr {
Expr::new( Expr::new(
Location::default(), TextSize::default(),
Location::default(), TextSize::default(),
ExprKind::Attribute { ExprKind::Attribute {
value: Box::new(value.clone()), value: Box::new(value.clone()),
attr: attr.to_string(), attr: attr.to_string(),
@ -61,13 +61,12 @@ pub fn getattr_with_constant(checker: &mut Checker, expr: &Expr, func: &Expr, ar
return; return;
} }
let mut diagnostic = Diagnostic::new(GetAttrWithConstant, Range::from(expr)); let mut diagnostic = Diagnostic::new(GetAttrWithConstant, expr.range());
if checker.patch(diagnostic.kind.rule()) { if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement( diagnostic.set_fix(Edit::range_replacement(
unparse_expr(&attribute(obj, value), checker.stylist), unparse_expr(&attribute(obj, value), checker.stylist),
expr.location, expr.range(),
expr.end_location.unwrap(),
)); ));
} }
checker.diagnostics.push(diagnostic); checker.diagnostics.push(diagnostic);
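
Synthesized AST nodes, built only so the unparser can render them, used `Location::default()` for both endpoints; the byte-offset equivalent is `TextSize::default()` (offset zero). A sketch of that construction pattern, assuming a local `Node<T>` stand-in for rustpython's `Located`:

    // Sketch: synthesizing an AST node that exists only to be unparsed.
    // The offsets are placeholders the unparser never reads, so the
    // default (zero) byte offset replaces the default row/column pair.
    #[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
    struct TextSize(u32);

    #[derive(Debug)]
    struct Node<T> {
        start: TextSize,
        end: TextSize,
        node: T,
    }

    impl<T> Node<T> {
        fn new(start: TextSize, end: TextSize, node: T) -> Self {
            Self { start, end, node }
        }
    }

    #[derive(Debug)]
    enum ExprKind {
        Name { id: String },
        Attribute { value: Box<Node<ExprKind>>, attr: String },
    }

    fn attribute(value: Node<ExprKind>, attr: &str) -> Node<ExprKind> {
        Node::new(
            TextSize::default(),
            TextSize::default(),
            ExprKind::Attribute {
                value: Box::new(value),
                attr: attr.to_string(),
            },
        )
    }

    fn main() {
        let obj = Node::new(
            TextSize::default(),
            TextSize::default(),
            ExprKind::Name { id: "obj".to_string() },
        );
        let attr = attribute(obj, "field");
        assert_eq!(attr.start, TextSize::default());
    }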


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -33,7 +32,7 @@ fn walk_stmt(checker: &mut Checker, body: &[Stmt], f: fn(&Stmt) -> bool) {
), ),
}, },
}, },
Range::from(stmt), stmt.range(),
)); ));
} }
match &stmt.node { match &stmt.node {


@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_ast::visitor; use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor; use ruff_python_ast::visitor::Visitor;
@ -74,7 +73,7 @@ pub fn loop_variable_overrides_iterator(checker: &mut Checker, target: &Expr, it
LoopVariableOverridesIterator { LoopVariableOverridesIterator {
name: name.to_string(), name: name.to_string(),
}, },
Range::from(expr), expr.range(),
)); ));
} }
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Arguments, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_semantic::analyze::typing::is_immutable_annotation; use ruff_python_semantic::analyze::typing::is_immutable_annotation;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -74,10 +73,9 @@ pub fn mutable_argument_default(checker: &mut Checker, arguments: &Arguments) {
.as_ref() .as_ref()
.map_or(false, |expr| is_immutable_annotation(&checker.ctx, expr)) .map_or(false, |expr| is_immutable_annotation(&checker.ctx, expr))
{ {
checker.diagnostics.push(Diagnostic::new( checker
MutableArgumentDefault, .diagnostics
Range::from(default), .push(Diagnostic::new(MutableArgumentDefault, default.range()));
));
} }
} }
} }


@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, Keyword};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs; use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -64,5 +63,5 @@ pub fn no_explicit_stacklevel(
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(NoExplicitStacklevel, Range::from(func))); .push(Diagnostic::new(NoExplicitStacklevel, func.range()));
} }


@ -3,7 +3,6 @@ use rustpython_parser::ast::{Excepthandler, ExcepthandlerKind, ExprKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::unparse_expr; use ruff_python_ast::helpers::unparse_expr;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
use crate::registry::AsRule; use crate::registry::AsRule;
@ -45,13 +44,12 @@ pub fn redundant_tuple_in_exception_handler(checker: &mut Checker, handlers: &[E
RedundantTupleInExceptionHandler { RedundantTupleInExceptionHandler {
name: unparse_expr(elt, checker.stylist), name: unparse_expr(elt, checker.stylist),
}, },
Range::from(type_), type_.range(),
); );
if checker.patch(diagnostic.kind.rule()) { if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement( diagnostic.set_fix(Edit::range_replacement(
unparse_expr(elt, checker.stylist), unparse_expr(elt, checker.stylist),
type_.location, type_.range(),
type_.end_location.unwrap(),
)); ));
} }
checker.diagnostics.push(diagnostic); checker.diagnostics.push(diagnostic);


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Comprehension, Expr, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_ast::visitor::{self, Visitor}; use ruff_python_ast::visitor::{self, Visitor};
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -339,6 +338,6 @@ pub fn reuse_of_groupby_generator(
for expr in finder.exprs { for expr in finder.exprs {
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(ReuseOfGroupbyGenerator, Range::from(expr))); .push(Diagnostic::new(ReuseOfGroupbyGenerator, expr.range()));
} }
} }


@ -1,10 +1,10 @@
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Location, Stmt, StmtKind}; use ruff_text_size::TextSize;
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::unparse_stmt; use ruff_python_ast::helpers::unparse_stmt;
use ruff_python_ast::source_code::Stylist; use ruff_python_ast::source_code::Stylist;
use ruff_python_ast::types::Range;
use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private}; use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private};
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -29,12 +29,12 @@ impl AlwaysAutofixableViolation for SetAttrWithConstant {
fn assignment(obj: &Expr, name: &str, value: &Expr, stylist: &Stylist) -> String { fn assignment(obj: &Expr, name: &str, value: &Expr, stylist: &Stylist) -> String {
let stmt = Stmt::new( let stmt = Stmt::new(
Location::default(), TextSize::default(),
Location::default(), TextSize::default(),
StmtKind::Assign { StmtKind::Assign {
targets: vec![Expr::new( targets: vec![Expr::new(
Location::default(), TextSize::default(),
Location::default(), TextSize::default(),
ExprKind::Attribute { ExprKind::Attribute {
value: Box::new(obj.clone()), value: Box::new(obj.clone()),
attr: name.to_string(), attr: name.to_string(),
@ -76,13 +76,12 @@ pub fn setattr_with_constant(checker: &mut Checker, expr: &Expr, func: &Expr, ar
// (i.e., it's directly within an `StmtKind::Expr`). // (i.e., it's directly within an `StmtKind::Expr`).
if let StmtKind::Expr { value: child } = &checker.ctx.current_stmt().node { if let StmtKind::Expr { value: child } = &checker.ctx.current_stmt().node {
if expr == child.as_ref() { if expr == child.as_ref() {
let mut diagnostic = Diagnostic::new(SetAttrWithConstant, Range::from(expr)); let mut diagnostic = Diagnostic::new(SetAttrWithConstant, expr.range());
if checker.patch(diagnostic.kind.rule()) { if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement( diagnostic.set_fix(Edit::range_replacement(
assignment(obj, name, value, checker.stylist), assignment(obj, name, value, checker.stylist),
expr.location, expr.range(),
expr.end_location.unwrap(),
)); ));
} }
checker.diagnostics.push(diagnostic); checker.diagnostics.push(diagnostic);


@ -11,7 +11,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -38,12 +37,12 @@ pub fn star_arg_unpacking_after_keyword_arg(
let ExprKind::Starred { .. } = arg.node else { let ExprKind::Starred { .. } = arg.node else {
continue; continue;
}; };
if arg.location <= keyword.location { if arg.start() <= keyword.start() {
continue; continue;
} }
checker.diagnostics.push(Diagnostic::new( checker.diagnostics.push(Diagnostic::new(
StarArgUnpackingAfterKeywordArg, StarArgUnpackingAfterKeywordArg,
Range::from(arg), arg.range(),
)); ));
} }
} }
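
The `arg.location <= keyword.location` comparison above becomes `arg.start() <= keyword.start()`: ordering byte offsets is a single integer comparison, whereas row/column locations had to be compared lexicographically (row first, then column as the tie-breaker). A small sketch of the difference, using local stand-in types:

    // Sketch: ordering source positions under both schemes.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    struct Location {
        row: usize,    // derived Ord compares row first...
        column: usize, // ...then column, i.e. lexicographically
    }

    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    struct TextSize(u32);

    fn main() {
        let a = Location { row: 2, column: 7 };
        let b = Location { row: 3, column: 0 };
        assert!(a <= b); // row-major lexicographic compare

        let a = TextSize(31);
        let b = TextSize(40);
        assert!(a <= b); // one u32 compare, no tie-breaking needed
    }

The same total order over `u32` offsets is what keeps range comparisons cheap in the linter's hot loops.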


@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -40,6 +39,6 @@ pub fn strip_with_multi_characters(checker: &mut Checker, expr: &Expr, func: &Ex
if num_chars > 1 && num_chars != value.chars().unique().count() { if num_chars > 1 && num_chars != value.chars().unique().count() {
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(StripWithMultiCharacters, Range::from(expr))); .push(Diagnostic::new(StripWithMultiCharacters, expr.range()));
} }
} }


@ -21,7 +21,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Unaryop};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -48,5 +47,5 @@ pub fn unary_prefix_increment(checker: &mut Checker, expr: &Expr, op: &Unaryop,
} }
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(UnaryPrefixIncrement, Range::from(expr))); .push(Diagnostic::new(UnaryPrefixIncrement, expr.range()));
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Stmt};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -47,19 +46,17 @@ pub fn unintentional_type_annotation(
match &target.node { match &target.node {
ExprKind::Subscript { value, .. } => { ExprKind::Subscript { value, .. } => {
if matches!(&value.node, ExprKind::Name { .. }) { if matches!(&value.node, ExprKind::Name { .. }) {
checker.diagnostics.push(Diagnostic::new( checker
UnintentionalTypeAnnotation, .diagnostics
Range::from(stmt), .push(Diagnostic::new(UnintentionalTypeAnnotation, stmt.range()));
));
} }
} }
ExprKind::Attribute { value, .. } => { ExprKind::Attribute { value, .. } => {
if let ExprKind::Name { id, .. } = &value.node { if let ExprKind::Name { id, .. } = &value.node {
if id != "self" { if id != "self" {
checker.diagnostics.push(Diagnostic::new( checker
UnintentionalTypeAnnotation, .diagnostics
Range::from(stmt), .push(Diagnostic::new(UnintentionalTypeAnnotation, stmt.range()));
));
} }
} }
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -42,5 +41,5 @@ pub fn unreliable_callable_check(checker: &mut Checker, expr: &Expr, func: &Expr
} }
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(UnreliableCallableCheck, Range::from(expr))); .push(Diagnostic::new(UnreliableCallableCheck, expr.range()));
} }


@ -24,7 +24,7 @@ use serde::{Deserialize, Serialize};
use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Violation}; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::{Range, RefEquality}; use ruff_python_ast::types::RefEquality;
use ruff_python_ast::visitor::Visitor; use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{helpers, visitor}; use ruff_python_ast::{helpers, visitor};
@ -160,7 +160,7 @@ pub fn unused_loop_control_variable(
rename: rename.clone(), rename: rename.clone(),
certainty, certainty,
}, },
Range::from(expr), expr.range(),
); );
if let Some(rename) = rename { if let Some(rename) = rename {
if certainty.into() && checker.patch(diagnostic.kind.rule()) { if certainty.into() && checker.patch(diagnostic.kind.rule()) {
@ -176,11 +176,7 @@ pub fn unused_loop_control_variable(
if let Some(binding) = binding { if let Some(binding) = binding {
if binding.kind.is_loop_var() { if binding.kind.is_loop_var() {
if !binding.used() { if !binding.used() {
diagnostic.set_fix(Edit::replacement( diagnostic.set_fix(Edit::range_replacement(rename, expr.range()));
rename,
expr.location,
expr.end_location.unwrap(),
));
} }
} }
} }


@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -24,6 +23,6 @@ pub fn useless_comparison(checker: &mut Checker, expr: &Expr) {
if matches!(expr.node, ExprKind::Compare { .. }) { if matches!(expr.node, ExprKind::Compare { .. }) {
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(UselessComparison, Range::from(expr))); .push(Diagnostic::new(UselessComparison, expr.range()));
} }
} }


@ -1,10 +1,8 @@
use rustpython_parser::ast::Expr; use rustpython_parser::ast::Expr;
use crate::checkers::ast::Checker;
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
#[violation] #[violation]
pub struct UselessContextlibSuppress; pub struct UselessContextlibSuppress;
@ -29,9 +27,8 @@ pub fn useless_contextlib_suppress(checker: &mut Checker, expr: &Expr, func: &Ex
call_path.as_slice() == ["contextlib", "suppress"] call_path.as_slice() == ["contextlib", "suppress"]
}) })
{ {
checker.diagnostics.push(Diagnostic::new( checker
UselessContextlibSuppress, .diagnostics
Range::from(expr), .push(Diagnostic::new(UselessContextlibSuppress, expr.range()));
));
} }
} }


@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::contains_effect; use ruff_python_ast::helpers::contains_effect;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
@ -62,7 +61,7 @@ pub fn useless_expression(checker: &mut Checker, value: &Expr) {
UselessExpression { UselessExpression {
kind: Kind::Attribute, kind: Kind::Attribute,
}, },
Range::from(value), value.range(),
)); ));
} }
return; return;
@ -72,6 +71,6 @@ pub fn useless_expression(checker: &mut Checker, value: &Expr) {
UselessExpression { UselessExpression {
kind: Kind::Expression, kind: Kind::Expression,
}, },
Range::from(value), value.range(),
)); ));
} }


@ -1,10 +1,8 @@
use rustpython_parser::ast::{Expr, ExprKind, Keyword}; use rustpython_parser::ast::{Expr, ExprKind, Keyword};
use crate::checkers::ast::Checker;
use ruff_diagnostics::{Diagnostic, Violation}; use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
#[violation] #[violation]
pub struct ZipWithoutExplicitStrict; pub struct ZipWithoutExplicitStrict;
@ -36,7 +34,7 @@ pub fn zip_without_explicit_strict(
{ {
checker checker
.diagnostics .diagnostics
.push(Diagnostic::new(ZipWithoutExplicitStrict, Range::from(expr))); .push(Diagnostic::new(ZipWithoutExplicitStrict, expr.range()));
} }
} }
} }


@ -3,7 +3,6 @@ use rustpython_parser::ast::Located;
use ruff_diagnostics::Violation; use ruff_diagnostics::Violation;
use ruff_diagnostics::{Diagnostic, DiagnosticKind}; use ruff_diagnostics::{Diagnostic, DiagnosticKind};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_stdlib::builtins::BUILTINS; use ruff_python_stdlib::builtins::BUILTINS;
use super::types::ShadowingType; use super::types::ShadowingType;
@ -191,7 +190,7 @@ pub fn builtin_shadowing<T>(
} }
.into(), .into(),
}, },
Range::from(located), located.range(),
)) ))
} else { } else {
None None
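
`Range::from(located)` worked for any `Located<T>`, and so does `located.range()`, because the range lives on the generic wrapper rather than on each node kind. A sketch of that wrapper, with types defined locally for illustration rather than taken from rustpython:

    // Sketch: a generic node wrapper carrying its byte range, so range
    // accessors work uniformly over every node kind.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TextSize(u32);

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TextRange {
        start: TextSize,
        end: TextSize,
    }

    #[derive(Debug)]
    struct Located<T> {
        range: TextRange,
        node: T,
    }

    impl<T> Located<T> {
        fn range(&self) -> TextRange {
            self.range
        }
        fn start(&self) -> TextSize {
            self.range.start
        }
        fn end(&self) -> TextSize {
            self.range.end
        }
    }

    fn main() {
        let located = Located {
            range: TextRange { start: TextSize(3), end: TextSize(8) },
            node: "print", // any payload; the accessors are generic over T
        };
        assert_eq!(located.start(), TextSize(3));
        assert_eq!(located.end(), TextSize(8));
    }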


@ -1,4 +1,5 @@
use itertools::Itertools; use itertools::Itertools;
use ruff_text_size::TextRange;
use rustpython_parser::lexer::{LexResult, Spanned}; use rustpython_parser::lexer::{LexResult, Spanned};
use rustpython_parser::Tok; use rustpython_parser::Tok;
@ -6,7 +7,6 @@ use ruff_diagnostics::{AlwaysAutofixableViolation, Violation};
use ruff_diagnostics::{Diagnostic, Edit}; use ruff_diagnostics::{Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::source_code::Locator; use ruff_python_ast::source_code::Locator;
use ruff_python_ast::types::Range;
use crate::registry::Rule; use crate::registry::Rule;
use crate::settings::{flags, Settings}; use crate::settings::{flags, Settings};
@ -46,7 +46,7 @@ impl<'tok> Token<'tok> {
} }
const fn from_spanned(spanned: &'tok Spanned) -> Token<'tok> { const fn from_spanned(spanned: &'tok Spanned) -> Token<'tok> {
let type_ = match &spanned.1 { let type_ = match &spanned.0 {
Tok::NonLogicalNewline => TokenType::NonLogicalNewline, Tok::NonLogicalNewline => TokenType::NonLogicalNewline,
Tok::Newline => TokenType::Newline, Tok::Newline => TokenType::Newline,
Tok::For => TokenType::For, Tok::For => TokenType::For,
@ -161,7 +161,7 @@ pub fn trailing_commas(
.iter() .iter()
.flatten() .flatten()
// Completely ignore comments -- they just interfere with the logic. // Completely ignore comments -- they just interfere with the logic.
.filter(|&r| !matches!(r, (_, Tok::Comment(_), _))) .filter(|&r| !matches!(r, (Tok::Comment(_), _)))
.map(Token::from_spanned); .map(Token::from_spanned);
let tokens = [Token::irrelevant(), Token::irrelevant()] let tokens = [Token::irrelevant(), Token::irrelevant()]
.into_iter() .into_iter()
@ -253,15 +253,9 @@ pub fn trailing_commas(
}; };
if comma_prohibited { if comma_prohibited {
let comma = prev.spanned.unwrap(); let comma = prev.spanned.unwrap();
let mut diagnostic = Diagnostic::new( let mut diagnostic = Diagnostic::new(ProhibitedTrailingComma, comma.1);
ProhibitedTrailingComma,
Range {
location: comma.0,
end_location: comma.2,
},
);
if autofix.into() && settings.rules.should_fix(Rule::ProhibitedTrailingComma) { if autofix.into() && settings.rules.should_fix(Rule::ProhibitedTrailingComma) {
diagnostic.set_fix(Edit::deletion(comma.0, comma.2)); diagnostic.set_fix(Edit::range_deletion(diagnostic.range()));
} }
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
} }
@ -272,13 +266,7 @@ pub fn trailing_commas(
prev.type_ == TokenType::Comma && token.type_ == TokenType::Newline; prev.type_ == TokenType::Comma && token.type_ == TokenType::Newline;
if bare_comma_prohibited { if bare_comma_prohibited {
let comma = prev.spanned.unwrap(); let comma = prev.spanned.unwrap();
diagnostics.push(Diagnostic::new( diagnostics.push(Diagnostic::new(TrailingCommaOnBareTuple, comma.1));
TrailingCommaOnBareTuple,
Range {
location: comma.0,
end_location: comma.2,
},
));
} }
// Comma is required if: // Comma is required if:
@ -299,21 +287,17 @@ pub fn trailing_commas(
let missing_comma = prev_prev.spanned.unwrap(); let missing_comma = prev_prev.spanned.unwrap();
let mut diagnostic = Diagnostic::new( let mut diagnostic = Diagnostic::new(
MissingTrailingComma, MissingTrailingComma,
Range { TextRange::empty(missing_comma.1.end()),
location: missing_comma.2,
end_location: missing_comma.2,
},
); );
if autofix.into() && settings.rules.should_fix(Rule::MissingTrailingComma) { if autofix.into() && settings.rules.should_fix(Rule::MissingTrailingComma) {
// Create a replacement that includes the final bracket (or other token), // Create a replacement that includes the final bracket (or other token),
// rather than just inserting a comma at the end. This prevents the UP034 autofix // rather than just inserting a comma at the end. This prevents the UP034 autofix
// removing any brackets in the same linter pass - doing both at the same time could // removing any brackets in the same linter pass - doing both at the same time could
// lead to a syntax error. // lead to a syntax error.
let contents = locator.slice(Range::new(missing_comma.0, missing_comma.2)); let contents = locator.slice(missing_comma.1);
diagnostic.set_fix(Edit::replacement( diagnostic.set_fix(Edit::range_replacement(
format!("{contents},"), format!("{contents},"),
missing_comma.0, missing_comma.1,
missing_comma.2,
)); ));
} }
diagnostics.push(diagnostic); diagnostics.push(diagnostic);
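
The trailing-comma hunks show the lexer-facing side of the migration: the `Spanned` tuple reorders from `(Location, Tok, Location)` to `(Tok, TextRange)`, so token matches become `(Tok::Comment(_), _)` instead of `(_, Tok::Comment(_), _)`, spans no longer need rebuilding from fields `.0` and `.2`, and a missing-comma insertion point is expressed as the zero-width `TextRange::empty(end)`. A sketch of that tuple shape with local stand-in types (the `Tok` variants are reduced to two for illustration):

    // Sketch of the lexer `Spanned` change: the token moves to field 0
    // and the two bracketing positions collapse into one `TextRange`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TextSize(u32);

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TextRange {
        start: TextSize,
        end: TextSize,
    }

    impl TextRange {
        fn end(self) -> TextSize {
            self.end
        }
        /// A zero-width range marking a pure insertion point, e.g. the
        /// spot right after a token where a missing comma belongs.
        fn empty(offset: TextSize) -> Self {
            Self { start: offset, end: offset }
        }
    }

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum Tok {
        Comma,
        Newline,
    }

    // Old: (Location, Tok, Location) -- the token sat in the middle, so
    // matches looked like `(_, Tok::Comma, _)` and the span had to be
    // rebuilt from `.0` and `.2`. New: one token, one span.
    type Spanned = (Tok, TextRange);

    fn main() {
        let comma: Spanned = (
            Tok::Comma,
            TextRange { start: TextSize(12), end: TextSize(13) },
        );
        // Matching destructures positionally on the token:
        assert!(matches!(comma, (Tok::Comma, _)));
        // An insertion point immediately after the token:
        let insert_at = TextRange::empty(comma.1.end());
        assert_eq!(insert_at.start, insert_at.end);
    }

Note also that `Edit::range_deletion(diagnostic.range())` in the hunk reuses the diagnostic's own range, so the flagged span and the deleted span cannot drift apart.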


@ -35,7 +35,7 @@ pub fn fix_unnecessary_generator_list(
expr: &rustpython_parser::ast::Expr, expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> { ) -> Result<Edit> {
// Expr(Call(GeneratorExp)))) -> Expr(ListComp))) // Expr(Call(GeneratorExp)))) -> Expr(ListComp)))
let module_text = locator.slice(expr); let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?; let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?; let mut body = match_expr(&mut tree)?;
let call = match_call(body)?; let call = match_call(body)?;
@ -67,11 +67,7 @@ pub fn fix_unnecessary_generator_list(
}; };
tree.codegen(&mut state); tree.codegen(&mut state);
Ok(Edit::replacement( Ok(Edit::range_replacement(state.to_string(), expr.range()))
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
} }
/// (C401) Convert `set(x for x in y)` to `{x for x in y}`. /// (C401) Convert `set(x for x in y)` to `{x for x in y}`.
@ -82,7 +78,7 @@ pub fn fix_unnecessary_generator_set(
parent: Option<&rustpython_parser::ast::Expr>, parent: Option<&rustpython_parser::ast::Expr>,
) -> Result<Edit> { ) -> Result<Edit> {
// Expr(Call(GeneratorExp)))) -> Expr(SetComp))) // Expr(Call(GeneratorExp)))) -> Expr(SetComp)))
let module_text = locator.slice(expr); let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?; let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?; let mut body = match_expr(&mut tree)?;
let call = match_call(body)?; let call = match_call(body)?;
@ -124,11 +120,7 @@ pub fn fix_unnecessary_generator_set(
} }
} }
Ok(Edit::replacement( Ok(Edit::range_replacement(content, expr.range()))
content,
expr.location,
expr.end_location.unwrap(),
))
} }
/// (C402) Convert `dict((x, x) for x in range(3))` to `{x: x for x in /// (C402) Convert `dict((x, x) for x in range(3))` to `{x: x for x in
@ -139,7 +131,7 @@ pub fn fix_unnecessary_generator_dict(
expr: &rustpython_parser::ast::Expr, expr: &rustpython_parser::ast::Expr,
parent: Option<&rustpython_parser::ast::Expr>, parent: Option<&rustpython_parser::ast::Expr>,
) -> Result<Edit> { ) -> Result<Edit> {
let module_text = locator.slice(expr); let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?; let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?; let mut body = match_expr(&mut tree)?;
let call = match_call(body)?; let call = match_call(body)?;
@ -198,11 +190,7 @@ pub fn fix_unnecessary_generator_dict(
} }
} }
Ok(Edit::replacement( Ok(Edit::range_replacement(content, expr.range()))
content,
expr.location,
expr.end_location.unwrap(),
))
} }
/// (C403) Convert `set([x for x in y])` to `{x for x in y}`. /// (C403) Convert `set([x for x in y])` to `{x for x in y}`.
@ -213,7 +201,7 @@ pub fn fix_unnecessary_list_comprehension_set(
) -> Result<Edit> { ) -> Result<Edit> {
// Expr(Call(ListComp)))) -> // Expr(Call(ListComp)))) ->
// Expr(SetComp))) // Expr(SetComp)))
let module_text = locator.slice(expr); let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?; let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?; let mut body = match_expr(&mut tree)?;
let call = match_call(body)?; let call = match_call(body)?;
@ -243,11 +231,7 @@ pub fn fix_unnecessary_list_comprehension_set(
}; };
tree.codegen(&mut state); tree.codegen(&mut state);
Ok(Edit::replacement( Ok(Edit::range_replacement(state.to_string(), expr.range()))
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
} }
/// (C404) Convert `dict([(i, i) for i in range(3)])` to `{i: i for i in /// (C404) Convert `dict([(i, i) for i in range(3)])` to `{i: i for i in
@ -257,7 +241,7 @@ pub fn fix_unnecessary_list_comprehension_dict(
stylist: &Stylist, stylist: &Stylist,
expr: &rustpython_parser::ast::Expr, expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> { ) -> Result<Edit> {
let module_text = locator.slice(expr); let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?; let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?; let mut body = match_expr(&mut tree)?;
let call = match_call(body)?; let call = match_call(body)?;
@ -299,11 +283,7 @@ pub fn fix_unnecessary_list_comprehension_dict(
}; };
tree.codegen(&mut state); tree.codegen(&mut state);
Ok(Edit::replacement( Ok(Edit::range_replacement(state.to_string(), expr.range()))
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
} }
/// Drop a trailing comma from a list of tuple elements. /// Drop a trailing comma from a list of tuple elements.
@ -356,7 +336,7 @@ pub fn fix_unnecessary_literal_set(
expr: &rustpython_parser::ast::Expr, expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> { ) -> Result<Edit> {
// Expr(Call(List|Tuple)))) -> Expr(Set))) // Expr(Call(List|Tuple)))) -> Expr(Set)))
let module_text = locator.slice(expr); let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?; let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?; let mut body = match_expr(&mut tree)?;
     let mut call = match_call(body)?;
@@ -393,11 +373,7 @@ pub fn fix_unnecessary_literal_set(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C406) Convert `dict([(1, 2)])` to `{1: 2}`.
@@ -407,7 +383,7 @@ pub fn fix_unnecessary_literal_dict(
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
     // Expr(Call(List|Tuple)))) -> Expr(Dict)))
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let mut body = match_expr(&mut tree)?;
     let call = match_call(body)?;
@@ -466,11 +442,7 @@ pub fn fix_unnecessary_literal_dict(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C408)
@@ -480,7 +452,7 @@ pub fn fix_unnecessary_collection_call(
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
     // Expr(Call("list" | "tuple" | "dict")))) -> Expr(List|Tuple|Dict)
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let mut body = match_expr(&mut tree)?;
     let call = match_call(body)?;
@@ -582,11 +554,7 @@ pub fn fix_unnecessary_collection_call(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C409) Convert `tuple([1, 2])` to `tuple(1, 2)`
@@ -595,7 +563,7 @@ pub fn fix_unnecessary_literal_within_tuple_call(
     stylist: &Stylist,
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let mut body = match_expr(&mut tree)?;
     let call = match_call(body)?;
@@ -641,11 +609,7 @@ pub fn fix_unnecessary_literal_within_tuple_call(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C410) Convert `list([1, 2])` to `[1, 2]`
@@ -654,7 +618,7 @@ pub fn fix_unnecessary_literal_within_list_call(
     stylist: &Stylist,
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let mut body = match_expr(&mut tree)?;
     let call = match_call(body)?;
@@ -702,11 +666,7 @@ pub fn fix_unnecessary_literal_within_list_call(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C411) Convert `list([i * i for i in x])` to `[i * i for i in x]`.
@@ -716,7 +676,7 @@ pub fn fix_unnecessary_list_call(
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
     // Expr(Call(List|Tuple)))) -> Expr(List|Tuple)))
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let mut body = match_expr(&mut tree)?;
     let call = match_call(body)?;
@@ -731,11 +691,7 @@ pub fn fix_unnecessary_list_call(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C413) Convert `list(sorted([2, 3, 1]))` to `sorted([2, 3, 1])`.
@@ -746,7 +702,7 @@ pub fn fix_unnecessary_call_around_sorted(
     stylist: &Stylist,
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let mut body = match_expr(&mut tree)?;
     let outer_call = match_call(body)?;
@@ -860,11 +816,7 @@ pub fn fix_unnecessary_call_around_sorted(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C414) Convert `sorted(list(foo))` to `sorted(foo)`
@@ -873,7 +825,7 @@ pub fn fix_unnecessary_double_cast_or_process(
     stylist: &Stylist,
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let body = match_expr(&mut tree)?;
     let mut outer_call = match_call(body)?;
@@ -901,11 +853,7 @@ pub fn fix_unnecessary_double_cast_or_process(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C416) Convert `[i for i in x]` to `list(x)`.
@@ -914,7 +862,7 @@ pub fn fix_unnecessary_comprehension(
     stylist: &Stylist,
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let mut body = match_expr(&mut tree)?;
@@ -997,11 +945,7 @@ pub fn fix_unnecessary_comprehension(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C417) Convert `map(lambda x: x * 2, bar)` to `(x * 2 for x in bar)`.
@@ -1012,7 +956,7 @@ pub fn fix_unnecessary_map(
     parent: Option<&rustpython_parser::ast::Expr>,
     kind: &str,
 ) -> Result<Edit> {
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let mut body = match_expr(&mut tree)?;
     let call = match_call(body)?;
@@ -1164,11 +1108,7 @@ pub fn fix_unnecessary_map(
             }
         }
-        Ok(Edit::replacement(
-            content,
-            expr.location,
-            expr.end_location.unwrap(),
-        ))
+        Ok(Edit::range_replacement(content, expr.range()))
     } else {
         bail!("Should have two arguments");
     }
@@ -1180,7 +1120,7 @@ pub fn fix_unnecessary_literal_within_dict_call(
     stylist: &Stylist,
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let mut body = match_expr(&mut tree)?;
     let call = match_call(body)?;
@@ -1195,11 +1135,7 @@ pub fn fix_unnecessary_literal_within_dict_call(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
 /// (C419) Convert `[i for i in a]` into `i for i in a`
@@ -1209,7 +1145,7 @@ pub fn fix_unnecessary_comprehension_any_all(
     expr: &rustpython_parser::ast::Expr,
 ) -> Result<Edit> {
     // Expr(ListComp) -> Expr(GeneratorExp)
-    let module_text = locator.slice(expr);
+    let module_text = locator.slice(expr.range());
     let mut tree = match_module(module_text)?;
     let body = match_expr(&mut tree)?;
     let call = match_call(body)?;
@@ -1239,9 +1175,5 @@ pub fn fix_unnecessary_comprehension_any_all(
     };
     tree.codegen(&mut state);
-    Ok(Edit::replacement(
-        state.to_string(),
-        expr.location,
-        expr.end_location.unwrap(),
-    ))
+    Ok(Edit::range_replacement(state.to_string(), expr.range()))
 }
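Every fix in this file changes the same way: the three-argument `Edit::replacement(content, start, end)`, which threaded two row/column `Location` values (the end an `Option` that had to be unwrapped at each call site), collapses into `Edit::range_replacement(content, range)`. The sketch below is a minimal model of byte-offset replacement using simplified stand-in types; it is not ruff's actual `TextRange`/`Edit` definition.

// Illustrative stand-ins only, not the real ruff_text_size/ruff_diagnostics types.
#[derive(Debug, Clone, Copy)]
struct TextRange {
    start: usize, // byte offset of the first byte of the replaced span
    end: usize,   // byte offset one past the last byte
}

#[derive(Debug)]
struct Edit {
    content: String,
    range: TextRange,
}

impl Edit {
    fn range_replacement(content: String, range: TextRange) -> Edit {
        Edit { content, range }
    }

    // Applying an edit is a single byte-range splice; no line table needed.
    fn apply(&self, source: &str) -> String {
        let mut out = String::with_capacity(source.len());
        out.push_str(&source[..self.range.start]);
        out.push_str(&self.content);
        out.push_str(&source[self.range.end..]);
        out
    }
}

fn main() {
    let source = "x = dict([(1, 2)])";
    // Replace the call expression (bytes 4..18) with a dict literal, as C406 would.
    let edit = Edit::range_replacement("{1: 2}".to_string(), TextRange { start: 4, end: 18 });
    assert_eq!(edit.apply(source), "x = {1: 2}");
}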

View file

@@ -1,12 +1,10 @@
 use rustpython_parser::ast::{Expr, ExprKind};
-use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
-use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;
 use crate::checkers::ast::Checker;
 use crate::registry::AsRule;
 use crate::rules::flake8_comprehensions::fixes;
+use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
+use ruff_macros::{derive_message_formats, violation};
 use super::helpers;
@@ -83,7 +81,7 @@ pub fn unnecessary_call_around_sorted(
         UnnecessaryCallAroundSorted {
             func: outer.to_string(),
         },
-        Range::from(expr),
+        expr.range(),
     );
     if checker.patch(diagnostic.kind.rule()) {
         diagnostic.try_set_fix(|| {
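The other change repeated above, `locator.slice(expr)` becoming `locator.slice(expr.range())`, works because a byte range indexes straight into the source string. A minimal sketch under that assumption; the `Locator` below is a deliberately simplified stand-in, far leaner than ruff's real one (which, before this commit, had to translate row/column locations through a line index before it could slice).

// Simplified stand-in for a source-holding locator.
struct Locator<'a> {
    contents: &'a str,
}

impl<'a> Locator<'a> {
    fn new(contents: &'a str) -> Self {
        Locator { contents }
    }

    // With a half-open byte range, slicing is O(1) and allocation-free.
    fn slice(&self, range: std::ops::Range<usize>) -> &'a str {
        &self.contents[range]
    }
}

fn main() {
    let locator = Locator::new("x = dict([(1, 2)])");
    // The byte range an AST node would report for the call expression.
    assert_eq!(locator.slice(4..18), "dict([(1, 2)])");
}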

View file

@@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, Keyword};
 use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;
 use crate::checkers::ast::Checker;
 use crate::registry::AsRule;
@@ -87,7 +86,7 @@ pub fn unnecessary_collection_call(
         UnnecessaryCollectionCall {
             obj_type: id.to_string(),
         },
-        Range::from(expr),
+        expr.range(),
     );
     if checker.patch(diagnostic.kind.rule()) {
         diagnostic.try_set_fix(|| {
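`Range::from(expr)` giving way to `expr.range()` is the same shift viewed from the AST: nodes carry their own `TextRange` and expose it directly, so a diagnostic can be built from the node's range with no conversion helper. An illustrative sketch with stand-in types; note that ruff's real `Diagnostic::new` takes a violation kind rather than a plain message string.

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct TextRange {
    start: usize,
    end: usize,
}

// Stand-in for an AST node that stores its own byte range.
struct Expr {
    range: TextRange,
    // ...node payload elided...
}

impl Expr {
    fn range(&self) -> TextRange {
        self.range
    }
}

// Stand-in diagnostic constructed directly from the node's range.
struct Diagnostic {
    message: String,
    range: TextRange,
}

impl Diagnostic {
    fn new(message: impl Into<String>, range: TextRange) -> Self {
        Diagnostic { message: message.into(), range }
    }
}

fn main() {
    let expr = Expr { range: TextRange { start: 4, end: 18 } };
    let diagnostic = Diagnostic::new("C408", expr.range());
    println!("{} flagged at {:?}", diagnostic.message, diagnostic.range);
    assert_eq!(diagnostic.range, expr.range());
}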

View file

@@ -2,7 +2,6 @@ use rustpython_parser::ast::{Comprehension, Expr, ExprKind};
 use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;
 use crate::checkers::ast::Checker;
 use crate::registry::AsRule;
@@ -64,7 +63,7 @@ fn add_diagnostic(checker: &mut Checker, expr: &Expr) {
         UnnecessaryComprehension {
             obj_type: id.to_string(),
         },
-        Range::from(expr),
+        expr.range(),
     );
     if checker.patch(diagnostic.kind.rule()) {
         diagnostic.try_set_fix(|| {

View file

@@ -4,7 +4,6 @@ use ruff_diagnostics::AlwaysAutofixableViolation;
 use ruff_diagnostics::Diagnostic;
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::helpers::any_over_expr;
-use ruff_python_ast::types::Range;
 use crate::checkers::ast::Checker;
 use crate::registry::AsRule;
@@ -78,7 +77,7 @@ pub fn unnecessary_comprehension_any_all(
     if !checker.ctx.is_builtin(id) {
         return;
     }
-    let mut diagnostic = Diagnostic::new(UnnecessaryComprehensionAnyAll, Range::from(&args[0]));
+    let mut diagnostic = Diagnostic::new(UnnecessaryComprehensionAnyAll, args[0].range());
     if checker.patch(diagnostic.kind.rule()) {
         diagnostic.try_set_fix(|| {
             fixes::fix_unnecessary_comprehension_any_all(checker.locator, checker.stylist, expr)
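One cost of keeping only byte offsets is that human-facing output still wants line and column numbers, so they must be recomputed on demand; a table of line-start offsets plus a binary search recovers them cheaply. The `LineIndex` below is a hypothetical sketch of that idea, not ruff's actual implementation.

// Hypothetical line index: maps byte offsets back to line/column pairs.
struct LineIndex {
    line_starts: Vec<usize>, // byte offset at which each line begins
}

impl LineIndex {
    fn new(source: &str) -> Self {
        let mut line_starts = vec![0];
        for (i, b) in source.bytes().enumerate() {
            if b == b'\n' {
                line_starts.push(i + 1);
            }
        }
        LineIndex { line_starts }
    }

    // Map a byte offset to a (1-based line, 0-based byte column) pair.
    fn line_column(&self, offset: usize) -> (usize, usize) {
        let line = match self.line_starts.binary_search(&offset) {
            Ok(exact) => exact,
            Err(next) => next - 1,
        };
        (line + 1, offset - self.line_starts[line])
    }
}

fn main() {
    let index = LineIndex::new("foo = 1\nbar = 2\n");
    assert_eq!(index.line_column(0), (1, 0));  // start of `foo`
    assert_eq!(index.line_column(8), (2, 0));  // start of `bar`
    assert_eq!(index.line_column(14), (2, 6)); // the `2` on line 2
}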

View file

@@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
 use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
 use ruff_macros::{derive_message_formats, violation};
-use ruff_python_ast::types::Range;
 use crate::checkers::ast::Checker;
 use crate::registry::AsRule;
@@ -108,7 +107,7 @@ pub fn unnecessary_double_cast_or_process(
             inner: inner.to_string(),
             outer: outer.to_string(),
         },
-        Range::from(expr),
+        expr.range(),
     );
     if checker.patch(diagnostic.kind.rule()) {
         diagnostic.try_set_fix(|| {
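A closing note on why this migration pays off in the fix machinery shown above: once every autofix is just replacement text plus a byte range, a batch of non-overlapping edits can be applied back-to-front with no row/column bookkeeping. A sketch of that strategy, reusing the same simplified stand-in types; `apply_all` is a hypothetical helper, not a ruff API.

#[derive(Debug, Clone, Copy)]
struct TextRange {
    start: usize,
    end: usize,
}

struct Edit {
    content: String,
    range: TextRange,
}

fn apply_all(source: &str, mut edits: Vec<Edit>) -> String {
    // Apply from the end of the file toward the start: splicing at a high
    // offset leaves every lower offset untouched.
    edits.sort_by(|a, b| b.range.start.cmp(&a.range.start));
    let mut out = source.to_string();
    for edit in edits {
        out.replace_range(edit.range.start..edit.range.end, &edit.content);
    }
    out
}

fn main() {
    let source = "a = list([1]); b = dict()";
    let edits = vec![
        Edit { content: "[1]".to_string(), range: TextRange { start: 4, end: 13 } },
        Edit { content: "{}".to_string(), range: TextRange { start: 19, end: 25 } },
    ];
    assert_eq!(apply_all(source, edits), "a = [1]; b = {}");
}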

Some files were not shown because too many files have changed in this diff.