Replace row/column based Location with byte-offsets. (#3931)

Micha Reiser 2023-04-26 20:11:02 +02:00 committed by GitHub
parent ee91598835
commit cab65b25da
418 changed files with 6203 additions and 7040 deletions
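For orientation, a minimal sketch (not part of this diff) of the addressing change: the old `Location` carried a row and a column, while `ruff_text_size` models positions as byte offsets (`TextSize`) and spans as half-open byte ranges (`TextRange`), which support cheap comparison, arithmetic, and direct slicing.

```rust
// Minimal sketch of byte-offset addressing, assuming `ruff_text_size`
// exposes the standard `text-size` API (TextSize, TextRange, TextLen).
use ruff_text_size::{TextLen, TextRange, TextSize};

fn main() {
    let source = "x = 1; y = 2\n";

    // A position is a byte offset into the source, not a (row, column) pair.
    let semicolon = TextSize::from(5);
    assert_eq!(&source[usize::from(semicolon)..][..1], ";");

    // A span is a half-open byte range; `str` can be indexed by it directly.
    let stmt = TextRange::new(TextSize::from(0), semicolon);
    assert_eq!(&source[stmt], "x = 1");
    assert_eq!(stmt.len(), "x = 1".text_len());
}
```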


@ -12,3 +12,6 @@ indent_size = 2
[*.{rs,py}]
indent_size = 4
[*.snap]
trim_trailing_whitespace = false

Cargo.lock

@ -2123,6 +2123,7 @@ dependencies = [
"ruff_diagnostics",
"ruff_python_ast",
"ruff_python_stdlib",
"ruff_text_size",
"rustc-hash",
"serde",
"serde_json",
@ -2165,8 +2166,7 @@ version = "0.0.0"
dependencies = [
"anyhow",
"log",
"ruff_python_ast",
"rustpython-parser",
"ruff_text_size",
"serde",
]
@ -2248,6 +2248,7 @@ dependencies = [
"nohash-hasher",
"ruff_python_ast",
"ruff_python_stdlib",
"ruff_text_size",
"rustc-hash",
"rustpython-parser",
"smallvec",
@ -2284,11 +2285,10 @@ dependencies = [
[[package]]
name = "ruff_text_size"
version = "0.0.0"
source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
dependencies = [
"schemars",
"serde",
"serde_test",
"static_assertions",
]
[[package]]
@ -2356,27 +2356,28 @@ dependencies = [
[[package]]
name = "rustpython-ast"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
dependencies = [
"num-bigint",
"rustpython-compiler-core",
"ruff_text_size",
]
[[package]]
name = "rustpython-common"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
dependencies = [
"ascii",
"bitflags 1.3.2",
"bstr 0.2.17",
"cfg-if",
"getrandom",
"hexf-parse",
"itertools",
"lexical-parse-float",
"libc",
"lock_api",
"num-bigint",
"num-complex",
"num-traits",
"once_cell",
"radium",
@ -2390,23 +2391,21 @@ dependencies = [
[[package]]
name = "rustpython-compiler-core"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
dependencies = [
"bitflags 1.3.2",
"bstr 0.2.17",
"itertools",
"lz4_flex",
"num-bigint",
"num-complex",
"serde",
"ruff_text_size",
]
[[package]]
name = "rustpython-parser"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
source = "git+https://github.com/charliermarsh/RustPython.git?rev=c3147d2c1524ebd0e90cf1c2938d770314fd5a5a#c3147d2c1524ebd0e90cf1c2938d770314fd5a5a"
dependencies = [
"ahash",
"anyhow",
"itertools",
"lalrpop",
@ -2416,10 +2415,10 @@ dependencies = [
"num-traits",
"phf",
"phf_codegen",
"ruff_text_size",
"rustc-hash",
"rustpython-ast",
"rustpython-compiler-core",
"serde",
"tiny-keccak",
"unic-emoji-char",
"unic-ucd-ident",
@ -2568,15 +2567,6 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_test"
version = "1.0.160"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c95a500e3923258f7fc3a16bf29934e403aef5ca1096e184d85e3b1926675e8"
dependencies = [
"serde",
]
[[package]]
name = "shellexpand"
version = "3.1.0"


@ -30,12 +30,10 @@ path-absolutize = { version = "3.0.14" }
proc-macro2 = { version = "1.0.51" }
quote = { version = "1.0.23" }
regex = { version = "1.7.1" }
ruff_text_size = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
rustc-hash = { version = "1.1.0" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
rustpython-parser = { features = [
"lalrpop",
"serde",
], git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
rustpython-common = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
rustpython-parser = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
schemars = { version = "0.8.12" }
serde = { version = "1.0.152", features = ["derive"] }
serde_json = { version = "1.0.93", features = ["preserve_order"] }


@ -17,11 +17,11 @@ name = "ruff"
ruff_cache = { path = "../ruff_cache" }
ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
ruff_macros = { path = "../ruff_macros" }
ruff_python_ast = { path = "../ruff_python_ast" }
ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] }
ruff_python_semantic = { path = "../ruff_python_semantic" }
ruff_python_stdlib = { path = "../ruff_python_stdlib" }
ruff_rustpython = { path = "../ruff_rustpython" }
ruff_text_size = { path = "../ruff_text_size" }
ruff_text_size = { workspace = true }
annotate-snippets = { version = "0.9.1", features = ["color"] }
anyhow = { workspace = true }


@ -9,6 +9,9 @@ def f():
# Here's a standalone comment that's over the limit.
x = 2
# Another standalone that is preceded by a newline and indent token and is over the limit.
print("Here's a string that's over the limit, but it's not a docstring.")


@ -4,12 +4,12 @@ use itertools::Itertools;
use libcst_native::{
Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement,
};
use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Location, Stmt, StmtKind};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Stmt, StmtKind};
use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Edit;
use ruff_python_ast::helpers;
use ruff_python_ast::helpers::to_absolute;
use ruff_python_ast::imports::{AnyImport, Import};
use ruff_python_ast::newlines::NewlineWithTrailingNewline;
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
@ -102,20 +102,17 @@ fn is_lone_child(child: &Stmt, parent: &Stmt, deleted: &[&Stmt]) -> Result<bool>
/// Return the location of a trailing semicolon following a `Stmt`, if it's part
/// of a multi-statement line.
fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<Location> {
let contents = locator.after(stmt.end_location.unwrap());
for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
let trimmed = line.trim();
fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<TextSize> {
let contents = locator.after(stmt.end());
for line in NewlineWithTrailingNewline::from(contents) {
let trimmed = line.trim_start();
if trimmed.starts_with(';') {
let column = line
.char_indices()
.find_map(|(column, char)| if char == ';' { Some(column) } else { None })
.unwrap();
return Some(to_absolute(
Location::new(row + 1, column),
stmt.end_location.unwrap(),
));
let colon_offset = line.text_len() - trimmed.text_len();
return Some(stmt.end() + line.start() + colon_offset);
}
if !trimmed.starts_with('\\') {
break;
}
@ -124,42 +121,36 @@ fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<Location> {
}
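
A hedged sketch of the arithmetic the rewritten `trailing_semicolon` relies on: the semicolon's absolute offset is the statement's end, plus the line's start within the trailing contents, plus the whitespace skipped by `trim_start` (computed as a difference of byte lengths). Names below are illustrative, not taken from this diff.

```rust
use ruff_text_size::{TextLen, TextSize};

// Illustrative only: locate a trailing `;` on the first line after a
// statement ending at `stmt_end`, mirroring the length-difference trick.
fn semicolon_offset(stmt_end: TextSize, after: &str) -> Option<TextSize> {
    let line = after.lines().next()?; // first line, so its start offset is 0
    let trimmed = line.trim_start();
    if trimmed.starts_with(';') {
        // Bytes of leading whitespace = full length minus trimmed length.
        let ws = line.text_len() - trimmed.text_len();
        Some(stmt_end + ws)
    } else {
        None
    }
}

fn main() {
    // `x = 1` ends at offset 5; the source continues with `  ; y = 2`.
    assert_eq!(
        semicolon_offset(TextSize::from(5), "  ; y = 2"),
        Some(TextSize::from(7))
    );
}
```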
/// Find the next valid break for a `Stmt` after a semicolon.
fn next_stmt_break(semicolon: Location, locator: &Locator) -> Location {
let start_location = Location::new(semicolon.row(), semicolon.column() + 1);
let contents = locator.after(start_location);
for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
fn next_stmt_break(semicolon: TextSize, locator: &Locator) -> TextSize {
let start_location = semicolon + TextSize::from(1);
let contents = &locator.contents()[usize::from(start_location)..];
for line in NewlineWithTrailingNewline::from(contents) {
let trimmed = line.trim();
// Skip past any continuations.
if trimmed.starts_with('\\') {
continue;
}
return if trimmed.is_empty() {
// If the line is empty, then despite the previous statement ending in a
// semicolon, we know that it's not a multi-statement line.
to_absolute(Location::new(row + 1, 0), start_location)
} else {
// Otherwise, find the start of the next statement. (Or, anything that isn't
// whitespace.)
let column = line
.char_indices()
.find_map(|(column, char)| {
if char.is_whitespace() {
None
} else {
Some(column)
}
})
.unwrap();
to_absolute(Location::new(row + 1, column), start_location)
};
return start_location
+ if trimmed.is_empty() {
// If the line is empty, then despite the previous statement ending in a
// semicolon, we know that it's not a multi-statement line.
line.start()
} else {
// Otherwise, find the start of the next statement. (Or, anything that isn't
// whitespace.)
let relative_offset = line.find(|c: char| !c.is_whitespace()).unwrap();
line.start() + TextSize::try_from(relative_offset).unwrap()
};
}
Location::new(start_location.row() + 1, 0)
locator.line_end(start_location)
}
/// Return `true` if a `Stmt` occurs at the end of a file.
fn is_end_of_file(stmt: &Stmt, locator: &Locator) -> bool {
let contents = locator.after(stmt.end_location.unwrap());
contents.is_empty()
stmt.end() == locator.contents().text_len()
}
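
With offsets, the end-of-file test above reduces to comparing the statement's end against the byte length of the whole source; a tiny illustrative check:

```rust
use ruff_text_size::{TextLen, TextSize};

/// Illustrative: a statement ending at `end` is at EOF iff `end` equals
/// the byte length of the entire source.
fn is_end_of_file(end: TextSize, source: &str) -> bool {
    end == source.text_len()
}

fn main() {
    let source = "x = 1\n";
    assert!(is_end_of_file(TextSize::from(6), source));
    assert!(!is_end_of_file(TextSize::from(5), source));
}
```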
/// Return the `Fix` to use when deleting a `Stmt`.
@ -190,33 +181,23 @@ pub fn delete_stmt(
{
// If removing this node would lead to an invalid syntax tree, replace
// it with a `pass`.
Ok(Edit::replacement(
"pass".to_string(),
stmt.location,
stmt.end_location.unwrap(),
))
Ok(Edit::range_replacement("pass".to_string(), stmt.range()))
} else {
Ok(if let Some(semicolon) = trailing_semicolon(stmt, locator) {
let next = next_stmt_break(semicolon, locator);
Edit::deletion(stmt.location, next)
} else if helpers::match_leading_content(stmt, locator) {
Edit::deletion(stmt.location, stmt.end_location.unwrap())
} else if helpers::preceded_by_continuation(stmt, indexer) {
if is_end_of_file(stmt, locator) && stmt.location.column() == 0 {
Edit::deletion(stmt.start(), next)
} else if helpers::has_leading_content(stmt, locator) {
Edit::range_deletion(stmt.range())
} else if helpers::preceded_by_continuation(stmt, indexer, locator) {
if is_end_of_file(stmt, locator) && locator.is_at_start_of_line(stmt.start()) {
// Special-case: a file can't end in a continuation.
Edit::replacement(
stylist.line_ending().to_string(),
stmt.location,
stmt.end_location.unwrap(),
)
Edit::range_replacement(stylist.line_ending().to_string(), stmt.range())
} else {
Edit::deletion(stmt.location, stmt.end_location.unwrap())
Edit::range_deletion(stmt.range())
}
} else {
Edit::deletion(
Location::new(stmt.location.row(), 0),
Location::new(stmt.end_location.unwrap().row() + 1, 0),
)
let range = locator.full_lines_range(stmt.range());
Edit::range_deletion(range)
})
}
}
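
The `Edit` constructors above now take a `TextRange` where the old ones took a start and end `Location`; a hedged sketch of how the range-based variants presumably relate to the offset-pair ones (shapes inferred from this diff, not copied from `ruff_diagnostics`):

```rust
use ruff_text_size::{TextRange, TextSize};

// Shapes inferred for illustration; not the actual `ruff_diagnostics` types.
struct Edit {
    range: TextRange,
    content: Option<String>,
}

impl Edit {
    fn range_deletion(range: TextRange) -> Self {
        Self { range, content: None }
    }

    // The offset-pair form is just a convenience over the range form.
    fn deletion(start: TextSize, end: TextSize) -> Self {
        Self::range_deletion(TextRange::new(start, end))
    }

    fn range_replacement(content: String, range: TextRange) -> Self {
        Self { range, content: Some(content) }
    }
}

fn main() {
    // Replace the statement spanning bytes 0..5 with `pass`.
    let range = TextRange::new(TextSize::from(0), TextSize::from(5));
    let edit = Edit::range_replacement("pass".to_string(), range);
    assert_eq!(edit.content.as_deref(), Some("pass"));
    assert_eq!(edit.range, range);
}
```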
@ -231,7 +212,7 @@ pub fn remove_unused_imports<'a>(
indexer: &Indexer,
stylist: &Stylist,
) -> Result<Edit> {
let module_text = locator.slice(stmt);
let module_text = locator.slice(stmt.range());
let mut tree = match_module(module_text)?;
let Some(Statement::Simple(body)) = tree.body.first_mut() else {
@ -337,11 +318,7 @@ pub fn remove_unused_imports<'a>(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
stmt.location,
stmt.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), stmt.range()))
}
}
@ -353,9 +330,8 @@ pub fn remove_unused_imports<'a>(
/// For this behavior, set `remove_parentheses` to `true`.
pub fn remove_argument(
locator: &Locator,
call_at: Location,
expr_at: Location,
expr_end: Location,
call_at: TextSize,
expr_range: TextRange,
args: &[Expr],
keywords: &[Keyword],
remove_parentheses: bool,
@ -374,13 +350,13 @@ pub fn remove_argument(
if n_arguments == 1 {
// Case 1: there is only one argument.
let mut count: usize = 0;
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
if matches!(tok, Tok::Lpar) {
if count == 0 {
fix_start = Some(if remove_parentheses {
start
range.start()
} else {
Location::new(start.row(), start.column() + 1)
range.start() + TextSize::from(1)
});
}
count += 1;
@ -390,9 +366,9 @@ pub fn remove_argument(
count -= 1;
if count == 0 {
fix_end = Some(if remove_parentheses {
end
range.end()
} else {
Location::new(end.row(), end.column() - 1)
range.end() - TextSize::from(1)
});
break;
}
@ -400,27 +376,27 @@ pub fn remove_argument(
}
} else if args
.iter()
.map(|node| node.location)
.chain(keywords.iter().map(|node| node.location))
.any(|location| location > expr_at)
.map(Expr::start)
.chain(keywords.iter().map(Keyword::start))
.any(|location| location > expr_range.start())
{
// Case 2: argument or keyword is _not_ the last node.
let mut seen_comma = false;
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
if seen_comma {
if matches!(tok, Tok::NonLogicalNewline) {
// Also delete any non-logical newlines after the comma.
continue;
}
fix_end = Some(if matches!(tok, Tok::Newline) {
end
range.end()
} else {
start
range.start()
});
break;
}
if start == expr_at {
fix_start = Some(start);
if range.start() == expr_range.start() {
fix_start = Some(range.start());
}
if fix_start.is_some() && matches!(tok, Tok::Comma) {
seen_comma = true;
@ -429,13 +405,13 @@ pub fn remove_argument(
} else {
// Case 3: argument or keyword is the last node, so we have to find the last
// comma in the stmt.
for (start, tok, _) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
if start == expr_at {
fix_end = Some(expr_end);
for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
if range.start() == expr_range.start() {
fix_end = Some(expr_range.end());
break;
}
if matches!(tok, Tok::Comma) {
fix_start = Some(start);
fix_start = Some(range.start());
}
}
}
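
Note the lexer's new item type: the old `(Location, Tok, Location)` triple collapses into `(Tok, TextRange)`. A hedged sketch of the parenthesis-counting scheme `remove_argument` uses for its single-argument case, with a stand-in token type (the real code matches on `rustpython_parser::Tok`):

```rust
use ruff_text_size::{TextRange, TextSize};

// Stand-in token type, for illustration only.
enum Tok {
    Lpar,
    Rpar,
    Other,
}

/// Illustrative: find the span between the outermost parentheses, assuming
/// a well-formed (balanced) token stream.
fn inner_span(tokens: &[(Tok, TextRange)]) -> Option<TextRange> {
    let mut depth = 0u32;
    let mut start = None;
    for (tok, range) in tokens {
        match tok {
            Tok::Lpar => {
                if depth == 0 {
                    // Keep the parentheses: start just after `(`.
                    start = Some(range.start() + TextSize::from(1));
                }
                depth += 1;
            }
            Tok::Rpar => {
                depth -= 1;
                if depth == 0 {
                    // ...and end just before `)`.
                    return Some(TextRange::new(start?, range.end() - TextSize::from(1)));
                }
            }
            Tok::Other => {}
        }
    }
    None
}

fn main() {
    // Tokens for `f(x)`: `f` at 0..1, `(` at 1..2, `x` at 2..3, `)` at 3..4.
    let toks = vec![
        (Tok::Other, TextRange::new(TextSize::from(0), TextSize::from(1))),
        (Tok::Lpar, TextRange::new(TextSize::from(1), TextSize::from(2))),
        (Tok::Other, TextRange::new(TextSize::from(2), TextSize::from(3))),
        (Tok::Rpar, TextRange::new(TextSize::from(3), TextSize::from(4))),
    ];
    assert_eq!(
        inner_span(&toks),
        Some(TextRange::new(TextSize::from(2), TextSize::from(3)))
    );
}
```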
@ -482,11 +458,8 @@ pub fn get_or_import_symbol(
//
// By adding this no-op edit, we force the `unused-imports` fix to conflict with the
// `sys-exit-alias` fix, and thus will avoid applying both fixes in the same pass.
let import_edit = Edit::replacement(
locator.slice(source).to_string(),
source.location,
source.end_location.unwrap(),
);
let import_edit =
Edit::range_replacement(locator.slice(source.range()).to_string(), source.range());
Ok((import_edit, binding))
} else {
if let Some(stmt) = importer.get_import_from(module) {
@ -527,8 +500,8 @@ pub fn get_or_import_symbol(
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_text_size::TextSize;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use ruff_python_ast::source_code::Locator;
@ -546,19 +519,13 @@ mod tests {
let program = parser::parse_program(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(
trailing_semicolon(stmt, &locator),
Some(Location::new(1, 5))
);
assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(5)));
let contents = "x = 1 ; y = 1";
let program = parser::parse_program(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(
trailing_semicolon(stmt, &locator),
Some(Location::new(1, 6))
);
assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(6)));
let contents = r#"
x = 1 \
@ -568,10 +535,7 @@ x = 1 \
let program = parser::parse_program(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(
trailing_semicolon(stmt, &locator),
Some(Location::new(2, 2))
);
assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(10)));
Ok(())
}
@ -581,15 +545,15 @@ x = 1 \
let contents = "x = 1; y = 1";
let locator = Locator::new(contents);
assert_eq!(
next_stmt_break(Location::new(1, 4), &locator),
Location::new(1, 5)
next_stmt_break(TextSize::from(4), &locator),
TextSize::from(5)
);
let contents = "x = 1 ; y = 1";
let locator = Locator::new(contents);
assert_eq!(
next_stmt_break(Location::new(1, 5), &locator),
Location::new(1, 6)
next_stmt_break(TextSize::from(5), &locator),
TextSize::from(6)
);
let contents = r#"
@ -599,8 +563,8 @@ x = 1 \
.trim();
let locator = Locator::new(contents);
assert_eq!(
next_stmt_break(Location::new(2, 2), &locator),
Location::new(2, 4)
next_stmt_break(TextSize::from(10), &locator),
TextSize::from(12)
);
}
}
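
It may help to spell out where the new expected offsets come from: they count bytes from the start of the (trimmed) source. In the continuation test, the backslash is byte 6, the newline byte 7, and the semicolon on the next line byte 10, matching `TextSize::from(10)` above. A quick illustrative check:

```rust
fn main() {
    let contents = "x = 1 \\\n  ; y = 1";
    // Bytes: `\` at 6, `\n` at 7, two spaces at 8..=9, `;` at 10.
    assert_eq!(contents.as_bytes()[10], b';');
    // `next_stmt_break` then lands on the first non-whitespace byte after
    // the semicolon: `y` at byte 12, matching `TextSize::from(12)` above.
    assert_eq!(contents.as_bytes()[12], b'y');
}
```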


@ -1,12 +1,11 @@
use std::collections::BTreeSet;
use itertools::Itertools;
use ruff_text_size::{TextRange, TextSize};
use rustc_hash::FxHashMap;
use rustpython_parser::ast::Location;
use ruff_diagnostics::{Diagnostic, Edit, Fix};
use ruff_python_ast::source_code::Locator;
use ruff_python_ast::types::Range;
use crate::linter::FixTable;
use crate::registry::{AsRule, Rule};
@ -33,7 +32,7 @@ fn apply_fixes<'a>(
locator: &'a Locator<'a>,
) -> (String, FixTable) {
let mut output = String::with_capacity(locator.len());
let mut last_pos: Option<Location> = None;
let mut last_pos: Option<TextSize> = None;
let mut applied: BTreeSet<&Edit> = BTreeSet::default();
let mut fixed = FxHashMap::default();
@ -57,7 +56,7 @@ fn apply_fixes<'a>(
// Best-effort approach: if this fix overlaps with a fix we've already applied,
// skip it.
if last_pos.map_or(false, |last_pos| {
fix.min_location()
fix.min_start()
.map_or(false, |fix_location| last_pos >= fix_location)
}) {
continue;
@ -65,14 +64,14 @@ fn apply_fixes<'a>(
for edit in fix.edits() {
// Add all contents from `last_pos` to `edit.start()`.
let slice = locator.slice(Range::new(last_pos.unwrap_or_default(), edit.location()));
let slice = locator.slice(TextRange::new(last_pos.unwrap_or_default(), edit.start()));
output.push_str(slice);
// Add the patch itself.
output.push_str(edit.content().unwrap_or_default());
// Track that the edit was applied.
last_pos = Some(edit.end_location());
last_pos = Some(edit.end());
applied.insert(edit);
}
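
A hedged sketch of the splice loop above, reduced to plain strings: copy the gap before each edit, emit the replacement, and advance `last_pos` to the edit's end (illustrative, assuming the edits are sorted and disjoint):

```rust
use ruff_text_size::{TextRange, TextSize};

/// Illustrative: apply sorted, non-overlapping (range, replacement) pairs.
fn apply(source: &str, edits: &[(TextRange, &str)]) -> String {
    let mut output = String::with_capacity(source.len());
    let mut last_pos = TextSize::from(0);
    for (range, content) in edits {
        // Copy the untouched gap, then the replacement content.
        output.push_str(&source[TextRange::new(last_pos, range.start())]);
        output.push_str(content);
        last_pos = range.end();
    }
    // Copy whatever follows the final edit.
    output.push_str(&source[usize::from(last_pos)..]);
    output
}

fn main() {
    // Deleting bytes 7..15 of `class A(object):` removes `(object)`,
    // mirroring the test above.
    let source = "class A(object):";
    let edits = [(TextRange::new(TextSize::from(7), TextSize::from(15)), "")];
    assert_eq!(apply(source, &edits), "class A:");
}
```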
@ -88,8 +87,8 @@ fn apply_fixes<'a>(
/// Compare two fixes.
fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Ordering {
fix1.min_location()
.cmp(&fix2.min_location())
fix1.min_start()
.cmp(&fix2.min_start())
.then_with(|| match (&rule1, &rule2) {
// Apply `EndsInPeriod` fixes before `NewLineAfterLastParagraph` fixes.
(Rule::EndsInPeriod, Rule::NewLineAfterLastParagraph) => std::cmp::Ordering::Less,
@ -100,7 +99,7 @@ fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Orderi
#[cfg(test)]
mod tests {
use rustpython_parser::ast::Location;
use ruff_text_size::TextSize;
use ruff_diagnostics::Diagnostic;
use ruff_diagnostics::Edit;
@ -114,8 +113,7 @@ mod tests {
.map(|edit| Diagnostic {
// The choice of rule here is arbitrary.
kind: MissingNewlineAtEndOfFile.into(),
location: edit.location(),
end_location: edit.end_location(),
range: edit.range(),
fix: edit.into(),
parent: None,
})
@ -142,8 +140,8 @@ class A(object):
);
let diagnostics = create_diagnostics([Edit::replacement(
"Bar".to_string(),
Location::new(1, 8),
Location::new(1, 14),
TextSize::new(8),
TextSize::new(14),
)]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(
@ -166,8 +164,7 @@ class A(object):
"#
.trim(),
);
let diagnostics =
create_diagnostics([Edit::deletion(Location::new(1, 7), Location::new(1, 15))]);
let diagnostics = create_diagnostics([Edit::deletion(TextSize::new(7), TextSize::new(15))]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(
contents,
@ -190,8 +187,8 @@ class A(object, object, object):
.trim(),
);
let diagnostics = create_diagnostics([
Edit::deletion(Location::new(1, 8), Location::new(1, 16)),
Edit::deletion(Location::new(1, 22), Location::new(1, 30)),
Edit::deletion(TextSize::from(8), TextSize::from(16)),
Edit::deletion(TextSize::from(22), TextSize::from(30)),
]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
@ -216,12 +213,8 @@ class A(object):
.trim(),
);
let diagnostics = create_diagnostics([
Edit::deletion(Location::new(1, 7), Location::new(1, 15)),
Edit::replacement(
"ignored".to_string(),
Location::new(1, 9),
Location::new(1, 11),
),
Edit::deletion(TextSize::from(7), TextSize::from(15)),
Edit::replacement("ignored".to_string(), TextSize::from(9), TextSize::from(11)),
]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(


@ -1,7 +1,7 @@
use ruff_python_semantic::scope::ScopeStack;
use ruff_text_size::TextRange;
use rustpython_parser::ast::{Expr, Stmt};
use ruff_python_ast::types::Range;
use ruff_python_ast::types::RefEquality;
use ruff_python_semantic::analyze::visibility::{Visibility, VisibleScope};
@ -16,7 +16,7 @@ type Context<'a> = (ScopeStack, Vec<RefEquality<'a, Stmt>>);
#[derive(Default)]
pub struct Deferred<'a> {
pub definitions: Vec<(Definition<'a>, Visibility, Context<'a>)>,
pub string_type_definitions: Vec<(Range, &'a str, AnnotationContext, Context<'a>)>,
pub string_type_definitions: Vec<(TextRange, &'a str, AnnotationContext, Context<'a>)>,
pub type_definitions: Vec<(&'a Expr, AnnotationContext, Context<'a>)>,
pub functions: Vec<(&'a Stmt, Context<'a>, VisibleScope)>,
pub lambdas: Vec<(&'a Expr, Context<'a>)>,


@ -3,20 +3,19 @@ use std::path::Path;
use itertools::Itertools;
use log::error;
use nohash_hasher::IntMap;
use ruff_text_size::{TextRange, TextSize};
use rustc_hash::{FxHashMap, FxHashSet};
use rustpython_common::cformat::{CFormatError, CFormatErrorType};
use rustpython_parser::ast::{
Arg, Arguments, Comprehension, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprContext,
ExprKind, KeywordData, Located, Location, Operator, Pattern, PatternKind, Stmt, StmtKind,
Suite,
ExprKind, KeywordData, Located, Operator, Pattern, PatternKind, Stmt, StmtKind, Suite,
};
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::all::{extract_all_names, AllNamesFlags};
use ruff_python_ast::helpers::{extract_handled_exceptions, to_module_path};
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use ruff_python_ast::types::{Node, Range, RefEquality};
use ruff_python_ast::types::{Node, RefEquality};
use ruff_python_ast::typing::parse_type_annotation;
use ruff_python_ast::visitor::{walk_excepthandler, walk_pattern, Visitor};
use ruff_python_ast::{branch_detection, cast, helpers, str, visitor};
@ -39,6 +38,7 @@ use crate::docstrings::definition::{
};
use crate::fs::relativize_path;
use crate::importer::Importer;
use crate::noqa::NoqaMapping;
use crate::registry::{AsRule, Rule};
use crate::rules::{
flake8_2020, flake8_annotations, flake8_bandit, flake8_blind_except, flake8_boolean_trap,
@ -67,7 +67,7 @@ pub struct Checker<'a> {
autofix: flags::Autofix,
noqa: flags::Noqa,
pub settings: &'a Settings,
pub noqa_line_for: &'a IntMap<usize, usize>,
pub noqa_line_for: &'a NoqaMapping,
pub locator: &'a Locator<'a>,
pub stylist: &'a Stylist<'a>,
pub indexer: &'a Indexer,
@ -85,7 +85,7 @@ impl<'a> Checker<'a> {
#[allow(clippy::too_many_arguments)]
pub fn new(
settings: &'a Settings,
noqa_line_for: &'a IntMap<usize, usize>,
noqa_line_for: &'a NoqaMapping,
autofix: flags::Autofix,
noqa: flags::Noqa,
path: &'a Path,
@ -126,7 +126,7 @@ impl<'a> Checker<'a> {
}
/// Return `true` if a `Rule` is disabled by a `noqa` directive.
pub fn rule_is_ignored(&self, code: Rule, lineno: usize) -> bool {
pub fn rule_is_ignored(&self, code: Rule, offset: TextSize) -> bool {
// TODO(charlie): `noqa` directives are mostly enforced in `check_lines.rs`.
// However, in rare cases, we need to check them here. For example, when
// removing unused imports, we create a single fix that's applied to all
@ -137,7 +137,7 @@ impl<'a> Checker<'a> {
if !self.noqa.to_bool() {
return false;
}
noqa::rule_is_ignored(code, lineno, self.noqa_line_for, self.locator)
noqa::rule_is_ignored(code, offset, self.noqa_line_for, self.locator)
}
}
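
`noqa` suppression is now keyed by offset rather than line number: the checker passes the diagnostic's start offset, and the `NoqaMapping` resolves it to the position whose `# noqa` comment governs it (e.g. across explicit line continuations). A hedged sketch of that resolution with a simplified stand-in for the mapping type:

```rust
use ruff_text_size::{TextRange, TextSize};

/// Simplified stand-in for `NoqaMapping`: ranges that redirect any offset
/// inside them to a single `# noqa`-bearing position.
struct Mapping {
    ranges: Vec<(TextRange, TextSize)>,
}

impl Mapping {
    /// Offsets not covered by any mapping resolve to themselves.
    fn resolve(&self, offset: TextSize) -> TextSize {
        self.ranges
            .iter()
            .find(|(range, _)| range.contains(offset))
            .map_or(offset, |(_, target)| *target)
    }
}

fn main() {
    // Both lines of a continuation map to the `# noqa` comment's position.
    let mapping = Mapping {
        ranges: vec![(
            TextRange::new(TextSize::from(0), TextSize::from(8)),
            TextSize::from(8),
        )],
    };
    assert_eq!(mapping.resolve(TextSize::from(3)), TextSize::from(8));
    assert_eq!(mapping.resolve(TextSize::from(20)), TextSize::from(20));
}
```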
@ -221,13 +221,13 @@ where
match &stmt.node {
StmtKind::Global { names } => {
let scope_index = self.ctx.scope_id();
let ranges: Vec<Range> = helpers::find_names(stmt, self.locator).collect();
let ranges: Vec<TextRange> = helpers::find_names(stmt, self.locator).collect();
if !scope_index.is_global() {
// Add the binding to the current scope.
let context = self.ctx.execution_context();
let exceptions = self.ctx.exceptions();
let scope = &mut self.ctx.scopes[scope_index];
let usage = Some((scope.id, Range::from(stmt)));
let usage = Some((scope.id, stmt.range()));
for (name, range) in names.iter().zip(ranges.iter()) {
let id = self.ctx.bindings.push(Binding {
kind: BindingKind::Global,
@ -252,12 +252,12 @@ where
}
StmtKind::Nonlocal { names } => {
let scope_index = self.ctx.scope_id();
let ranges: Vec<Range> = helpers::find_names(stmt, self.locator).collect();
let ranges: Vec<TextRange> = helpers::find_names(stmt, self.locator).collect();
if !scope_index.is_global() {
let context = self.ctx.execution_context();
let exceptions = self.ctx.exceptions();
let scope = &mut self.ctx.scopes[scope_index];
let usage = Some((scope.id, Range::from(stmt)));
let usage = Some((scope.id, stmt.range()));
for (name, range) in names.iter().zip(ranges.iter()) {
// Add a binding to the current scope.
let id = self.ctx.bindings.push(Binding {
@ -695,7 +695,7 @@ where
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(stmt),
range: stmt.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -880,7 +880,7 @@ where
.rules
.enabled(Rule::ModuleImportNotAtTopOfFile)
{
pycodestyle::rules::module_import_not_at_top_of_file(self, stmt);
pycodestyle::rules::module_import_not_at_top_of_file(self, stmt, self.locator);
}
if self.settings.rules.enabled(Rule::GlobalStatement) {
@ -909,9 +909,9 @@ where
kind: BindingKind::FutureImportation,
runtime_usage: None,
// Always mark `__future__` imports as used.
synthetic_usage: Some((self.ctx.scope_id(), Range::from(alias))),
synthetic_usage: Some((self.ctx.scope_id(), alias.range())),
typing_usage: None,
range: Range::from(alias),
range: alias.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -923,7 +923,7 @@ where
{
self.diagnostics.push(Diagnostic::new(
pyflakes::rules::LateFutureImport,
Range::from(stmt),
stmt.range(),
));
}
} else if alias.node.name.contains('.') && alias.node.asname.is_none() {
@ -941,7 +941,7 @@ where
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(alias),
range: alias.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -964,12 +964,12 @@ where
kind: BindingKind::Importation(Importation { name, full_name }),
runtime_usage: None,
synthetic_usage: if is_explicit_reexport {
Some((self.ctx.scope_id(), Range::from(alias)))
Some((self.ctx.scope_id(), alias.range()))
} else {
None
},
typing_usage: None,
range: Range::from(alias),
range: alias.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -1135,7 +1135,7 @@ where
.rules
.enabled(Rule::ModuleImportNotAtTopOfFile)
{
pycodestyle::rules::module_import_not_at_top_of_file(self, stmt);
pycodestyle::rules::module_import_not_at_top_of_file(self, stmt, self.locator);
}
if self.settings.rules.enabled(Rule::GlobalStatement) {
@ -1220,9 +1220,9 @@ where
kind: BindingKind::FutureImportation,
runtime_usage: None,
// Always mark `__future__` imports as used.
synthetic_usage: Some((self.ctx.scope_id(), Range::from(alias))),
synthetic_usage: Some((self.ctx.scope_id(), alias.range())),
typing_usage: None,
range: Range::from(alias),
range: alias.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -1242,7 +1242,7 @@ where
{
self.diagnostics.push(Diagnostic::new(
pyflakes::rules::LateFutureImport,
Range::from(stmt),
stmt.range(),
));
}
} else if alias.node.name == "*" {
@ -1265,7 +1265,7 @@ where
module.as_deref(),
),
},
Range::from(stmt),
stmt.range(),
));
}
}
@ -1279,7 +1279,7 @@ where
pyflakes::rules::UndefinedLocalWithImportStar {
name: helpers::format_import_from(*level, module.as_deref()),
},
Range::from(stmt),
stmt.range(),
));
}
} else {
@ -1313,12 +1313,12 @@ where
}),
runtime_usage: None,
synthetic_usage: if is_explicit_reexport {
Some((self.ctx.scope_id(), Range::from(alias)))
Some((self.ctx.scope_id(), alias.range()))
} else {
None
},
typing_usage: None,
range: Range::from(alias),
range: alias.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -2004,7 +2004,7 @@ where
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(stmt),
range: stmt.range(),
source: Some(RefEquality(stmt)),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -2067,7 +2067,7 @@ where
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(*stmt),
range: stmt.range(),
source: Some(RefEquality(stmt)),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -2228,7 +2228,7 @@ where
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(stmt),
range: stmt.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -2261,7 +2261,7 @@ where
} = &expr.node
{
self.deferred.string_type_definitions.push((
Range::from(expr),
expr.range(),
value,
(self.ctx.in_annotation, self.ctx.in_type_checking_block),
(self.ctx.scope_stack.clone(), self.ctx.parents.clone()),
@ -2336,7 +2336,7 @@ where
elts,
check_too_many_expressions,
check_two_starred_expressions,
Range::from(expr),
expr.range(),
) {
self.diagnostics.push(diagnostic);
}
@ -2369,7 +2369,7 @@ where
ExprContext::Store => {
if self.settings.rules.enabled(Rule::AmbiguousVariableName) {
if let Some(diagnostic) =
pycodestyle::rules::ambiguous_variable_name(id, Range::from(expr))
pycodestyle::rules::ambiguous_variable_name(id, expr.range())
{
self.diagnostics.push(diagnostic);
}
@ -2455,7 +2455,7 @@ where
{
if attr == "format" {
// "...".format(...) call
let location = Range::from(expr);
let location = expr.range();
match pyflakes::format::FormatSummary::try_from(value.as_ref()) {
Err(e) => {
if self
@ -2895,14 +2895,14 @@ where
func,
args,
keywords,
Range::from(expr),
expr.range(),
);
}
if self.settings.rules.enabled(Rule::CallDatetimeToday) {
flake8_datetimez::rules::call_datetime_today(self, func, Range::from(expr));
flake8_datetimez::rules::call_datetime_today(self, func, expr.range());
}
if self.settings.rules.enabled(Rule::CallDatetimeUtcnow) {
flake8_datetimez::rules::call_datetime_utcnow(self, func, Range::from(expr));
flake8_datetimez::rules::call_datetime_utcnow(self, func, expr.range());
}
if self
.settings
@ -2912,7 +2912,7 @@ where
flake8_datetimez::rules::call_datetime_utcfromtimestamp(
self,
func,
Range::from(expr),
expr.range(),
);
}
if self
@ -2925,7 +2925,7 @@ where
func,
args,
keywords,
Range::from(expr),
expr.range(),
);
}
if self.settings.rules.enabled(Rule::CallDatetimeFromtimestamp) {
@ -2934,7 +2934,7 @@ where
func,
args,
keywords,
Range::from(expr),
expr.range(),
);
}
if self
@ -2946,14 +2946,14 @@ where
self,
func,
args,
Range::from(expr),
expr.range(),
);
}
if self.settings.rules.enabled(Rule::CallDateToday) {
flake8_datetimez::rules::call_date_today(self, func, Range::from(expr));
flake8_datetimez::rules::call_date_today(self, func, expr.range());
}
if self.settings.rules.enabled(Rule::CallDateFromtimestamp) {
flake8_datetimez::rules::call_date_fromtimestamp(self, func, Range::from(expr));
flake8_datetimez::rules::call_date_fromtimestamp(self, func, expr.range());
}
// pygrep-hooks
@ -3207,7 +3207,7 @@ where
Rule::PercentFormatStarRequiresSequence,
Rule::PercentFormatUnsupportedFormatCharacter,
]) {
let location = Range::from(expr);
let location = expr.range();
match pyflakes::cformat::CFormatSummary::try_from(value.as_str()) {
Err(CFormatError {
typ: CFormatErrorType::UnsupportedFormatChar(c),
@ -3309,7 +3309,7 @@ where
}
if self.settings.rules.enabled(Rule::PrintfStringFormatting) {
pyupgrade::rules::printf_string_formatting(self, expr, right);
pyupgrade::rules::printf_string_formatting(self, expr, right, self.locator);
}
if self.settings.rules.enabled(Rule::BadStringFormatType) {
pylint::rules::bad_string_format_type(self, expr, right);
@ -3417,7 +3417,7 @@ where
left,
ops,
comparators,
Range::from(expr),
expr.range(),
);
}
@ -3495,7 +3495,7 @@ where
} => {
if self.ctx.in_type_definition && !self.ctx.in_literal && !self.ctx.in_f_string {
self.deferred.string_type_definitions.push((
Range::from(expr),
expr.range(),
value,
(self.ctx.in_annotation, self.ctx.in_type_checking_block),
(self.ctx.scope_stack.clone(), self.ctx.parents.clone()),
@ -3506,10 +3506,9 @@ where
.rules
.enabled(Rule::HardcodedBindAllInterfaces)
{
if let Some(diagnostic) = flake8_bandit::rules::hardcoded_bind_all_interfaces(
value,
&Range::from(expr),
) {
if let Some(diagnostic) =
flake8_bandit::rules::hardcoded_bind_all_interfaces(value, expr.range())
{
self.diagnostics.push(diagnostic);
}
}
@ -3979,13 +3978,12 @@ where
if self.ctx.scope().defines(name.as_str()) {
self.handle_node_store(
name,
&Expr::new(
name_range.location,
name_range.end_location,
&Expr::with_range(
ExprKind::Name {
id: name.to_string(),
ctx: ExprContext::Store,
},
name_range,
),
);
}
@ -3993,13 +3991,12 @@ where
let definition = self.ctx.scope().get(name.as_str()).copied();
self.handle_node_store(
name,
&Expr::new(
name_range.location,
name_range.end_location,
&Expr::with_range(
ExprKind::Name {
id: name.to_string(),
ctx: ExprContext::Store,
},
name_range,
),
);
@ -4108,7 +4105,7 @@ where
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(arg),
range: arg.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -4117,7 +4114,7 @@ where
if self.settings.rules.enabled(Rule::AmbiguousVariableName) {
if let Some(diagnostic) =
pycodestyle::rules::ambiguous_variable_name(&arg.node.arg, Range::from(arg))
pycodestyle::rules::ambiguous_variable_name(&arg.node.arg, arg.range())
{
self.diagnostics.push(diagnostic);
}
@ -4152,7 +4149,7 @@ where
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(pattern),
range: pattern.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -4220,10 +4217,13 @@ impl<'a> Checker<'a> {
);
if binding.kind.is_loop_var() && existing_is_import {
if self.settings.rules.enabled(Rule::ImportShadowedByLoopVar) {
#[allow(deprecated)]
let line = self.locator.compute_line_index(existing.range.start());
self.diagnostics.push(Diagnostic::new(
pyflakes::rules::ImportShadowedByLoopVar {
name: name.to_string(),
line: existing.range.location.row(),
line,
},
binding.range,
));
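
Rules that still report a one-based line number, like `ImportShadowedByLoopVar` here, now derive it from the offset on demand (the `#[allow(deprecated)]` marks this as a transitional escape hatch). A hedged sketch of such a lookup over a table of line-start offsets; the real `Locator` presumably caches an equivalent index:

```rust
use ruff_text_size::{TextLen, TextSize};

/// Illustrative: byte offsets of each line start, in ascending order.
fn line_starts(source: &str) -> Vec<TextSize> {
    std::iter::once(TextSize::from(0))
        .chain(
            source
                .match_indices('\n')
                .map(|(i, nl)| TextSize::try_from(i).unwrap() + nl.text_len()),
        )
        .collect()
}

/// One-based line number containing `offset` (binary search over starts).
fn line_index(starts: &[TextSize], offset: TextSize) -> usize {
    match starts.binary_search(&offset) {
        Ok(i) => i + 1,
        Err(i) => i,
    }
}

fn main() {
    let source = "import os\nimport os\n";
    let starts = line_starts(source);
    assert_eq!(line_index(&starts, TextSize::from(0)), 1);
    assert_eq!(line_index(&starts, TextSize::from(10)), 2);
}
```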
@ -4239,10 +4239,13 @@ impl<'a> Checker<'a> {
))
{
if self.settings.rules.enabled(Rule::RedefinedWhileUnused) {
#[allow(deprecated)]
let line = self.locator.compute_line_index(existing.range.start());
let mut diagnostic = Diagnostic::new(
pyflakes::rules::RedefinedWhileUnused {
name: name.to_string(),
line: existing.range.location.row(),
line,
},
matches!(
binding.kind,
@ -4257,9 +4260,9 @@ impl<'a> Checker<'a> {
);
if let Some(parent) = binding.source.as_ref() {
if matches!(parent.node, StmtKind::ImportFrom { .. })
&& parent.location.row() != binding.range.location.row()
&& parent.range().contains_range(binding.range)
{
diagnostic.set_parent(parent.location);
diagnostic.set_parent(parent.start());
}
}
self.diagnostics.push(diagnostic);
@ -4327,9 +4330,9 @@ impl<'a> Checker<'a> {
{
let id = self.ctx.bindings.push(Binding {
kind: BindingKind::Builtin,
range: Range::default(),
range: TextRange::default(),
runtime_usage: None,
synthetic_usage: Some((ScopeId::global(), Range::default())),
synthetic_usage: Some((ScopeId::global(), TextRange::default())),
typing_usage: None,
source: None,
context: ExecutionContext::Runtime,
@ -4363,7 +4366,7 @@ impl<'a> Checker<'a> {
if let Some(index) = scope.get(id.as_str()) {
// Mark the binding as used.
let context = self.ctx.execution_context();
self.ctx.bindings[*index].mark_used(scope_id, Range::from(expr), context);
self.ctx.bindings[*index].mark_used(scope_id, expr.range(), context);
if self.ctx.bindings[*index].kind.is_annotation()
&& self.ctx.in_deferred_string_type_definition.is_none()
@ -4394,7 +4397,7 @@ impl<'a> Checker<'a> {
if let Some(index) = scope.get(full_name) {
self.ctx.bindings[*index].mark_used(
scope_id,
Range::from(expr),
expr.range(),
context,
);
}
@ -4411,7 +4414,7 @@ impl<'a> Checker<'a> {
if let Some(index) = scope.get(full_name.as_str()) {
self.ctx.bindings[*index].mark_used(
scope_id,
Range::from(expr),
expr.range(),
context,
);
}
@ -4451,7 +4454,7 @@ impl<'a> Checker<'a> {
name: id.to_string(),
sources,
},
Range::from(expr),
expr.range(),
));
}
return;
@ -4482,7 +4485,7 @@ impl<'a> Checker<'a> {
self.diagnostics.push(Diagnostic::new(
pyflakes::rules::UndefinedName { name: id.clone() },
Range::from(expr),
expr.range(),
));
}
}
@ -4557,7 +4560,7 @@ impl<'a> Checker<'a> {
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(expr),
range: expr.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -4577,7 +4580,7 @@ impl<'a> Checker<'a> {
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(expr),
range: expr.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -4594,7 +4597,7 @@ impl<'a> Checker<'a> {
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(expr),
range: expr.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -4676,7 +4679,7 @@ impl<'a> Checker<'a> {
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(expr),
range: expr.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -4693,7 +4696,7 @@ impl<'a> Checker<'a> {
runtime_usage: None,
synthetic_usage: None,
typing_usage: None,
range: Range::from(expr),
range: expr.range(),
source: Some(*self.ctx.current_stmt()),
context: self.ctx.execution_context(),
exceptions: self.ctx.exceptions(),
@ -4721,7 +4724,7 @@ impl<'a> Checker<'a> {
pyflakes::rules::UndefinedName {
name: id.to_string(),
},
Range::from(expr),
expr.range(),
));
}
@ -4948,9 +4951,9 @@ impl<'a> Checker<'a> {
}
// Mark anything referenced in `__all__` as used.
let all_bindings: Option<(Vec<BindingId>, Range)> = {
let all_bindings: Option<(Vec<BindingId>, TextRange)> = {
let global_scope = self.ctx.global_scope();
let all_names: Option<(&Vec<&str>, Range)> = global_scope
let all_names: Option<(&Vec<&str>, TextRange)> = global_scope
.get("__all__")
.map(|index| &self.ctx.bindings[*index])
.and_then(|binding| match &binding.kind {
@ -4980,7 +4983,7 @@ impl<'a> Checker<'a> {
}
// Extract `__all__` names from the global scope.
let all_names: Option<(&[&str], Range)> = self
let all_names: Option<(&[&str], TextRange)> = self
.ctx
.global_scope()
.get("__all__")
@ -5023,7 +5026,7 @@ impl<'a> Checker<'a> {
// F822
if self.settings.rules.enabled(Rule::UndefinedExport) {
if !self.path.ends_with("__init__.py") {
if let Some((names, range)) = &all_names {
if let Some((names, range)) = all_names {
diagnostics
.extend(pyflakes::rules::undefined_export(names, range, scope));
}
@ -5107,10 +5110,13 @@ impl<'a> Checker<'a> {
if let Some(indices) = self.ctx.shadowed_bindings.get(index) {
for index in indices {
let rebound = &self.ctx.bindings[*index];
#[allow(deprecated)]
let line = self.locator.compute_line_index(binding.range.start());
let mut diagnostic = Diagnostic::new(
pyflakes::rules::RedefinedWhileUnused {
name: (*name).to_string(),
line: binding.range.location.row(),
line,
},
matches!(
rebound.kind,
@ -5126,9 +5132,9 @@ impl<'a> Checker<'a> {
);
if let Some(parent) = &rebound.source {
if matches!(parent.node, StmtKind::ImportFrom { .. })
&& parent.location.row() != rebound.range.location.row()
&& parent.range().contains_range(rebound.range)
{
diagnostic.set_parent(parent.location);
diagnostic.set_parent(parent.start());
}
};
diagnostics.push(diagnostic);
@ -5178,7 +5184,7 @@ impl<'a> Checker<'a> {
if self.settings.rules.enabled(Rule::UnusedImport) {
// Collect all unused imports by location. (Multiple unused imports at the same
// location indicate an `import from`.)
type UnusedImport<'a> = (&'a str, &'a Range);
type UnusedImport<'a> = (&'a str, &'a TextRange);
type BindingContext<'a, 'b> = (
&'a RefEquality<'b, Stmt>,
Option<&'a RefEquality<'b, Stmt>>,
@ -5213,16 +5219,16 @@ impl<'a> Checker<'a> {
let exceptions = binding.exceptions;
let child: &Stmt = defined_by.into();
let diagnostic_lineno = binding.range.location.row();
let parent_lineno = if matches!(child.node, StmtKind::ImportFrom { .. }) {
Some(child.location.row())
let diagnostic_offset = binding.range.start();
let parent_offset = if matches!(child.node, StmtKind::ImportFrom { .. }) {
Some(child.start())
} else {
None
};
if self.rule_is_ignored(Rule::UnusedImport, diagnostic_lineno)
|| parent_lineno.map_or(false, |parent_lineno| {
self.rule_is_ignored(Rule::UnusedImport, parent_lineno)
if self.rule_is_ignored(Rule::UnusedImport, diagnostic_offset)
|| parent_offset.map_or(false, |parent_offset| {
self.rule_is_ignored(Rule::UnusedImport, parent_offset)
})
{
ignored
@ -5241,7 +5247,7 @@ impl<'a> Checker<'a> {
self.settings.ignore_init_module_imports && self.path.ends_with("__init__.py");
for ((defined_by, defined_in, exceptions), unused_imports) in unused
.into_iter()
.sorted_by_key(|((defined_by, ..), ..)| defined_by.location)
.sorted_by_key(|((defined_by, ..), ..)| defined_by.start())
{
let child: &Stmt = defined_by.into();
let parent: Option<&Stmt> = defined_in.map(Into::into);
@ -5291,9 +5297,9 @@ impl<'a> Checker<'a> {
*range,
);
if matches!(child.node, StmtKind::ImportFrom { .. }) {
diagnostic.set_parent(child.location);
diagnostic.set_parent(child.start());
}
if let Some(fix) = fix.as_ref() {
if let Some(fix) = &fix {
diagnostic.set_fix(fix.clone());
}
diagnostics.push(diagnostic);
@ -5301,7 +5307,7 @@ impl<'a> Checker<'a> {
}
for ((defined_by, .., exceptions), unused_imports) in ignored
.into_iter()
.sorted_by_key(|((defined_by, ..), ..)| defined_by.location)
.sorted_by_key(|((defined_by, ..), ..)| defined_by.start())
{
let child: &Stmt = defined_by.into();
let multiple = unused_imports.len() > 1;
@ -5323,7 +5329,7 @@ impl<'a> Checker<'a> {
*range,
);
if matches!(child.node, StmtKind::ImportFrom { .. }) {
diagnostic.set_parent(child.location);
diagnostic.set_parent(child.start());
}
diagnostics.push(diagnostic);
}
@ -5451,30 +5457,33 @@ impl<'a> Checker<'a> {
// Extract a `Docstring` from a `Definition`.
let expr = definition.docstring.unwrap();
let contents = self.locator.slice(expr);
let indentation = self.locator.slice(Range::new(
Location::new(expr.location.row(), 0),
Location::new(expr.location.row(), expr.location.column()),
let contents = self.locator.slice(expr.range());
let indentation = self.locator.slice(TextRange::new(
self.locator.line_start(expr.start()),
expr.start(),
));
if pydocstyle::helpers::should_ignore_docstring(contents) {
#[allow(deprecated)]
let location = self.locator.compute_source_location(expr.start());
warn_user!(
"Docstring at {}:{}:{} contains implicit string concatenation; ignoring...",
relativize_path(self.path),
expr.location.row(),
expr.location.column() + 1
);
"Docstring at {}:{}:{} contains implicit string concatenation; ignoring...",
relativize_path(self.path),
location.row,
location.column
);
continue;
}
// SAFETY: Safe for docstrings that pass `should_ignore_docstring`.
let body = str::raw_contents(contents).unwrap();
let body_range = str::raw_contents_range(contents).unwrap();
let docstring = Docstring {
kind: definition.kind,
expr,
contents,
indentation,
body,
body_range,
};
if !pydocstyle::rules::not_empty(self, &docstring) {
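
The docstring's indentation is now just the slice from the start of its line to the expression's own start, as in the `locator.line_start(expr.start())` call above. A sketch with a hypothetical free-function `line_start` (the real helper lives on `Locator`):

```rust
use ruff_text_size::{TextRange, TextSize};

/// Illustrative: offset of the start of the line containing `offset`.
fn line_start(source: &str, offset: TextSize) -> TextSize {
    source[..usize::from(offset)]
        .rfind('\n')
        .map_or(TextSize::from(0), |i| TextSize::try_from(i + 1).unwrap())
}

fn main() {
    let source = "def f():\n    \"\"\"Doc.\"\"\"\n";
    let docstring_start = TextSize::from(13); // first `"` of the docstring
    let start = line_start(source, docstring_start);
    let indentation = &source[TextRange::new(start, docstring_start)];
    assert_eq!(indentation, "    ");
}
```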
@ -5624,7 +5633,7 @@ pub fn check_ast(
locator: &Locator,
stylist: &Stylist,
indexer: &Indexer,
noqa_line_for: &IntMap<usize, usize>,
noqa_line_for: &NoqaMapping,
settings: &Settings,
autofix: flags::Autofix,
noqa: flags::Noqa,


@ -30,13 +30,11 @@ fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) ->
for stmt in blocks.iter().flat_map(|block| &block.imports) {
match &stmt.node {
StmtKind::Import { names } => {
module_imports.extend(names.iter().map(|name| {
ModuleImport::new(
name.node.name.clone(),
stmt.location,
stmt.end_location.unwrap(),
)
}));
module_imports.extend(
names
.iter()
.map(|name| ModuleImport::new(name.node.name.clone(), stmt.range())),
);
}
StmtKind::ImportFrom {
module,
@ -61,11 +59,7 @@ fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) ->
Cow::Owned(module_path[..module_path.len() - level].join("."))
};
module_imports.extend(names.iter().map(|name| {
ModuleImport::new(
format!("{}.{}", module, name.node.name),
name.location,
name.end_location.unwrap(),
)
ModuleImport::new(format!("{}.{}", module, name.node.name), name.range())
}));
}
_ => panic!("Expected StmtKind::Import | StmtKind::ImportFrom"),


@ -1,9 +1,9 @@
use rustpython_parser::ast::Location;
use ruff_text_size::TextRange;
use rustpython_parser::lexer::LexResult;
use ruff_diagnostics::{Diagnostic, Fix};
use ruff_python_ast::source_code::{Locator, Stylist};
use ruff_python_ast::types::Range;
use ruff_python_ast::token_kind::TokenKind;
use crate::registry::{AsRule, Rule};
use crate::rules::pycodestyle::rules::logical_lines::{
@ -63,8 +63,7 @@ pub fn check_logical_lines(
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
range: TextRange::empty(location),
fix: Fix::empty(),
parent: None,
});
@ -75,8 +74,7 @@ pub fn check_logical_lines(
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
range: TextRange::empty(location),
fix: Fix::empty(),
parent: None,
});
@ -86,8 +84,7 @@ pub fn check_logical_lines(
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
range: TextRange::empty(location),
fix: Fix::empty(),
parent: None,
});
@ -108,8 +105,7 @@ pub fn check_logical_lines(
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
range: TextRange::empty(location),
fix: Fix::empty(),
parent: None,
});
@ -121,8 +117,7 @@ pub fn check_logical_lines(
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
range: TextRange::empty(location),
fix: Fix::empty(),
parent: None,
});
@ -133,8 +128,7 @@ pub fn check_logical_lines(
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
range: TextRange::empty(location),
fix: Fix::empty(),
parent: None,
});
@ -142,12 +136,13 @@ pub fn check_logical_lines(
}
}
if line.flags().contains(TokenFlags::COMMENT) {
for (range, kind) in whitespace_before_comment(&line.tokens(), locator) {
for (range, kind) in
whitespace_before_comment(&line.tokens(), locator, prev_line.is_none())
{
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location: range.location,
end_location: range.end_location,
range,
fix: Fix::empty(),
parent: None,
});
@ -167,12 +162,21 @@ pub fn check_logical_lines(
}
// Extract the indentation level.
let Some(start_loc) = line.first_token_location() else { continue; };
let start_line = locator.slice(Range::new(Location::new(start_loc.row(), 0), start_loc));
let indent_level = expand_indent(start_line);
let Some(first_token) = line.first_token() else {
continue;
};
let range = if first_token.kind() == TokenKind::Indent {
first_token.range()
} else {
TextRange::new(locator.line_start(first_token.start()), first_token.start())
};
let indent_level = expand_indent(locator.slice(range));
let indent_size = 4;
for (location, kind) in indentation(
for kind in indentation(
&line,
prev_line.as_ref(),
indent_char,
@ -183,8 +187,7 @@ pub fn check_logical_lines(
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location: Location::new(start_loc.row(), 0),
end_location: location,
range,
fix: Fix::empty(),
parent: None,
});
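
Zero-width diagnostics (previously expressed as `location == end_location`) become empty ranges anchored at an offset; a tiny illustrative check:

```rust
use ruff_text_size::{TextRange, TextSize};

fn main() {
    let location = TextSize::from(42);
    let range = TextRange::empty(location);
    // An empty range still carries a position but spans zero bytes.
    assert_eq!(range.start(), range.end());
    assert_eq!(range.len(), TextSize::from(0));
    assert_eq!(range.start(), location);
}
```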


@ -1,15 +1,13 @@
//! `NoQA` enforcement and validation.
use nohash_hasher::IntMap;
use rustpython_parser::ast::Location;
use itertools::Itertools;
use ruff_text_size::{TextLen, TextRange, TextSize};
use ruff_diagnostics::{Diagnostic, Edit};
use ruff_python_ast::newlines::StrExt;
use ruff_python_ast::types::Range;
use ruff_python_ast::source_code::Locator;
use crate::codes::NoqaCode;
use crate::noqa;
use crate::noqa::{Directive, FileExemption};
use crate::noqa::{Directive, FileExemption, NoqaDirectives, NoqaMapping};
use crate::registry::{AsRule, Rule};
use crate::rule_redirects::get_redirect_target;
use crate::rules::ruff::rules::{UnusedCodes, UnusedNOQA};
@ -17,37 +15,25 @@ use crate::settings::{flags, Settings};
pub fn check_noqa(
diagnostics: &mut Vec<Diagnostic>,
contents: &str,
commented_lines: &[usize],
noqa_line_for: &IntMap<usize, usize>,
locator: &Locator,
comment_ranges: &[TextRange],
noqa_line_for: &NoqaMapping,
settings: &Settings,
autofix: flags::Autofix,
) -> Vec<usize> {
let enforce_noqa = settings.rules.enabled(Rule::UnusedNOQA);
let lines: Vec<&str> = contents.universal_newlines().collect();
// Identify any codes that are globally exempted (within the current file).
let exemption = noqa::file_exemption(&lines, commented_lines);
// Map from line number to `noqa` directive on that line, along with any codes
// that were matched by the directive.
let mut noqa_directives: IntMap<usize, (Directive, Vec<NoqaCode>)> = IntMap::default();
let exemption = noqa::file_exemption(locator.contents(), comment_ranges);
// Extract all `noqa` directives.
if enforce_noqa {
for lineno in commented_lines {
noqa_directives
.entry(lineno - 1)
.or_insert_with(|| (noqa::extract_noqa_directive(lines[lineno - 1]), vec![]));
}
}
let mut noqa_directives = NoqaDirectives::from_commented_ranges(comment_ranges, locator);
// Indices of diagnostics that were ignored by a `noqa` directive.
let mut ignored_diagnostics = vec![];
// Remove any ignored diagnostics.
for (index, diagnostic) in diagnostics.iter().enumerate() {
'outer: for (index, diagnostic) in diagnostics.iter().enumerate() {
if matches!(diagnostic.kind.rule(), Rule::BlanketNOQA) {
continue;
}
@ -68,92 +54,65 @@ pub fn check_noqa(
FileExemption::None => {}
}
let diagnostic_lineno = diagnostic.location.row();
let noqa_offsets = diagnostic
.parent
.into_iter()
.chain(std::iter::once(diagnostic.start()))
.map(|position| noqa_line_for.resolve(position))
.unique();
// Is the violation ignored by a `noqa` directive on the parent line?
if let Some(parent_lineno) = diagnostic.parent.map(|location| location.row()) {
if parent_lineno != diagnostic_lineno {
let noqa_lineno = noqa_line_for.get(&parent_lineno).unwrap_or(&parent_lineno);
if commented_lines.contains(noqa_lineno) {
let noqa = noqa_directives.entry(noqa_lineno - 1).or_insert_with(|| {
(noqa::extract_noqa_directive(lines[noqa_lineno - 1]), vec![])
});
match noqa {
(Directive::All(..), matches) => {
matches.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
continue;
}
(Directive::Codes(.., codes, _), matches) => {
if noqa::includes(diagnostic.kind.rule(), codes) {
matches.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
continue;
}
}
(Directive::None, ..) => {}
}
}
}
}
// Is the diagnostic ignored by a `noqa` directive on the same line?
let noqa_lineno = noqa_line_for
.get(&diagnostic_lineno)
.unwrap_or(&diagnostic_lineno);
if commented_lines.contains(noqa_lineno) {
let noqa = noqa_directives
.entry(noqa_lineno - 1)
.or_insert_with(|| (noqa::extract_noqa_directive(lines[noqa_lineno - 1]), vec![]));
match noqa {
(Directive::All(..), matches) => {
matches.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
continue;
}
(Directive::Codes(.., codes, _), matches) => {
if noqa::includes(diagnostic.kind.rule(), codes) {
matches.push(diagnostic.kind.rule().noqa_code());
for noqa_offset in noqa_offsets {
if let Some(directive_line) = noqa_directives.find_line_with_directive_mut(noqa_offset)
{
let suppressed = match &directive_line.directive {
Directive::All(..) => {
directive_line
.matches
.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
continue;
true
}
Directive::Codes(.., codes, _) => {
if noqa::includes(diagnostic.kind.rule(), codes) {
directive_line
.matches
.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
true
} else {
false
}
}
Directive::None => unreachable!(),
};
if suppressed {
continue 'outer;
}
(Directive::None, ..) => {}
}
}
}
// Enforce that the noqa directive was actually used (RUF100).
if enforce_noqa {
for (row, (directive, matches)) in noqa_directives {
match directive {
Directive::All(leading_spaces, start_byte, end_byte, trailing_spaces) => {
if matches.is_empty() {
let start_char = lines[row][..start_byte].chars().count();
let end_char =
start_char + lines[row][start_byte..end_byte].chars().count();
let mut diagnostic = Diagnostic::new(
UnusedNOQA { codes: None },
Range::new(
Location::new(row + 1, start_char),
Location::new(row + 1, end_char),
),
);
for line in noqa_directives.lines() {
match &line.directive {
Directive::All(leading_spaces, noqa_range, trailing_spaces) => {
if line.matches.is_empty() {
let mut diagnostic =
Diagnostic::new(UnusedNOQA { codes: None }, *noqa_range);
if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
diagnostic.set_fix(delete_noqa(
row,
lines[row],
leading_spaces,
start_byte,
end_byte,
trailing_spaces,
*leading_spaces,
*noqa_range,
*trailing_spaces,
locator,
));
}
diagnostics.push(diagnostic);
}
}
Directive::Codes(leading_spaces, start_byte, end_byte, codes, trailing_spaces) => {
Directive::Codes(leading_spaces, range, codes, trailing_spaces) => {
let mut disabled_codes = vec![];
let mut unknown_codes = vec![];
let mut unmatched_codes = vec![];
@ -166,7 +125,9 @@ pub fn check_noqa(
break;
}
if matches.iter().any(|m| *m == code) || settings.external.contains(code) {
if line.matches.iter().any(|m| *m == code)
|| settings.external.contains(code)
{
valid_codes.push(code);
} else {
if let Ok(rule) = Rule::from_code(code) {
@ -189,10 +150,6 @@ pub fn check_noqa(
&& unknown_codes.is_empty()
&& unmatched_codes.is_empty())
{
let start_char = lines[row][..start_byte].chars().count();
let end_char =
start_char + lines[row][start_byte..end_byte].chars().count();
let mut diagnostic = Diagnostic::new(
UnusedNOQA {
codes: Some(UnusedCodes {
@ -210,26 +167,20 @@ pub fn check_noqa(
.collect(),
}),
},
Range::new(
Location::new(row + 1, start_char),
Location::new(row + 1, end_char),
),
*range,
);
if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
if valid_codes.is_empty() {
diagnostic.set_fix(delete_noqa(
row,
lines[row],
leading_spaces,
start_byte,
end_byte,
trailing_spaces,
*leading_spaces,
*range,
*trailing_spaces,
locator,
));
} else {
diagnostic.set_fix(Edit::replacement(
diagnostic.set_fix(Edit::range_replacement(
format!("# noqa: {}", valid_codes.join(", ")),
Location::new(row + 1, start_char),
Location::new(row + 1, end_char),
*range,
));
}
}
@ -247,39 +198,37 @@ pub fn check_noqa(
/// Generate an [`Edit`] to delete a `noqa` directive.
fn delete_noqa(
row: usize,
line: &str,
leading_spaces: usize,
start_byte: usize,
end_byte: usize,
trailing_spaces: usize,
leading_spaces: TextSize,
noqa_range: TextRange,
trailing_spaces: TextSize,
locator: &Locator,
) -> Edit {
if start_byte - leading_spaces == 0 && end_byte == line.len() {
// Ex) `# noqa`
Edit::deletion(Location::new(row + 1, 0), Location::new(row + 2, 0))
} else if end_byte == line.len() {
// Ex) `x = 1 # noqa`
let start_char = line[..start_byte].chars().count();
let end_char = start_char + line[start_byte..end_byte].chars().count();
Edit::deletion(
Location::new(row + 1, start_char - leading_spaces),
Location::new(row + 1, end_char + trailing_spaces),
let line_range = locator.line_range(noqa_range.start());
// Ex) `# noqa`
if line_range
== TextRange::new(
noqa_range.start() - leading_spaces,
noqa_range.end() + trailing_spaces,
)
} else if line[end_byte..].trim_start().starts_with('#') {
// Ex) `x = 1 # noqa # type: ignore`
let start_char = line[..start_byte].chars().count();
let end_char = start_char + line[start_byte..end_byte].chars().count();
{
let full_line_end = locator.full_line_end(line_range.end());
Edit::deletion(line_range.start(), full_line_end)
}
// Ex) `x = 1 # noqa`
else if noqa_range.end() + trailing_spaces == line_range.end() {
Edit::deletion(noqa_range.start() - leading_spaces, line_range.end())
}
// Ex) `x = 1 # noqa # type: ignore`
else if locator.contents()[usize::from(noqa_range.end() + trailing_spaces)..].starts_with('#')
{
Edit::deletion(noqa_range.start(), noqa_range.end() + trailing_spaces)
}
// Ex) `x = 1 # noqa here`
else {
Edit::deletion(
Location::new(row + 1, start_char),
Location::new(row + 1, end_char + trailing_spaces),
)
} else {
// Ex) `x = 1 # noqa here`
let start_char = line[..start_byte].chars().count();
let end_char = start_char + line[start_byte..end_byte].chars().count();
Edit::deletion(
Location::new(row + 1, start_char + 1 + 1),
Location::new(row + 1, end_char + trailing_spaces),
noqa_range.start() + "# ".text_len(),
noqa_range.end() + trailing_spaces,
)
}
}
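
For readers mapping the old row/column logic onto the new byte-offset logic, here is a minimal, self-contained sketch of the four deletion cases above, with plain `usize` offsets standing in for `TextSize`/`TextRange` and a manual line-boundary search standing in for `Locator`. The helper name and signature are illustrative, not the crate's API.

    // Pick the byte range to delete for an unused `# noqa` directive.
    fn deletion_range(
        source: &str,
        noqa_start: usize,
        noqa_end: usize,
        leading_spaces: usize,
        trailing_spaces: usize,
    ) -> (usize, usize) {
        // Recover the enclosing line as byte offsets.
        let line_start = source[..noqa_start].rfind('\n').map_or(0, |i| i + 1);
        let line_end = source[noqa_end..]
            .find('\n')
            .map_or(source.len(), |i| noqa_end + i);

        if line_start == noqa_start - leading_spaces && noqa_end + trailing_spaces == line_end {
            // Ex) `# noqa` on its own line: delete the whole line, newline included.
            (line_start, (line_end + 1).min(source.len()))
        } else if noqa_end + trailing_spaces == line_end {
            // Ex) `x = 1  # noqa`: delete from the leading spaces to the line end.
            (noqa_start - leading_spaces, line_end)
        } else if source[noqa_end + trailing_spaces..].starts_with('#') {
            // Ex) `x = 1  # noqa  # type: ignore`: keep the trailing comment.
            (noqa_start, noqa_end + trailing_spaces)
        } else {
            // Ex) `x = 1  # noqa here`: keep the `# ` prefix and the trailing text.
            (noqa_start + "# ".len(), noqa_end + trailing_spaces)
        }
    }

    fn main() {
        let src = "x = 1  # noqa\n";
        // `# noqa` spans bytes 7..13; two leading spaces precede it.
        assert_eq!(deletion_range(src, 7, 13, 2, 0), (5, 13));
    }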

View file

@ -1,5 +1,6 @@
//! Lint rules based on checking physical lines.
use ruff_text_size::TextSize;
use std::path::Path;
use ruff_diagnostics::Diagnostic;
@ -25,7 +26,7 @@ pub fn check_physical_lines(
locator: &Locator,
stylist: &Stylist,
indexer: &Indexer,
doc_lines: &[usize],
doc_lines: &[TextSize],
settings: &Settings,
autofix: flags::Autofix,
) -> Vec<Diagnostic> {
@ -55,20 +56,19 @@ pub fn check_physical_lines(
let fix_shebang_whitespace =
autofix.into() && settings.rules.should_fix(Rule::ShebangLeadingWhitespace);
let mut commented_lines_iter = indexer.commented_lines().iter().peekable();
let mut commented_lines_iter = indexer.comment_ranges().iter().peekable();
let mut doc_lines_iter = doc_lines.iter().peekable();
let string_lines = indexer.string_ranges();
let string_lines = indexer.triple_quoted_string_ranges();
for (index, line) in locator.contents().universal_newlines().enumerate() {
while commented_lines_iter
.next_if(|lineno| &(index + 1) == *lineno)
.next_if(|comment_range| line.range().contains_range(**comment_range))
.is_some()
{
if enforce_unnecessary_coding_comment {
if index < 2 {
if let Some(diagnostic) =
unnecessary_coding_comment(index, line, fix_unnecessary_coding_comment)
unnecessary_coding_comment(&line, fix_unnecessary_coding_comment)
{
diagnostics.push(diagnostic);
}
@ -76,11 +76,11 @@ pub fn check_physical_lines(
}
if enforce_blanket_type_ignore {
blanket_type_ignore(&mut diagnostics, index, line);
blanket_type_ignore(&mut diagnostics, &line);
}
if enforce_blanket_noqa {
blanket_noqa(&mut diagnostics, index, line);
blanket_noqa(&mut diagnostics, &line);
}
if enforce_shebang_missing
@ -89,31 +89,31 @@ pub fn check_physical_lines(
|| enforce_shebang_newline
|| enforce_shebang_python
{
let shebang = extract_shebang(line);
let shebang = extract_shebang(&line);
if enforce_shebang_not_executable {
if let Some(diagnostic) = shebang_not_executable(path, index, &shebang) {
if let Some(diagnostic) = shebang_not_executable(path, line.range(), &shebang) {
diagnostics.push(diagnostic);
}
}
if enforce_shebang_missing {
if !has_any_shebang && matches!(shebang, ShebangDirective::Match(_, _, _, _)) {
if !has_any_shebang && matches!(shebang, ShebangDirective::Match(..)) {
has_any_shebang = true;
}
}
if enforce_shebang_whitespace {
if let Some(diagnostic) =
shebang_whitespace(index, &shebang, fix_shebang_whitespace)
shebang_whitespace(line.range(), &shebang, fix_shebang_whitespace)
{
diagnostics.push(diagnostic);
}
}
if enforce_shebang_newline {
if let Some(diagnostic) = shebang_newline(index, &shebang) {
if let Some(diagnostic) = shebang_newline(line.range(), &shebang, index == 0) {
diagnostics.push(diagnostic);
}
}
if enforce_shebang_python {
if let Some(diagnostic) = shebang_python(index, &shebang) {
if let Some(diagnostic) = shebang_python(line.range(), &shebang) {
diagnostics.push(diagnostic);
}
}
@ -121,40 +121,40 @@ pub fn check_physical_lines(
}
while doc_lines_iter
.next_if(|lineno| &(index + 1) == *lineno)
.next_if(|doc_line_start| line.range().contains(**doc_line_start))
.is_some()
{
if enforce_doc_line_too_long {
if let Some(diagnostic) = doc_line_too_long(index, line, settings) {
if let Some(diagnostic) = doc_line_too_long(&line, settings) {
diagnostics.push(diagnostic);
}
}
}
if enforce_mixed_spaces_and_tabs {
if let Some(diagnostic) = mixed_spaces_and_tabs(index, line) {
if let Some(diagnostic) = mixed_spaces_and_tabs(&line) {
diagnostics.push(diagnostic);
}
}
if enforce_line_too_long {
if let Some(diagnostic) = line_too_long(index, line, settings) {
if let Some(diagnostic) = line_too_long(&line, settings) {
diagnostics.push(diagnostic);
}
}
if enforce_bidirectional_unicode {
diagnostics.extend(pylint::rules::bidirectional_unicode(index, line));
diagnostics.extend(pylint::rules::bidirectional_unicode(&line));
}
if enforce_trailing_whitespace || enforce_blank_line_contains_whitespace {
if let Some(diagnostic) = trailing_whitespace(index, line, settings, autofix) {
if let Some(diagnostic) = trailing_whitespace(&line, settings, autofix) {
diagnostics.push(diagnostic);
}
}
if enforce_tab_indentation {
if let Some(diagnostic) = tab_indentation(index + 1, line, string_lines) {
if let Some(diagnostic) = tab_indentation(&line, string_lines) {
diagnostics.push(diagnostic);
}
}
@ -197,7 +197,7 @@ mod tests {
let line = "'\u{4e9c}' * 2"; // 7 in UTF-32, 9 in UTF-8.
let locator = Locator::new(line);
let tokens: Vec<_> = lex(line, Mode::Module).collect();
let indexer: Indexer = tokens.as_slice().into();
let indexer = Indexer::from_tokens(&tokens, &locator);
let stylist = Stylist::from_tokens(&tokens, &locator);
let check_with_max_line_length = |line_length: usize| {

View file

@ -64,7 +64,7 @@ pub fn check_tokens(
// RUF001, RUF002, RUF003
if enforce_ambiguous_unicode_character {
let mut state_machine = StateMachine::default();
for &(start, ref tok, end) in tokens.iter().flatten() {
for &(ref tok, range) in tokens.iter().flatten() {
let is_docstring = if enforce_ambiguous_unicode_character {
state_machine.consume(tok)
} else {
@ -74,8 +74,7 @@ pub fn check_tokens(
if matches!(tok, Tok::String { .. } | Tok::Comment(_)) {
diagnostics.extend(ruff::rules::ambiguous_unicode_character(
locator,
start,
end,
range,
if matches!(tok, Tok::String { .. }) {
if is_docstring {
Context::Docstring
@ -94,10 +93,10 @@ pub fn check_tokens(
// ERA001
if enforce_commented_out_code {
for (start, tok, end) in tokens.iter().flatten() {
for (tok, range) in tokens.iter().flatten() {
if matches!(tok, Tok::Comment(_)) {
if let Some(diagnostic) =
eradicate::rules::commented_out_code(locator, *start, *end, settings, autofix)
eradicate::rules::commented_out_code(locator, *range, settings, autofix)
{
diagnostics.push(diagnostic);
}
@ -107,12 +106,11 @@ pub fn check_tokens(
// W605
if enforce_invalid_escape_sequence {
for (start, tok, end) in tokens.iter().flatten() {
for (tok, range) in tokens.iter().flatten() {
if matches!(tok, Tok::String { .. }) {
diagnostics.extend(pycodestyle::rules::invalid_escape_sequence(
locator,
*start,
*end,
*range,
autofix.into() && settings.rules.should_fix(Rule::InvalidEscapeSequence),
));
}
@ -120,10 +118,10 @@ pub fn check_tokens(
}
// PLE2510, PLE2512, PLE2513
if enforce_invalid_string_character {
for (start, tok, end) in tokens.iter().flatten() {
for (tok, range) in tokens.iter().flatten() {
if matches!(tok, Tok::String { .. }) {
diagnostics.extend(
pylint::rules::invalid_string_characters(locator, *start, *end, autofix.into())
pylint::rules::invalid_string_characters(locator, *range, autofix.into())
.into_iter()
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
);
@ -155,6 +153,7 @@ pub fn check_tokens(
flake8_implicit_str_concat::rules::implicit(
tokens,
&settings.flake8_implicit_str_concat,
locator,
)
.into_iter()
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),

View file

@ -1,8 +1,15 @@
use crate::registry::{Linter, Rule};
use std::fmt::Formatter;
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct NoqaCode(&'static str, &'static str);
impl std::fmt::Debug for NoqaCode {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for NoqaCode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "{}{}", self.0, self.1)

View file

@ -1,8 +1,9 @@
//! Extract `# noqa` and `# isort: skip` directives from tokenized source.
use crate::noqa::NoqaMapping;
use bitflags::bitflags;
use nohash_hasher::{IntMap, IntSet};
use rustpython_parser::ast::Location;
use ruff_python_ast::source_code::{Indexer, Locator};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
@ -11,7 +12,7 @@ use crate::settings::Settings;
bitflags! {
#[derive(Debug, Copy, Clone)]
pub struct Flags: u8 {
        const NOQA = 0b0000_0001;
const ISORT = 0b0000_0010;
}
}
@ -30,27 +31,50 @@ impl Flags {
}
}
#[derive(Default)]
#[derive(Default, Debug)]
pub struct IsortDirectives {
pub exclusions: IntSet<usize>,
pub splits: Vec<usize>,
/// Ranges for which sorting is disabled
pub exclusions: Vec<TextRange>,
/// Text positions at which splits should be inserted
pub splits: Vec<TextSize>,
pub skip_file: bool,
}
impl IsortDirectives {
pub fn is_excluded(&self, offset: TextSize) -> bool {
for range in &self.exclusions {
if range.contains(offset) {
return true;
}
if range.start() > offset {
break;
}
}
false
}
}
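
The early `break` in `is_excluded` relies on `exclusions` being sorted by start offset, so the scan can stop as soon as a range begins past the queried position. A reduced sketch of the same lookup, with `(start, end)` pairs standing in for `TextRange` (half-open, matching `TextRange::contains`):

    fn is_excluded(exclusions: &[(u32, u32)], offset: u32) -> bool {
        for &(start, end) in exclusions {
            if (start..end).contains(&offset) {
                return true;
            }
            if start > offset {
                break; // All remaining ranges start even later.
            }
        }
        false
    }

    fn main() {
        let exclusions = [(0, 25), (40, 60)];
        assert!(is_excluded(&exclusions, 10));
        assert!(!is_excluded(&exclusions, 30));
    }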
pub struct Directives {
pub noqa_line_for: IntMap<usize, usize>,
pub noqa_line_for: NoqaMapping,
pub isort: IsortDirectives,
}
pub fn extract_directives(lxr: &[LexResult], flags: Flags) -> Directives {
pub fn extract_directives(
lxr: &[LexResult],
flags: Flags,
locator: &Locator,
indexer: &Indexer,
) -> Directives {
Directives {
noqa_line_for: if flags.contains(Flags::NOQA) {
extract_noqa_line_for(lxr)
extract_noqa_line_for(lxr, locator, indexer)
} else {
IntMap::default()
NoqaMapping::default()
},
isort: if flags.contains(Flags::ISORT) {
extract_isort_directives(lxr)
extract_isort_directives(lxr, locator)
} else {
IsortDirectives::default()
},
@ -58,48 +82,92 @@ pub fn extract_directives(lxr: &[LexResult], flags: Flags) -> Directives {
}
/// Extract a mapping from logical line to noqa line.
pub fn extract_noqa_line_for(lxr: &[LexResult]) -> IntMap<usize, usize> {
let mut noqa_line_for: IntMap<usize, usize> = IntMap::default();
let mut prev_non_newline: Option<(&Location, &Tok, &Location)> = None;
for (start, tok, end) in lxr.iter().flatten() {
if matches!(tok, Tok::EndOfFile) {
break;
}
// For multi-line strings, we expect `noqa` directives on the last line of the
// string.
if matches!(tok, Tok::String { .. }) && end.row() > start.row() {
for i in start.row()..end.row() {
noqa_line_for.insert(i, end.row());
pub fn extract_noqa_line_for(
lxr: &[LexResult],
locator: &Locator,
indexer: &Indexer,
) -> NoqaMapping {
let mut string_mappings = Vec::new();
for (tok, range) in lxr.iter().flatten() {
match tok {
Tok::EndOfFile => {
break;
}
}
// For continuations, we expect `noqa` directives on the last line of the
// continuation.
if matches!(
tok,
Tok::Newline | Tok::NonLogicalNewline | Tok::Comment(..)
) {
if let Some((.., end)) = prev_non_newline {
for i in end.row()..start.row() {
noqa_line_for.insert(i, start.row());
// For multi-line strings, we expect `noqa` directives on the last line of the
// string.
Tok::String {
triple_quoted: true,
..
} => {
if locator.contains_line_break(*range) {
string_mappings.push(*range);
}
}
prev_non_newline = None;
} else if prev_non_newline.is_none() {
prev_non_newline = Some((start, tok, end));
_ => {}
}
}
noqa_line_for
let mut continuation_mappings = Vec::new();
// For continuations, we expect `noqa` directives on the last line of the
// continuation.
let mut last: Option<TextRange> = None;
for continuation_line in indexer.continuation_line_starts() {
let line_end = locator.full_line_end(*continuation_line);
if let Some(last_range) = last.take() {
if last_range.end() == *continuation_line {
last = Some(TextRange::new(last_range.start(), line_end));
continue;
}
// The ranges are not contiguous: flush the previous continuation and start a new one.
continuation_mappings.push(last_range);
}
last = Some(TextRange::new(*continuation_line, line_end));
}
if let Some(last_range) = last.take() {
continuation_mappings.push(last_range);
}
// Merge the mappings in sorted order
let mut mappings =
NoqaMapping::with_capacity(continuation_mappings.len() + string_mappings.len());
let mut continuation_mappings = continuation_mappings.into_iter().peekable();
let mut string_mappings = string_mappings.into_iter().peekable();
while let (Some(continuation), Some(string)) =
(continuation_mappings.peek(), string_mappings.peek())
{
if continuation.start() <= string.start() {
mappings.push_mapping(continuation_mappings.next().unwrap());
} else {
mappings.push_mapping(string_mappings.next().unwrap());
}
}
for mapping in continuation_mappings {
mappings.push_mapping(mapping);
}
for mapping in string_mappings {
mappings.push_mapping(mapping);
}
mappings
}
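
The merge step above is an ordinary two-way merge of independently sorted sequences via peekable iterators. A stripped-down version, with `(start, end)` pairs in place of `TextRange`:

    // Merge two sorted lists of ranges into one sorted list.
    fn merge_sorted(a: Vec<(u32, u32)>, b: Vec<(u32, u32)>) -> Vec<(u32, u32)> {
        let mut merged = Vec::with_capacity(a.len() + b.len());
        let mut a = a.into_iter().peekable();
        let mut b = b.into_iter().peekable();

        while let (Some(x), Some(y)) = (a.peek(), b.peek()) {
            if x.0 <= y.0 {
                merged.push(a.next().unwrap());
            } else {
                merged.push(b.next().unwrap());
            }
        }
        // At most one of the two iterators still has elements.
        merged.extend(a);
        merged.extend(b);
        merged
    }

    fn main() {
        let continuations = vec![(0, 6), (30, 40)];
        let strings = vec![(10, 22)];
        assert_eq!(
            merge_sorted(continuations, strings),
            vec![(0, 6), (10, 22), (30, 40)]
        );
    }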
/// Extract a set of lines over which to disable isort.
pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
let mut exclusions: IntSet<usize> = IntSet::default();
let mut splits: Vec<usize> = Vec::default();
let mut off: Option<Location> = None;
let mut last: Option<Location> = None;
for &(start, ref tok, end) in lxr.iter().flatten() {
last = Some(end);
/// Extract a set of ranges over which to disable isort.
pub fn extract_isort_directives(lxr: &[LexResult], locator: &Locator) -> IsortDirectives {
let mut exclusions: Vec<TextRange> = Vec::default();
let mut splits: Vec<TextSize> = Vec::default();
let mut off: Option<TextSize> = None;
for &(ref tok, range) in lxr.iter().flatten() {
let Tok::Comment(comment_text) = tok else {
continue;
};
@ -109,7 +177,7 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
// required to include the space, and must appear on their own lines.
let comment_text = comment_text.trim_end();
if matches!(comment_text, "# isort: split" | "# ruff: isort: split") {
splits.push(start.row());
splits.push(range.start());
} else if matches!(
comment_text,
"# isort: skip_file"
@ -123,30 +191,25 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
};
} else if off.is_some() {
if comment_text == "# isort: on" || comment_text == "# ruff: isort: on" {
if let Some(start) = off {
for row in start.row() + 1..=end.row() {
exclusions.insert(row);
}
if let Some(exclusion_start) = off {
exclusions.push(TextRange::new(exclusion_start, range.start()));
}
off = None;
}
} else {
if comment_text.contains("isort: skip") || comment_text.contains("isort:skip") {
exclusions.insert(start.row());
exclusions.push(locator.line_range(range.start()));
} else if comment_text == "# isort: off" || comment_text == "# ruff: isort: off" {
off = Some(start);
off = Some(range.start());
}
}
}
if let Some(start) = off {
// Enforce unterminated `isort: off`.
if let Some(end) = last {
for row in start.row() + 1..=end.row() {
exclusions.insert(row);
}
}
exclusions.push(TextRange::new(start, locator.contents().text_len()));
}
IsortDirectives {
exclusions,
splits,
@ -156,120 +219,98 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
#[cfg(test)]
mod tests {
use nohash_hasher::{IntMap, IntSet};
use ruff_python_ast::source_code::{Indexer, Locator};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode};
use crate::directives::{extract_isort_directives, extract_noqa_line_for};
use crate::noqa::NoqaMapping;
fn noqa_mappings(contents: &str) -> NoqaMapping {
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let indexer = Indexer::from_tokens(&lxr, &locator);
extract_noqa_line_for(&lxr, &locator, &indexer)
}
#[test]
fn noqa_extraction() {
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = 2
z = x + 1",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let contents = "x = 1
y = 2 \
+ 1
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(
"
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let contents = "
x = 1
y = 2
z = x + 1",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
z = x + 1";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
let contents = "x = 1
y = 2
z = x + 1
",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
let contents = "x = 1
y = 2
z = x + 1
",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let lxr: Vec<LexResult> = lexer::lex(
"x = '''abc
let contents = "x = '''abc
def
ghi
'''
y = 2
z = x + 1",
Mode::Module,
)
.collect();
z = x + 1";
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(1, 4), (2, 4), (3, 4)])
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(4), TextSize::from(22)),])
);
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
let contents = "x = 1
y = '''abc
def
ghi
'''
z = 2",
Mode::Module,
)
.collect();
z = 2";
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(10), TextSize::from(28))])
);
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
let contents = "x = 1
y = '''abc
def
ghi
'''",
Mode::Module,
)
.collect();
'''";
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(10), TextSize::from(28))])
);
let lxr: Vec<LexResult> = lexer::lex(
r#"x = \
1"#,
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::from_iter([(1, 2)]));
let contents = r#"x = \
1"#;
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(6))])
);
let lxr: Vec<LexResult> = lexer::lex(
r#"from foo import \
let contents = r#"from foo import \
bar as baz, \
qux as quux"#,
Mode::Module,
)
.collect();
qux as quux"#;
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(1, 3), (2, 3)])
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(36))])
);
let lxr: Vec<LexResult> = lexer::lex(
r#"
let contents = r#"
# Foo
from foo import \
bar as baz, \
@ -277,13 +318,14 @@ from foo import \
x = \
1
y = \
2"#,
Mode::Module,
)
.collect();
2"#;
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(3, 5), (4, 5), (6, 7), (8, 9)])
noqa_mappings(contents),
NoqaMapping::from_iter([
TextRange::new(TextSize::from(7), TextSize::from(43)),
TextRange::new(TextSize::from(65), TextSize::from(71)),
TextRange::new(TextSize::from(77), TextSize::from(83)),
])
);
}
@ -293,7 +335,10 @@ y = \
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
let contents = "# isort: off
x = 1
@ -302,8 +347,8 @@ y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4])
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(25))])
);
let contents = "# isort: off
@ -315,8 +360,8 @@ z = x + 1
# isort: on";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4, 5])
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(38))])
);
let contents = "# isort: off
@ -325,8 +370,8 @@ y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4])
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::from_iter([TextRange::at(TextSize::from(0), contents.text_len())])
);
let contents = "# isort: skip_file
@ -334,7 +379,10 @@ x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
let contents = "# isort: off
x = 1
@ -343,7 +391,10 @@ y = 2
# isort: skip_file
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
}
#[test]
@ -352,19 +403,28 @@ z = x + 1";
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, Vec::<usize>::new());
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
Vec::new()
);
let contents = "x = 1
y = 2
# isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![3]);
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
vec![TextSize::from(12)]
);
let contents = "x = 1
y = 2 # isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![2]);
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
vec![TextSize::from(13)]
);
}
}

View file

@ -1,8 +1,10 @@
//! Doc line extraction. In this context, a doc line is a line consisting of a
//! standalone comment or a constant string statement.
use ruff_text_size::{TextRange, TextSize};
use std::iter::FusedIterator;
use ruff_python_ast::source_code::Locator;
use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
@ -11,46 +13,56 @@ use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor;
/// Extract doc lines (standalone comments) from a token sequence.
pub fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines {
DocLines::new(lxr)
pub fn doc_lines_from_tokens<'a>(lxr: &'a [LexResult], locator: &'a Locator<'a>) -> DocLines<'a> {
DocLines::new(lxr, locator)
}
pub struct DocLines<'a> {
inner: std::iter::Flatten<core::slice::Iter<'a, LexResult>>,
prev: Option<usize>,
locator: &'a Locator<'a>,
prev: TextSize,
}
impl<'a> DocLines<'a> {
fn new(lxr: &'a [LexResult]) -> Self {
fn new(lxr: &'a [LexResult], locator: &'a Locator) -> Self {
Self {
inner: lxr.iter().flatten(),
prev: None,
locator,
prev: TextSize::default(),
}
}
}
impl Iterator for DocLines<'_> {
type Item = usize;
type Item = TextSize;
fn next(&mut self) -> Option<Self::Item> {
let mut at_start_of_line = true;
loop {
let (start, tok, end) = self.inner.next()?;
let (tok, range) = self.inner.next()?;
match tok {
Tok::Indent | Tok::Dedent | Tok::Newline => continue,
Tok::Comment(..) => {
if let Some(prev) = self.prev {
if start.row() > prev {
break Some(start.row());
}
} else {
break Some(start.row());
if at_start_of_line
|| self
.locator
.contains_line_break(TextRange::new(self.prev, range.start()))
{
break Some(range.start());
}
}
_ => {}
Tok::Newline => {
at_start_of_line = true;
}
Tok::Indent | Tok::Dedent => {
// ignore
}
_ => {
at_start_of_line = false;
}
}
self.prev = Some(end.row());
self.prev = range.end();
}
}
}
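
The `contains_line_break` check in the iterator above boils down to: a comment opens a doc line when a line break separates it from the end of the previous token (the `at_start_of_line` flag additionally covers the start of the file). A small sketch of that test, with byte offsets in place of `TextSize` and a raw source slice in place of `Locator`; the helper name is illustrative:

    fn on_own_line(source: &str, prev_token_end: usize, comment_start: usize) -> bool {
        source[prev_token_end..comment_start]
            .bytes()
            .any(|b| b == b'\n' || b == b'\r')
    }

    fn main() {
        let source = "x = 1  # trailing\n# own line\n";
        assert!(!on_own_line(source, 5, 7)); // `# trailing` follows code.
        assert!(on_own_line(source, 17, 18)); // `# own line` starts its line.
    }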
@ -59,7 +71,7 @@ impl FusedIterator for DocLines<'_> {}
#[derive(Default)]
struct StringLinesVisitor {
string_lines: Vec<usize>,
string_lines: Vec<TextSize>,
}
impl Visitor<'_> for StringLinesVisitor {
@ -70,16 +82,15 @@ impl Visitor<'_> for StringLinesVisitor {
..
} = &value.node
{
self.string_lines
.extend(value.location.row()..=value.end_location.unwrap().row());
self.string_lines.push(value.start());
}
}
visitor::walk_stmt(self, stmt);
}
}
/// Extract doc lines (standalone strings) from an AST.
pub fn doc_lines_from_ast(python_ast: &Suite) -> Vec<usize> {
/// Extract the start positions of doc lines (standalone strings) from an AST.
pub fn doc_lines_from_ast(python_ast: &Suite) -> Vec<TextSize> {
let mut visitor = StringLinesVisitor::default();
visitor.visit_body(python_ast);
visitor.string_lines

View file

@ -1,4 +1,7 @@
use ruff_text_size::{TextRange, TextSize};
use rustpython_parser::ast::{Expr, Stmt};
use std::fmt::{Debug, Formatter};
use std::ops::Deref;
use ruff_python_semantic::analyze::visibility::{
class_visibility, function_visibility, method_visibility, Modifier, Visibility, VisibleScope,
@ -25,11 +28,78 @@ pub struct Definition<'a> {
pub struct Docstring<'a> {
pub kind: DefinitionKind<'a>,
pub expr: &'a Expr,
/// The content of the docstring, including the leading and trailing quotes.
pub contents: &'a str,
pub body: &'a str,
/// The range of the docstring body (without the quotes). The range is relative to [`Self::contents`].
pub body_range: TextRange,
pub indentation: &'a str,
}
impl<'a> Docstring<'a> {
pub fn body(&self) -> DocstringBody {
DocstringBody { docstring: self }
}
pub const fn start(&self) -> TextSize {
self.expr.start()
}
pub const fn end(&self) -> TextSize {
self.expr.end()
}
pub const fn range(&self) -> TextRange {
self.expr.range()
}
pub fn leading_quote(&self) -> &'a str {
&self.contents[TextRange::up_to(self.body_range.start())]
}
}
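
To make the relative `body_range` concrete: a docstring carries one `&str` (quotes included) plus a body range relative to it, so the leading quote falls out as the prefix before the body and absolute positions are recovered by adding the docstring's start offset. A minimal sketch with plain `usize` offsets; field and method names here are illustrative, not the crate's:

    struct Docstring<'a> {
        start: usize,      // absolute offset of the docstring in the file
        contents: &'a str, // including the quotes
        body_start: usize, // relative to `contents`
        body_end: usize,   // relative to `contents`
    }

    impl<'a> Docstring<'a> {
        fn leading_quote(&self) -> &'a str {
            &self.contents[..self.body_start]
        }
        fn body(&self) -> &'a str {
            &self.contents[self.body_start..self.body_end]
        }
        fn absolute_body_range(&self) -> (usize, usize) {
            (self.start + self.body_start, self.start + self.body_end)
        }
    }

    fn main() {
        let d = Docstring {
            start: 10,
            contents: "\"\"\"Summary.\"\"\"",
            body_start: 3,
            body_end: 11,
        };
        assert_eq!(d.leading_quote(), "\"\"\"");
        assert_eq!(d.body(), "Summary.");
        assert_eq!(d.absolute_body_range(), (13, 21));
    }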
#[derive(Copy, Clone)]
pub struct DocstringBody<'a> {
docstring: &'a Docstring<'a>,
}
impl<'a> DocstringBody<'a> {
#[inline]
pub fn start(self) -> TextSize {
self.range().start()
}
#[inline]
pub fn end(self) -> TextSize {
self.range().end()
}
pub fn range(self) -> TextRange {
self.docstring.body_range + self.docstring.start()
}
pub fn as_str(self) -> &'a str {
&self.docstring.contents[self.docstring.body_range]
}
}
impl Deref for DocstringBody<'_> {
type Target = str;
fn deref(&self) -> &Self::Target {
self.as_str()
}
}
impl Debug for DocstringBody<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DocstringBody")
.field("text", &self.as_str())
.field("range", &self.range())
.finish()
}
}
#[derive(Copy, Clone)]
pub enum Documentable {
Class,

View file

@ -1,5 +1,10 @@
use ruff_python_ast::newlines::{StrExt, UniversalNewlineIterator};
use ruff_text_size::{TextLen, TextRange, TextSize};
use std::fmt::{Debug, Formatter};
use std::iter::FusedIterator;
use strum_macros::EnumIter;
use crate::docstrings::definition::{Docstring, DocstringBody};
use crate::docstrings::styles::SectionStyle;
use ruff_python_ast::whitespace;
@ -125,17 +130,259 @@ impl SectionKind {
}
}
pub(crate) struct SectionContexts<'a> {
contexts: Vec<SectionContextData>,
docstring: &'a Docstring<'a>,
}
impl<'a> SectionContexts<'a> {
/// Extract all `SectionContext` values from a docstring.
pub fn from_docstring(docstring: &'a Docstring<'a>, style: SectionStyle) -> Self {
let contents = docstring.body();
let mut contexts = Vec::new();
let mut last: Option<SectionContextData> = None;
let mut previous_line = None;
for line in contents.universal_newlines() {
if previous_line.is_none() {
// Skip the first line.
previous_line = Some(line.as_str());
continue;
}
if let Some(section_kind) = suspected_as_section(&line, style) {
let indent = whitespace::leading_space(&line);
let section_name = whitespace::leading_words(&line);
let section_name_range = TextRange::at(indent.text_len(), section_name.text_len());
if is_docstring_section(
&line,
section_name_range,
previous_line.unwrap_or_default(),
) {
if let Some(mut last) = last.take() {
last.range = TextRange::new(last.range.start(), line.start());
contexts.push(last);
}
last = Some(SectionContextData {
kind: section_kind,
name_range: section_name_range + line.start(),
range: TextRange::empty(line.start()),
summary_full_end: line.full_end(),
});
}
}
previous_line = Some(line.as_str());
}
if let Some(mut last) = last.take() {
last.range = TextRange::new(last.range.start(), contents.text_len());
contexts.push(last);
}
Self {
contexts,
docstring,
}
}
pub fn len(&self) -> usize {
self.contexts.len()
}
pub fn iter(&self) -> SectionContextsIter {
SectionContextsIter {
docstring_body: self.docstring.body(),
inner: self.contexts.iter(),
}
}
}
impl<'a> IntoIterator for &'a SectionContexts<'a> {
type Item = SectionContext<'a>;
type IntoIter = SectionContextsIter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl Debug for SectionContexts<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
pub struct SectionContextsIter<'a> {
docstring_body: DocstringBody<'a>,
inner: std::slice::Iter<'a, SectionContextData>,
}
impl<'a> Iterator for SectionContextsIter<'a> {
type Item = SectionContext<'a>;
fn next(&mut self) -> Option<Self::Item> {
let next = self.inner.next()?;
Some(SectionContext {
data: next,
docstring_body: self.docstring_body,
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
impl<'a> DoubleEndedIterator for SectionContextsIter<'a> {
fn next_back(&mut self) -> Option<Self::Item> {
let back = self.inner.next_back()?;
Some(SectionContext {
data: back,
docstring_body: self.docstring_body,
})
}
}
impl FusedIterator for SectionContextsIter<'_> {}
impl ExactSizeIterator for SectionContextsIter<'_> {}
#[derive(Debug)]
pub(crate) struct SectionContext<'a> {
/// The "kind" of the section, e.g. "SectionKind::Args" or "SectionKind::Returns".
pub(crate) kind: SectionKind,
struct SectionContextData {
kind: SectionKind,
/// Range of the section name, relative to the [`Docstring::body`]
name_range: TextRange,
/// Range from the start to the end of the section, relative to the [`Docstring::body`]
range: TextRange,
/// End of the summary, relative to the [`Docstring::body`]
summary_full_end: TextSize,
}
pub struct SectionContext<'a> {
data: &'a SectionContextData,
docstring_body: DocstringBody<'a>,
}
impl<'a> SectionContext<'a> {
pub fn is_last(&self) -> bool {
self.range().end() == self.docstring_body.end()
}
/// The `kind` of the section, e.g. [`SectionKind::Args`] or [`SectionKind::Returns`].
pub const fn kind(&self) -> SectionKind {
self.data.kind
}
/// The name of the section as it appears in the docstring, e.g. "Args" or "Returns".
pub(crate) section_name: &'a str,
pub(crate) previous_line: &'a str,
pub(crate) line: &'a str,
pub(crate) following_lines: &'a [&'a str],
pub(crate) is_last_section: bool,
pub(crate) original_index: usize,
pub fn section_name(&self) -> &'a str {
&self.docstring_body.as_str()[self.data.name_range]
}
/// Returns the rest of the summary line after the section name.
pub fn summary_after_section_name(&self) -> &'a str {
&self.summary_line()[usize::from(self.data.name_range.end() - self.data.range.start())..]
}
fn offset(&self) -> TextSize {
self.docstring_body.start()
}
/// The absolute range of the section name.
pub fn section_name_range(&self) -> TextRange {
self.data.name_range + self.offset()
}
/// The absolute range of the summary line, including the trailing newline character.
pub fn summary_full_range(&self) -> TextRange {
self.summary_full_range_relative() + self.offset()
}
/// The absolute range of the summary line, excluding any trailing newline character.
pub fn summary_range(&self) -> TextRange {
TextRange::at(self.range().start(), self.summary_line().text_len())
}
/// Range of the summary line relative to [`Docstring::body`], including the trailing newline character.
fn summary_full_range_relative(&self) -> TextRange {
TextRange::new(self.range_relative().start(), self.data.summary_full_end)
}
/// Returns the range of this section relative to [`Docstring::body`]
const fn range_relative(&self) -> TextRange {
self.data.range
}
/// The absolute range of the full section.
pub fn range(&self) -> TextRange {
self.range_relative() + self.offset()
}
/// The summary line without the trailing newline characters.
pub fn summary_line(&self) -> &'a str {
let full_summary = &self.docstring_body.as_str()[self.summary_full_range_relative()];
let mut bytes = full_summary.bytes().rev();
let newline_width = match bytes.next() {
Some(b'\n') => {
if bytes.next() == Some(b'\r') {
2
} else {
1
}
}
Some(b'\r') => 1,
_ => 0,
};
&full_summary[..full_summary.len() - newline_width]
}
/// Returns the text of the last line of the previous section, or `None` if this is the first section.
pub fn previous_line(&self) -> Option<&'a str> {
let previous =
&self.docstring_body.as_str()[TextRange::up_to(self.range_relative().start())];
previous.universal_newlines().last().map(|l| l.as_str())
}
/// Returns the lines belonging to this section after the summary line.
pub fn following_lines(&self) -> UniversalNewlineIterator<'a> {
let lines = self.following_lines_str();
UniversalNewlineIterator::with_offset(lines, self.offset() + self.data.summary_full_end)
}
fn following_lines_str(&self) -> &'a str {
&self.docstring_body.as_str()[self.following_range_relative()]
}
/// Returns the range to the following lines relative to [`Docstring::body`].
const fn following_range_relative(&self) -> TextRange {
TextRange::new(self.data.summary_full_end, self.range_relative().end())
}
/// Returns the absolute range of the following lines.
pub fn following_range(&self) -> TextRange {
self.following_range_relative() + self.offset()
}
}
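
The newline-trimming match in `summary_line` above is self-contained enough to test in isolation; a sketch of the same logic:

    // Strip one trailing `\n`, `\r\n`, or `\r` from a line, if present.
    fn trim_trailing_newline(line: &str) -> &str {
        let mut bytes = line.bytes().rev();
        let width = match bytes.next() {
            Some(b'\n') => {
                if bytes.next() == Some(b'\r') {
                    2
                } else {
                    1
                }
            }
            Some(b'\r') => 1,
            _ => 0,
        };
        &line[..line.len() - width]
    }

    fn main() {
        assert_eq!(trim_trailing_newline("Args:\r\n"), "Args:");
        assert_eq!(trim_trailing_newline("Args:\n"), "Args:");
        assert_eq!(trim_trailing_newline("Args:"), "Args:");
    }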
impl Debug for SectionContext<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SectionContext")
.field("kind", &self.kind())
.field("section_name", &self.section_name())
.field("summary_line", &self.summary_line())
.field("following_lines", &&self.following_lines_str())
.finish()
}
}
fn suspected_as_section(line: &str, style: SectionStyle) -> Option<SectionKind> {
@ -148,20 +395,15 @@ fn suspected_as_section(line: &str, style: SectionStyle) -> Option<SectionKind>
}
/// Check if the suspected context is really a section header.
fn is_docstring_section(context: &SectionContext) -> bool {
let section_name_suffix = context
.line
.trim()
.strip_prefix(context.section_name)
.unwrap()
.trim();
fn is_docstring_section(line: &str, section_name_range: TextRange, previous_lines: &str) -> bool {
let section_name_suffix = line[usize::from(section_name_range.end())..].trim();
let this_looks_like_a_section_name =
section_name_suffix == ":" || section_name_suffix.is_empty();
if !this_looks_like_a_section_name {
return false;
}
let prev_line = context.previous_line.trim();
let prev_line = previous_lines.trim();
let prev_line_ends_with_punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
.into_iter()
.any(|char| prev_line.ends_with(char));
@ -173,50 +415,3 @@ fn is_docstring_section(context: &SectionContext) -> bool {
true
}
/// Extract all `SectionContext` values from a docstring.
pub(crate) fn section_contexts<'a>(
lines: &'a [&'a str],
style: SectionStyle,
) -> Vec<SectionContext<'a>> {
let mut contexts = vec![];
for (kind, lineno) in lines
.iter()
.enumerate()
.skip(1)
.filter_map(|(lineno, line)| suspected_as_section(line, style).map(|kind| (kind, lineno)))
{
let context = SectionContext {
kind,
section_name: whitespace::leading_words(lines[lineno]),
previous_line: lines[lineno - 1],
line: lines[lineno],
following_lines: &lines[lineno + 1..],
original_index: lineno,
is_last_section: false,
};
if is_docstring_section(&context) {
contexts.push(context);
}
}
let mut truncated_contexts = Vec::with_capacity(contexts.len());
let mut end: Option<usize> = None;
for context in contexts.into_iter().rev() {
let next_end = context.original_index;
truncated_contexts.push(SectionContext {
kind: context.kind,
section_name: context.section_name,
previous_line: context.previous_line,
line: context.line,
following_lines: end.map_or(context.following_lines, |end| {
&lines[context.original_index + 1..end]
}),
original_index: context.original_index,
is_last_section: end.is_none(),
});
end = Some(next_end);
}
truncated_contexts.reverse();
truncated_contexts
}

View file

@ -2,8 +2,9 @@
use anyhow::Result;
use libcst_native::{Codegen, CodegenState, ImportAlias, Name, NameOrAttribute};
use ruff_text_size::TextSize;
use rustc_hash::FxHashMap;
use rustpython_parser::ast::{Location, Stmt, StmtKind, Suite};
use rustpython_parser::ast::{Stmt, StmtKind, Suite};
use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Edit;
@ -95,7 +96,7 @@ impl<'a> Importer<'a> {
/// Add the given member to an existing `StmtKind::ImportFrom` statement.
pub fn add_member(&self, stmt: &Stmt, member: &str) -> Result<Edit> {
let mut tree = match_module(self.locator.slice(stmt))?;
let mut tree = match_module(self.locator.slice(stmt.range()))?;
let import_from = match_import_from(&mut tree)?;
let aliases = match_aliases(import_from)?;
aliases.push(ImportAlias {
@ -113,11 +114,7 @@ impl<'a> Importer<'a> {
..CodegenState::default()
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
stmt.location,
stmt.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), stmt.range()))
}
}
@ -126,13 +123,13 @@ struct Insertion {
/// The content to add before the insertion.
prefix: &'static str,
/// The location at which to insert.
location: Location,
location: TextSize,
/// The content to add after the insertion.
suffix: &'static str,
}
impl Insertion {
fn new(prefix: &'static str, location: Location, suffix: &'static str) -> Self {
fn new(prefix: &'static str, location: TextSize, suffix: &'static str) -> Self {
Self {
prefix,
location,
@ -142,7 +139,7 @@ impl Insertion {
}
/// Find the end of the last docstring.
fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
fn match_docstring_end(body: &[Stmt]) -> Option<TextSize> {
let mut iter = body.iter();
let Some(mut stmt) = iter.next() else {
return None;
@ -156,7 +153,7 @@ fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
}
stmt = next;
}
Some(stmt.end_location.unwrap())
Some(stmt.end())
}
/// Find the location at which a "top-of-file" import should be inserted,
@ -173,17 +170,17 @@ fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
/// The location returned will be the start of the `import os` statement,
/// along with a trailing newline suffix.
fn end_of_statement_insertion(stmt: &Stmt, locator: &Locator, stylist: &Stylist) -> Insertion {
let location = stmt.end_location.unwrap();
let location = stmt.end();
let mut tokens = lexer::lex_located(locator.after(location), Mode::Module, location).flatten();
if let Some((.., Tok::Semi, end)) = tokens.next() {
if let Some((Tok::Semi, range)) = tokens.next() {
// If the first token after the docstring is a semicolon, insert after the semicolon as an
// inline statement.
Insertion::new(" ", end, ";")
Insertion::new(" ", range.end(), ";")
} else {
// Otherwise, insert on the next line.
Insertion::new(
"",
Location::new(location.row() + 1, 0),
locator.line_end(location),
stylist.line_ending().as_str(),
)
}
@ -210,22 +207,22 @@ fn top_of_file_insertion(body: &[Stmt], locator: &Locator, stylist: &Stylist) ->
let first_token = lexer::lex_located(locator.after(location), Mode::Module, location)
.flatten()
.next();
if let Some((.., Tok::Semi, end)) = first_token {
return Insertion::new(" ", end, ";");
if let Some((Tok::Semi, range)) = first_token {
return Insertion::new(" ", range.end(), ";");
}
// Otherwise, advance to the end of the current line.
Location::new(location.row() + 1, 0)
locator.full_line_end(location)
} else {
Location::default()
TextSize::default()
};
// Skip over any comments and empty lines.
for (.., tok, end) in
for (tok, range) in
lexer::lex_located(locator.after(location), Mode::Module, location).flatten()
{
if matches!(tok, Tok::Comment(..) | Tok::Newline) {
location = Location::new(end.row() + 1, 0);
location = locator.full_line_end(range.end());
} else {
break;
}
@ -237,8 +234,8 @@ fn top_of_file_insertion(body: &[Stmt], locator: &Locator, stylist: &Stylist) ->
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_text_size::TextSize;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::LexResult;
use ruff_python_ast::source_code::{LineEnding, Locator, Stylist};
@ -258,7 +255,7 @@ mod tests {
let contents = "";
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(1, 0), LineEnding::default().as_str())
Insertion::new("", TextSize::from(0), LineEnding::default().as_str())
);
let contents = r#"
@ -266,7 +263,7 @@ mod tests {
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(2, 0), LineEnding::default().as_str())
Insertion::new("", TextSize::from(19), LineEnding::default().as_str())
);
let contents = r#"
@ -275,7 +272,7 @@ mod tests {
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(2, 0), "\n")
Insertion::new("", TextSize::from(20), "\n")
);
let contents = r#"
@ -285,7 +282,7 @@ mod tests {
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(3, 0), "\n")
Insertion::new("", TextSize::from(40), "\n")
);
let contents = r#"
@ -294,7 +291,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(1, 0), "\n")
Insertion::new("", TextSize::from(0), "\n")
);
let contents = r#"
@ -303,7 +300,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(2, 0), "\n")
Insertion::new("", TextSize::from(23), "\n")
);
let contents = r#"
@ -313,7 +310,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(3, 0), "\n")
Insertion::new("", TextSize::from(43), "\n")
);
let contents = r#"
@ -323,7 +320,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(3, 0), "\n")
Insertion::new("", TextSize::from(43), "\n")
);
let contents = r#"
@ -332,7 +329,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(1, 0), "\n")
Insertion::new("", TextSize::from(0), "\n")
);
let contents = r#"
@ -341,7 +338,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new(" ", Location::new(1, 20), ";")
Insertion::new(" ", TextSize::from(20), ";")
);
let contents = r#"
@ -351,7 +348,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new(" ", Location::new(1, 20), ";")
Insertion::new(" ", TextSize::from(20), ";")
);
Ok(())

View file

@ -1,3 +1,4 @@
use ruff_text_size::TextRange;
use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::iter;
@ -7,7 +8,6 @@ use serde::Serialize;
use serde_json::error::Category;
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::types::Range;
use crate::jupyter::{CellType, JupyterNotebook, SourceValue};
use crate::rules::pycodestyle::rules::SyntaxError;
@ -18,7 +18,7 @@ pub const JUPYTER_NOTEBOOK_EXT: &str = "ipynb";
/// Jupyter Notebook indexing table
///
/// When we lint a Jupyter notebook, we have to translate the row/column based on
/// [`crate::message::Location`]
/// [`ruff_text_size::TextSize`]
/// to Jupyter notebook cell/row/column.
#[derive(Debug, Eq, PartialEq)]
pub struct JupyterIndex {
@ -46,7 +46,7 @@ impl JupyterNotebook {
IOError {
message: format!("{err}"),
},
Range::default(),
TextRange::default(),
)
})?);
let notebook: JupyterNotebook = match serde_json::from_reader(reader) {
@ -59,7 +59,7 @@ impl JupyterNotebook {
IOError {
message: format!("{err}"),
},
Range::default(),
TextRange::default(),
),
Category::Syntax | Category::Eof => {
// Maybe someone saved the python sources (those with the `# %%` separator)
@ -69,7 +69,7 @@ impl JupyterNotebook {
IOError {
message: format!("{err}"),
},
Range::default(),
TextRange::default(),
)
})?;
// Check if tokenizing was successful and the file is non-empty
@ -84,7 +84,7 @@ impl JupyterNotebook {
but this file isn't valid JSON: {err}"
),
},
Range::default(),
TextRange::default(),
)
} else {
Diagnostic::new(
@ -95,7 +95,7 @@ impl JupyterNotebook {
but found a Python source file: {err}"
),
},
Range::default(),
TextRange::default(),
)
}
}
@ -108,7 +108,7 @@ impl JupyterNotebook {
"This file does not match the schema expected of Jupyter Notebooks: {err}"
),
},
Range::default(),
TextRange::default(),
)
}
}
@ -126,7 +126,7 @@ impl JupyterNotebook {
notebook.nbformat
),
},
Range::default(),
TextRange::default(),
)));
}

View file

@ -6,7 +6,6 @@
//! [Ruff]: https://github.com/charliermarsh/ruff
pub use ruff_python_ast::source_code::round_trip;
pub use ruff_python_ast::types::Range;
pub use rule_selector::RuleSelector;
pub use rules::pycodestyle::rules::IOError;

View file

@ -23,6 +23,7 @@ use crate::checkers::physical_lines::check_physical_lines;
use crate::checkers::tokens::check_tokens;
use crate::directives::Directives;
use crate::doc_lines::{doc_lines_from_ast, doc_lines_from_tokens};
use crate::logging::DisplayParseError;
use crate::message::Message;
use crate::noqa::add_noqa;
use crate::registry::{AsRule, Rule};
@ -68,7 +69,6 @@ pub struct FixerResult<'a> {
pub fn check_path(
path: &Path,
package: Option<&Path>,
contents: &str,
tokens: Vec<LexResult>,
locator: &Locator,
stylist: &Stylist,
@ -88,7 +88,7 @@ pub fn check_path(
let use_doc_lines = settings.rules.enabled(Rule::DocLineTooLong);
let mut doc_lines = vec![];
if use_doc_lines {
doc_lines.extend(doc_lines_from_tokens(&tokens));
doc_lines.extend(doc_lines_from_tokens(&tokens, locator));
}
// Run the token-based rules.
@ -178,7 +178,7 @@ pub fn check_path(
// if it's disabled via any of the usual mechanisms (e.g., `noqa`,
// `per-file-ignores`), and the easiest way to detect that suppression is
// to see if the diagnostic persists to the end of the function.
pycodestyle::rules::syntax_error(&mut diagnostics, &parse_error);
pycodestyle::rules::syntax_error(&mut diagnostics, &parse_error, locator);
error = Some(parse_error);
}
}
@ -218,8 +218,8 @@ pub fn check_path(
{
let ignored = check_noqa(
&mut diagnostics,
contents,
indexer.commented_lines(),
locator,
indexer.comment_ranges(),
&directives.noqa_line_for,
settings,
error.as_ref().map_or(autofix, |_| flags::Autofix::Disabled),
@ -268,11 +268,15 @@ pub fn add_noqa_to_path(path: &Path, package: Option<&Path>, settings: &Settings
let stylist = Stylist::from_tokens(&tokens, &locator);
// Extra indices from the code.
let indexer: Indexer = tokens.as_slice().into();
let indexer = Indexer::from_tokens(&tokens, &locator);
// Extract the `# noqa` and `# isort: skip` directives from the source.
let directives =
directives::extract_directives(&tokens, directives::Flags::from_settings(settings));
let directives = directives::extract_directives(
&tokens,
directives::Flags::from_settings(settings),
&locator,
&indexer,
);
// Generate diagnostics, ignoring any existing `noqa` directives.
let LinterResult {
@ -281,7 +285,6 @@ pub fn add_noqa_to_path(path: &Path, package: Option<&Path>, settings: &Settings
} = check_path(
path,
package,
&contents,
tokens,
&locator,
&stylist,
@ -294,20 +297,15 @@ pub fn add_noqa_to_path(path: &Path, package: Option<&Path>, settings: &Settings
// Log any parse errors.
if let Some(err) = error {
error!(
"{}{}{} {err:?}",
"Failed to parse ".bold(),
fs::relativize_path(path).bold(),
":".bold()
);
error!("{}", DisplayParseError::new(err, locator.to_source_code()));
}
// Add any missing `# noqa` pragmas.
add_noqa(
path,
&diagnostics.0,
&contents,
indexer.commented_lines(),
&locator,
indexer.comment_ranges(),
&directives.noqa_line_for,
stylist.line_ending(),
)
@ -333,17 +331,20 @@ pub fn lint_only(
let stylist = Stylist::from_tokens(&tokens, &locator);
// Extra indices from the code.
let indexer: Indexer = tokens.as_slice().into();
let indexer = Indexer::from_tokens(&tokens, &locator);
// Extract the `# noqa` and `# isort: skip` directives from the source.
let directives =
directives::extract_directives(&tokens, directives::Flags::from_settings(settings));
let directives = directives::extract_directives(
&tokens,
directives::Flags::from_settings(settings),
&locator,
&indexer,
);
// Generate diagnostics.
let result = check_path(
path,
package,
contents,
tokens,
&locator,
&stylist,
@ -356,7 +357,7 @@ pub fn lint_only(
result.map(|(diagnostics, imports)| {
(
diagnostics_to_messages(diagnostics, path, settings, &locator, &directives),
diagnostics_to_messages(diagnostics, path, &locator, &directives),
imports,
)
})
@ -366,14 +367,15 @@ pub fn lint_only(
fn diagnostics_to_messages(
diagnostics: Vec<Diagnostic>,
path: &Path,
settings: &Settings,
locator: &Locator,
directives: &Directives,
) -> Vec<Message> {
let file = once_cell::unsync::Lazy::new(|| {
let mut builder = SourceFileBuilder::new(&path.to_string_lossy());
if settings.show_source {
builder.set_source_code(&locator.to_source_code());
let mut builder =
SourceFileBuilder::new(path.to_string_lossy().as_ref(), locator.contents());
if let Some(line_index) = locator.line_index() {
builder.set_line_index(line_index.clone());
}
builder.finish()
@ -382,9 +384,8 @@ fn diagnostics_to_messages(
diagnostics
.into_iter()
.map(|diagnostic| {
let lineno = diagnostic.location.row();
let noqa_row = *directives.noqa_line_for.get(&lineno).unwrap_or(&lineno);
Message::from_diagnostic(diagnostic, file.deref().clone(), noqa_row)
let noqa_offset = directives.noqa_line_for.resolve(diagnostic.start());
Message::from_diagnostic(diagnostic, file.deref().clone(), noqa_offset)
})
.collect()
}
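
A sketch of what `noqa_line_for.resolve` is assumed to do here, based on the mappings built in `extract_noqa_line_for`: a diagnostic that starts inside a remapped range (a multi-line string or a continuation) expects its `# noqa` at the end of that range; any other offset passes through unchanged. The function below is illustrative, not the crate's implementation.

    fn resolve(ranges: &[(u32, u32)], offset: u32) -> u32 {
        ranges
            .iter()
            .find(|(start, end)| (*start..*end).contains(&offset))
            .map_or(offset, |&(_, end)| end)
    }

    fn main() {
        let noqa_line_for = [(10, 28)]; // a triple-quoted string spanning several lines
        assert_eq!(resolve(&noqa_line_for, 15), 28); // inside the string
        assert_eq!(resolve(&noqa_line_for, 3), 3); // outside: unchanged
    }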
@ -421,17 +422,20 @@ pub fn lint_fix<'a>(
let stylist = Stylist::from_tokens(&tokens, &locator);
// Extra indices from the code.
let indexer: Indexer = tokens.as_slice().into();
let indexer = Indexer::from_tokens(&tokens, &locator);
// Extract the `# noqa` and `# isort: skip` directives from the source.
let directives =
directives::extract_directives(&tokens, directives::Flags::from_settings(settings));
let directives = directives::extract_directives(
&tokens,
directives::Flags::from_settings(settings),
&locator,
&indexer,
);
// Generate diagnostics.
let result = check_path(
path,
package,
&transformed,
tokens,
&locator,
&stylist,
@ -513,7 +517,7 @@ This indicates a bug in `{}`. If you could open an issue at:
return Ok(FixerResult {
result: result.map(|(diagnostics, imports)| {
(
diagnostics_to_messages(diagnostics, path, settings, &locator, &directives),
diagnostics_to_messages(diagnostics, path, &locator, &directives),
imports,
)
}),

View file

@ -1,10 +1,15 @@
use std::fmt::{Display, Formatter};
use std::path::Path;
use std::sync::Mutex;
use crate::fs;
use anyhow::Result;
use colored::Colorize;
use fern;
use log::Level;
use once_cell::sync::Lazy;
use ruff_python_ast::source_code::SourceCode;
use rustpython_parser::ParseError;
pub(crate) static WARNINGS: Lazy<Mutex<Vec<&'static str>>> = Lazy::new(Mutex::default);
@ -42,13 +47,13 @@ macro_rules! warn_user_once {
#[macro_export]
macro_rules! warn_user {
($($arg:tt)*) => {
($($arg:tt)*) => {{
use colored::Colorize;
use log::warn;
let message = format!("{}", format_args!($($arg)*));
warn!("{}", message.bold());
};
}};
}
#[macro_export]
@ -127,6 +132,34 @@ pub fn set_up_logging(level: &LogLevel) -> Result<()> {
Ok(())
}
pub struct DisplayParseError<'a> {
error: ParseError,
source_code: SourceCode<'a, 'a>,
}
impl<'a> DisplayParseError<'a> {
pub fn new(error: ParseError, source_code: SourceCode<'a, 'a>) -> Self {
Self { error, source_code }
}
}
impl Display for DisplayParseError<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let source_location = self.source_code.source_location(self.error.location);
write!(
f,
"{header} {path}{colon}{row}{colon}{column}{colon} {inner}",
header = "Failed to parse ".bold(),
path = fs::relativize_path(Path::new(&self.error.source_path)).bold(),
row = source_location.row,
column = source_location.column,
colon = ":".cyan(),
inner = &self.error.error
)
}
}
#[cfg(test)]
mod tests {
use crate::logging::LogLevel;

View file

@ -1,5 +1,6 @@
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
use std::io::Write;
/// Generate error logging commands for Azure Pipelines format.
@ -15,12 +16,15 @@ impl Emitter for AzureEmitter {
context: &EmitterContext,
) -> anyhow::Result<()> {
for message in messages {
let (line, col) = if context.is_jupyter_notebook(message.filename()) {
let location = if context.is_jupyter_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
(1, 0)
SourceLocation {
row: OneIndexed::from_zero_indexed(0),
column: OneIndexed::from_zero_indexed(0),
}
} else {
(message.location.row(), message.location.column())
message.compute_start_location()
};
writeln!(
@ -28,6 +32,8 @@ impl Emitter for AzureEmitter {
"##vso[task.logissue type=error\
;sourcepath={filename};linenumber={line};columnnumber={col};code={code};]{body}",
filename = message.filename(),
line = location.row,
col = location.column,
code = message.kind.rule().noqa_code(),
body = message.kind.body,
)?;

View file

@ -1,8 +1,7 @@
use crate::message::Message;
use colored::{Color, ColoredString, Colorize, Styles};
use ruff_diagnostics::Fix;
use ruff_python_ast::source_code::{OneIndexed, SourceCode};
use ruff_python_ast::types::Range;
use ruff_python_ast::source_code::{OneIndexed, SourceFile};
use ruff_text_size::{TextRange, TextSize};
use similar::{ChangeTag, TextDiff};
use std::fmt::{Display, Formatter};
@ -18,38 +17,39 @@ use std::num::NonZeroUsize;
/// * Compute the diff from the [`Edit`] because diff calculation is expensive.
pub(super) struct Diff<'a> {
fix: &'a Fix,
source_code: SourceCode<'a, 'a>,
source_code: &'a SourceFile,
}
impl<'a> Diff<'a> {
pub fn from_message(message: &'a Message) -> Option<Diff> {
match message.file.source_code() {
Some(source_code) if !message.fix.is_empty() => Some(Diff {
source_code,
if message.fix.is_empty() {
None
} else {
Some(Diff {
source_code: &message.file,
fix: &message.fix,
}),
_ => None,
})
}
}
}
impl Display for Diff<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let mut output = String::with_capacity(self.source_code.text().len());
let mut output = String::with_capacity(self.source_code.source_text().len());
let mut last_end = TextSize::default();
for edit in self.fix.edits() {
let edit_range = self
.source_code
.text_range(Range::new(edit.location(), edit.end_location()));
output.push_str(&self.source_code.text()[TextRange::new(last_end, edit_range.start())]);
output.push_str(
self.source_code
.slice(TextRange::new(last_end, edit.start())),
);
output.push_str(edit.content().unwrap_or_default());
last_end = edit_range.end();
last_end = edit.end();
}
output.push_str(&self.source_code.text()[usize::from(last_end)..]);
output.push_str(&self.source_code.source_text()[usize::from(last_end)..]);
let diff = TextDiff::from_lines(self.source_code.text(), &output);
let diff = TextDiff::from_lines(self.source_code.source_text(), &output);
writeln!(f, "{}", " Suggested fix".blue())?;
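
The loop above splices fixes into the source purely by byte offset: copy the untouched span since the last edit, emit the replacement, and advance. Reduced to its essentials, with `(start, end, replacement)` tuples standing in for `Edit`:

    fn apply_edits(source: &str, edits: &[(usize, usize, &str)]) -> String {
        let mut output = String::with_capacity(source.len());
        let mut last_end = 0;
        for &(start, end, replacement) in edits {
            output.push_str(&source[last_end..start]);
            output.push_str(replacement);
            last_end = end;
        }
        output.push_str(&source[last_end..]);
        output
    }

    fn main() {
        // Edits must be sorted and non-overlapping, as the fix machinery guarantees.
        let fixed = apply_edits("x == None", &[(2, 4, "is")]);
        assert_eq!(fixed, "x is None");
    }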

View file

@ -1,6 +1,7 @@
use crate::fs::relativize_path;
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
use std::io::Write;
/// Generate error workflow command in GitHub Actions format.
@ -16,30 +17,38 @@ impl Emitter for GithubEmitter {
context: &EmitterContext,
) -> anyhow::Result<()> {
for message in messages {
let (row, column) = if context.is_jupyter_notebook(message.filename()) {
let source_location = message.compute_start_location();
let location = if context.is_jupyter_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
(1, 0)
SourceLocation {
row: OneIndexed::from_zero_indexed(0),
column: OneIndexed::from_zero_indexed(0),
}
} else {
(message.location.row(), message.location.column())
source_location.clone()
};
let end_location = message.compute_end_location();
write!(
writer,
"::error title=Ruff \
({code}),file={file},line={row},col={column},endLine={end_row},endColumn={end_column}::",
code = message.kind.rule().noqa_code(),
file = message.filename(),
row = message.location.row(),
column = message.location.column(),
end_row = message.end_location.row(),
end_column = message.end_location.column(),
row = source_location.row,
column = source_location.column,
end_row = end_location.row,
end_column = end_location.column,
)?;
writeln!(
writer,
"{path}:{row}:{column}: {code} {body}",
path = relativize_path(message.filename()),
row = location.row,
column = location.column,
code = message.kind.rule().noqa_code(),
body = message.kind.body,
)?;

View file

@ -64,9 +64,11 @@ impl Serialize for SerializedMessages<'_> {
"end": 1
})
} else {
let start_location = message.compute_start_location();
let end_location = message.compute_end_location();
json!({
"begin": message.location.row(),
"end": message.end_location.row()
"begin": start_location.row,
"end": end_location.row
})
};
@@ -96,20 +98,16 @@ impl Serialize for SerializedMessages<'_> {
fn fingerprint(message: &Message) -> String {
let Message {
kind,
location,
end_location,
range,
fix: _fix,
file,
noqa_row: _noqa_row,
noqa_offset: _,
} = message;
let mut hasher = DefaultHasher::new();
kind.rule().hash(&mut hasher);
location.row().hash(&mut hasher);
location.column().hash(&mut hasher);
end_location.row().hash(&mut hasher);
end_location.column().hash(&mut hasher);
range.hash(&mut hasher);
file.name().hash(&mut hasher);
format!("{:x}", hasher.finish())
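The hash input is now the rule, the byte range, and the file name, rather than four row/column values. A simplified stand-in with primitive types in place of Rule and TextRange (note that DefaultHasher's algorithm is unspecified, so fingerprints are only stable within a build):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Sketch: (start, end) stands in for TextRange, which hashes its two offsets.
fn fingerprint_sketch(rule: &str, range: (u32, u32), file_name: &str) -> String {
    let mut hasher = DefaultHasher::new();
    rule.hash(&mut hasher);
    range.hash(&mut hasher);
    file_name.hash(&mut hasher);
    format!("{:x}", hasher.finish())
}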

View file

@@ -1,14 +1,20 @@
use crate::fs::relativize_path;
use crate::jupyter::JupyterIndex;
use crate::message::diff::calculate_print_width;
use crate::message::text::{MessageCodeFrame, RuleCodeAndBody};
use crate::message::{group_messages_by_filename, Emitter, EmitterContext, Message};
use crate::message::{
group_messages_by_filename, Emitter, EmitterContext, Message, MessageWithLocation,
};
use colored::Colorize;
use ruff_python_ast::source_code::OneIndexed;
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::num::NonZeroUsize;
#[derive(Default)]
pub struct GroupedEmitter {
show_fix_status: bool,
show_source: bool,
}
impl GroupedEmitter {
@@ -17,6 +23,12 @@ impl GroupedEmitter {
self.show_fix_status = show_fix_status;
self
}
#[must_use]
pub fn with_show_source(mut self, show_source: bool) -> Self {
self.show_source = show_source;
self
}
}
impl Emitter for GroupedEmitter {
@@ -29,20 +41,17 @@ impl Emitter for GroupedEmitter {
for (filename, messages) in group_messages_by_filename(messages) {
// Compute the maximum number of digits in the row and column, for messages in
// this file.
let row_length = num_digits(
messages
.iter()
.map(|message| message.location.row())
.max()
.unwrap(),
);
let column_length = num_digits(
messages
.iter()
.map(|message| message.location.column())
.max()
.unwrap(),
);
let mut max_row_length = OneIndexed::MIN;
let mut max_column_length = OneIndexed::MIN;
for message in &messages {
max_row_length = max_row_length.max(message.start_location.row);
max_column_length = max_column_length.max(message.start_location.column);
}
let row_length = calculate_print_width(max_row_length);
let column_length = calculate_print_width(max_column_length);
// Print the filename.
writeln!(writer, "{}:", relativize_path(filename).underline())?;
@@ -53,11 +62,12 @@ impl Emitter for GroupedEmitter {
writer,
"{}",
DisplayGroupedMessage {
jupyter_index: context.jupyter_index(message.filename()),
message,
show_fix_status: self.show_fix_status,
show_source: self.show_source,
row_length,
column_length,
jupyter_index: context.jupyter_index(message.filename()),
}
)?;
}
@@ -69,21 +79,26 @@ impl Emitter for GroupedEmitter {
}
struct DisplayGroupedMessage<'a> {
message: &'a Message,
message: MessageWithLocation<'a>,
show_fix_status: bool,
row_length: usize,
column_length: usize,
show_source: bool,
row_length: NonZeroUsize,
column_length: NonZeroUsize,
jupyter_index: Option<&'a JupyterIndex>,
}
impl Display for DisplayGroupedMessage<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let message = self.message;
let MessageWithLocation {
message,
start_location,
} = &self.message;
write!(
f,
" {row_padding}",
row_padding = " ".repeat(self.row_length - num_digits(message.location.row()))
row_padding =
" ".repeat(self.row_length.get() - calculate_print_width(start_location.row).get())
)?;
// Check if we're working on a jupyter notebook and translate positions with cell accordingly
@@ -91,29 +106,31 @@ impl Display for DisplayGroupedMessage<'_> {
write!(
f,
"cell {cell}{sep}",
cell = jupyter_index.row_to_cell[message.location.row()],
cell = jupyter_index.row_to_cell[start_location.row.get()],
sep = ":".cyan()
)?;
(
jupyter_index.row_to_row_in_cell[message.location.row()] as usize,
message.location.column(),
jupyter_index.row_to_row_in_cell[start_location.row.get()] as usize,
start_location.column.get(),
)
} else {
(message.location.row(), message.location.column())
(start_location.row.get(), start_location.column.get())
};
writeln!(
f,
"{row}{sep}{col}{col_padding} {code_and_body}",
sep = ":".cyan(),
col_padding = " ".repeat(self.column_length - num_digits(message.location.column())),
col_padding = " ".repeat(
self.column_length.get() - calculate_print_width(start_location.column).get()
),
code_and_body = RuleCodeAndBody {
message_kind: &message.kind,
show_fix_status: self.show_fix_status
},
)?;
{
if self.show_source {
use std::fmt::Write;
let mut padded = PadAdapter::new(f);
write!(padded, "{}", MessageCodeFrame { message })?;
@@ -125,16 +142,6 @@ impl Display for DisplayGroupedMessage<'_> {
}
}
fn num_digits(n: usize) -> usize {
std::iter::successors(Some(n), |n| {
let next = n / 10;
(next > 0).then_some(next)
})
.count()
.max(1)
}
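num_digits is superseded by calculate_print_width, which, judging from the call sites above, maps a OneIndexed value to its NonZeroUsize print width. A sketch of the presumed equivalent:

use std::num::NonZeroUsize;

// Sketch (assumed semantics): number of decimal digits needed to print `value`.
fn print_width(value: NonZeroUsize) -> NonZeroUsize {
    let digits = std::iter::successors(Some(value.get()), |n| {
        let next = n / 10;
        (next > 0).then_some(next)
    })
    .count();
    NonZeroUsize::new(digits).unwrap() // every value has at least one digit
}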
/// Adapter that adds a ' ' at the start of every line without the need to copy the string.
/// Inspired by Rust's `debug_struct()` internal implementation that also uses a `PadAdapter`.
struct PadAdapter<'buf> {
@@ -174,7 +181,7 @@ mod tests {
#[test]
fn default() {
let mut emitter = GroupedEmitter::default();
let mut emitter = GroupedEmitter::default().with_show_source(true);
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
@@ -182,7 +189,9 @@ mod tests {
#[test]
fn fix_status() {
let mut emitter = GroupedEmitter::default().with_show_fix_status(true);
let mut emitter = GroupedEmitter::default()
.with_show_fix_status(true)
.with_show_source(true);
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);

View file

@@ -1,9 +1,10 @@
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
use ruff_diagnostics::Edit;
use ruff_python_ast::source_code::{SourceCode, SourceLocation};
use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer};
use serde_json::json;
use serde_json::{json, Value};
use std::io::Write;
#[derive(Default)]
@@ -34,23 +35,29 @@ impl Serialize for ExpandedMessages<'_> {
let mut s = serializer.serialize_seq(Some(self.messages.len()))?;
for message in self.messages {
let source_code = message.file.to_source_code();
let fix = if message.fix.is_empty() {
None
} else {
Some(json!({
"message": message.kind.suggestion.as_deref(),
"edits": &ExpandedEdits { edits: message.fix.edits() },
"edits": &ExpandedEdits { edits: message.fix.edits(), source_code: &source_code },
}))
};
let start_location = source_code.source_location(message.start());
let end_location = source_code.source_location(message.end());
let noqa_location = source_code.source_location(message.noqa_offset);
let value = json!({
"code": message.kind.rule().noqa_code().to_string(),
"message": message.kind.body,
"fix": fix,
"location": message.location,
"end_location": message.end_location,
"location": start_location,
"end_location": end_location,
"filename": message.filename(),
"noqa_row": message.noqa_row
"noqa_row": noqa_location.row
});
s.serialize_element(&value)?;
@@ -62,6 +69,7 @@ impl Serialize for ExpandedMessages<'_> {
struct ExpandedEdits<'a> {
edits: &'a [Edit],
source_code: &'a SourceCode<'a, 'a>,
}
impl Serialize for ExpandedEdits<'_> {
@@ -72,10 +80,12 @@ impl Serialize for ExpandedEdits<'_> {
let mut s = serializer.serialize_seq(Some(self.edits.len()))?;
for edit in self.edits {
let start_location = self.source_code.source_location(edit.start());
let end_location = self.source_code.source_location(edit.end());
let value = json!({
"content": edit.content().unwrap_or_default(),
"location": edit.location(),
"end_location": edit.end_location()
"location": to_zero_indexed_column(&start_location),
"end_location": to_zero_indexed_column(&end_location)
});
s.serialize_element(&value)?;
@@ -85,6 +95,13 @@ impl Serialize for ExpandedEdits<'_> {
}
}
fn to_zero_indexed_column(location: &SourceLocation) -> Value {
json!({
"row": location.row,
"column": location.column.to_zero_indexed()
})
}
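For illustration, with hypothetical numbers: an edit location at one-indexed row 6, column 5 serializes with only the column shifted down, i.e. {"row": 6, "column": 4}.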
#[cfg(test)]
mod tests {
use crate::message::tests::{capture_emitter_output, create_messages};

View file

@@ -1,6 +1,9 @@
use crate::message::{group_messages_by_filename, Emitter, EmitterContext, Message};
use crate::message::{
group_messages_by_filename, Emitter, EmitterContext, Message, MessageWithLocation,
};
use crate::registry::AsRule;
use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite};
use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
use std::io::Write;
use std::path::Path;
@@ -23,17 +26,29 @@ impl Emitter for JunitEmitter {
.insert("package".to_string(), "org.ruff".to_string());
for message in messages {
let MessageWithLocation {
message,
start_location,
} = message;
let mut status = TestCaseStatus::non_success(NonSuccessKind::Failure);
status.set_message(message.kind.body.clone());
let (row, col) = if context.is_jupyter_notebook(message.filename()) {
let location = if context.is_jupyter_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
(1, 0)
SourceLocation {
row: OneIndexed::from_zero_indexed(0),
column: OneIndexed::from_zero_indexed(0),
}
} else {
(message.location.row(), message.location.column())
start_location
};
status.set_description(format!("line {row}, col {col}, {}", message.kind.body));
status.set_description(format!(
"line {row}, col {col}, {body}",
row = location.row,
col = location.column,
body = message.kind.body
));
let mut case = TestCase::new(
format!("org.ruff.{}", message.kind.rule().noqa_code()),
status,
@@ -43,9 +58,9 @@ impl Emitter for JunitEmitter {
let classname = file_path.parent().unwrap().join(file_stem);
case.set_classname(classname.to_str().unwrap());
case.extra
.insert("line".to_string(), message.location.row().to_string());
.insert("line".to_string(), location.row.to_string());
case.extra
.insert("column".to_string(), message.location.column().to_string());
.insert("column".to_string(), location.column.to_string());
test_suite.add_test_case(case);
}

View file

@@ -8,10 +8,12 @@ mod junit;
mod pylint;
mod text;
use ruff_text_size::{TextRange, TextSize};
use rustc_hash::FxHashMap;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::io::Write;
use std::ops::Deref;
pub use azure::AzureEmitter;
pub use github::GithubEmitter;
@@ -20,49 +22,64 @@ pub use grouped::GroupedEmitter;
pub use json::JsonEmitter;
pub use junit::JunitEmitter;
pub use pylint::PylintEmitter;
pub use rustpython_parser::ast::Location;
pub use text::TextEmitter;
use crate::jupyter::JupyterIndex;
use crate::registry::AsRule;
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix};
use ruff_python_ast::source_code::SourceFile;
use ruff_python_ast::source_code::{SourceFile, SourceLocation};
#[derive(Debug, PartialEq, Eq)]
pub struct Message {
pub kind: DiagnosticKind,
pub location: Location,
pub end_location: Location,
pub range: TextRange,
pub fix: Fix,
pub file: SourceFile,
pub noqa_row: usize,
pub noqa_offset: TextSize,
}
impl Message {
pub fn from_diagnostic(diagnostic: Diagnostic, file: SourceFile, noqa_row: usize) -> Self {
pub fn from_diagnostic(
diagnostic: Diagnostic,
file: SourceFile,
noqa_offset: TextSize,
) -> Self {
Self {
range: diagnostic.range(),
kind: diagnostic.kind,
location: Location::new(diagnostic.location.row(), diagnostic.location.column() + 1),
end_location: Location::new(
diagnostic.end_location.row(),
diagnostic.end_location.column() + 1,
),
fix: diagnostic.fix,
file,
noqa_row,
noqa_offset,
}
}
pub fn filename(&self) -> &str {
self.file.name()
}
pub fn compute_start_location(&self) -> SourceLocation {
self.file.to_source_code().source_location(self.start())
}
pub fn compute_end_location(&self) -> SourceLocation {
self.file.to_source_code().source_location(self.end())
}
pub const fn start(&self) -> TextSize {
self.range.start()
}
pub const fn end(&self) -> TextSize {
self.range.end()
}
}
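Rows and columns are now derived on demand from the stored byte offset. A sketch of how such an offset-to-location conversion typically works (assumed mechanics, not the actual SourceCode implementation; the column here is byte-based, whereas the real conversion may count characters):

// Sketch: `line_starts` is a sorted table of line start offsets, with
// line_starts[0] == 0. Returns a one-indexed (row, column) pair.
fn offset_to_location(line_starts: &[usize], offset: usize) -> (usize, usize) {
    let line = match line_starts.binary_search(&offset) {
        Ok(index) => index,      // offset sits exactly on a line start
        Err(index) => index - 1, // offset falls inside the preceding line
    };
    let column = offset - line_starts[line];
    (line + 1, column + 1)
}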
impl Ord for Message {
fn cmp(&self, other: &Self) -> Ordering {
(self.filename(), self.location.row(), self.location.column()).cmp(&(
(self.filename(), self.start(), self.kind.rule()).cmp(&(
other.filename(),
other.location.row(),
other.location.column(),
other.start(),
other.kind.rule(),
))
}
}
@@ -73,13 +90,28 @@ impl PartialOrd for Message {
}
}
fn group_messages_by_filename(messages: &[Message]) -> BTreeMap<&str, Vec<&Message>> {
struct MessageWithLocation<'a> {
message: &'a Message,
start_location: SourceLocation,
}
impl Deref for MessageWithLocation<'_> {
type Target = Message;
fn deref(&self) -> &Self::Target {
self.message
}
}
fn group_messages_by_filename(messages: &[Message]) -> BTreeMap<&str, Vec<MessageWithLocation>> {
let mut grouped_messages = BTreeMap::default();
for message in messages {
grouped_messages
.entry(message.filename())
.or_insert_with(Vec::new)
.push(message);
.push(MessageWithLocation {
message,
start_location: message.compute_start_location(),
});
}
grouped_messages
}
@@ -120,11 +152,11 @@ impl<'a> EmitterContext<'a> {
#[cfg(test)]
mod tests {
use crate::message::{Emitter, EmitterContext, Location, Message};
use crate::message::{Emitter, EmitterContext, Message};
use crate::rules::pyflakes::rules::{UndefinedName, UnusedImport, UnusedVariable};
use ruff_diagnostics::{Diagnostic, Edit, Fix};
use ruff_python_ast::source_code::SourceFileBuilder;
use ruff_python_ast::types::Range;
use ruff_text_size::{TextRange, TextSize};
use rustc_hash::FxHashMap;
pub(super) fn create_messages() -> Vec<Message> {
@@ -148,20 +180,20 @@ def fibonacci(n):
context: None,
multiple: false,
},
Range::new(Location::new(1, 7), Location::new(1, 9)),
TextRange::new(TextSize::from(7), TextSize::from(9)),
);
let fib_source = SourceFileBuilder::new("fib.py").source_text(fib).finish();
let fib_source = SourceFileBuilder::new("fib.py", fib).finish();
let unused_variable = Diagnostic::new(
UnusedVariable {
name: "x".to_string(),
},
Range::new(Location::new(6, 4), Location::new(6, 5)),
TextRange::new(TextSize::from(94), TextSize::from(95)),
)
.with_fix(Fix::new(vec![Edit::deletion(
Location::new(6, 4),
Location::new(6, 9),
TextSize::from(94),
TextSize::from(99),
)]));
let file_2 = r#"if a == 1: pass"#;
@@ -170,17 +202,18 @@ def fibonacci(n):
UndefinedName {
name: "a".to_string(),
},
Range::new(Location::new(1, 3), Location::new(1, 4)),
TextRange::new(TextSize::from(3), TextSize::from(4)),
);
let file_2_source = SourceFileBuilder::new("undef.py")
.source_text(file_2)
.finish();
let file_2_source = SourceFileBuilder::new("undef.py", file_2).finish();
let unused_import_start = unused_import.start();
let unused_variable_start = unused_variable.start();
let undefined_name_start = undefined_name.start();
vec![
Message::from_diagnostic(unused_import, fib_source.clone(), 1),
Message::from_diagnostic(unused_variable, fib_source, 1),
Message::from_diagnostic(undefined_name, file_2_source, 1),
Message::from_diagnostic(unused_import, fib_source.clone(), unused_import_start),
Message::from_diagnostic(unused_variable, fib_source, unused_variable_start),
Message::from_diagnostic(undefined_name, file_2_source, undefined_name_start),
]
}

View file

@@ -1,6 +1,7 @@
use crate::fs::relativize_path;
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
use ruff_python_ast::source_code::OneIndexed;
use std::io::Write;
/// Generate violations in Pylint format.
@@ -19,9 +20,9 @@ impl Emitter for PylintEmitter {
let row = if context.is_jupyter_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
1
OneIndexed::from_zero_indexed(0)
} else {
message.location.row()
message.compute_start_location().row
};
writeln!(

View file

@@ -46,7 +46,7 @@ expression: content
"column": 6
},
"filename": "fib.py",
"noqa_row": 1
"noqa_row": 6
},
{
"code": "F821",

View file

@@ -4,30 +4,45 @@ use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
use annotate_snippets::display_list::{DisplayList, FormatOptions};
use annotate_snippets::snippet::{Annotation, AnnotationType, Slice, Snippet, SourceAnnotation};
use bitflags::bitflags;
use colored::Colorize;
use ruff_diagnostics::DiagnosticKind;
use ruff_python_ast::source_code::OneIndexed;
use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
use ruff_text_size::TextRange;
use std::cmp;
use std::fmt::{Display, Formatter};
use std::io::Write;
bitflags! {
#[derive(Default)]
struct EmitterFlags: u8 {
const SHOW_FIX_STATUS = 0b0000_0001;
const SHOW_FIX = 0b0000_0010;
const SHOW_SOURCE = 0b0000_0100;
}
}
#[derive(Default)]
pub struct TextEmitter {
show_fix_status: bool,
show_fix: bool,
flags: EmitterFlags,
}
impl TextEmitter {
#[must_use]
pub fn with_show_fix_status(mut self, show_fix_status: bool) -> Self {
self.show_fix_status = show_fix_status;
self.flags
.set(EmitterFlags::SHOW_FIX_STATUS, show_fix_status);
self
}
#[must_use]
pub fn with_show_fix(mut self, show_fix: bool) -> Self {
self.show_fix = show_fix;
self.flags.set(EmitterFlags::SHOW_FIX, show_fix);
self
}
#[must_use]
pub fn with_show_source(mut self, show_source: bool) -> Self {
self.flags.set(EmitterFlags::SHOW_SOURCE, show_source);
self
}
}
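A usage sketch of the bitflags-backed builder, mirroring the tests below:

// Both setters flip bits in the same EmitterFlags field.
fn make_emitter() -> TextEmitter {
    TextEmitter::default()
        .with_show_fix_status(true) // sets EmitterFlags::SHOW_FIX_STATUS
        .with_show_source(true)     // sets EmitterFlags::SHOW_SOURCE
}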
@@ -47,41 +62,48 @@ impl Emitter for TextEmitter {
sep = ":".cyan(),
)?;
// Check if we're working on a jupyter notebook and translate positions with cell accordingly
let (row, col) = if let Some(jupyter_index) = context.jupyter_index(message.filename())
{
write!(
writer,
"cell {cell}{sep}",
cell = jupyter_index.row_to_cell[message.location.row()],
sep = ":".cyan(),
)?;
let start_location = message.compute_start_location();
(
jupyter_index.row_to_row_in_cell[message.location.row()] as usize,
message.location.column(),
)
} else {
(message.location.row(), message.location.column())
};
// Check if we're working on a jupyter notebook and translate positions with cell accordingly
let diagnostic_location =
if let Some(jupyter_index) = context.jupyter_index(message.filename()) {
write!(
writer,
"cell {cell}{sep}",
cell = jupyter_index.row_to_cell[start_location.row.get()],
sep = ":".cyan(),
)?;
SourceLocation {
row: OneIndexed::new(
jupyter_index.row_to_row_in_cell[start_location.row.get()] as usize,
)
.unwrap(),
column: start_location.column,
}
} else {
start_location
};
writeln!(
writer,
"{row}{sep}{col}{sep} {code_and_body}",
row = diagnostic_location.row,
col = diagnostic_location.column,
sep = ":".cyan(),
code_and_body = RuleCodeAndBody {
message_kind: &message.kind,
show_fix_status: self.show_fix_status
show_fix_status: self.flags.contains(EmitterFlags::SHOW_FIX_STATUS)
}
)?;
if message.file.source_code().is_some() {
if self.flags.contains(EmitterFlags::SHOW_SOURCE) {
writeln!(writer, "{}", MessageCodeFrame { message })?;
}
if self.show_fix {
if let Some(diff) = Diff::from_message(message) {
writeln!(writer, "{diff}")?;
}
if self.flags.contains(EmitterFlags::SHOW_FIX) {
if let Some(diff) = Diff::from_message(message) {
writeln!(writer, "{diff}")?;
}
}
}
@@ -135,105 +157,91 @@ pub(super) struct MessageCodeFrame<'a> {
impl Display for MessageCodeFrame<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let Message {
kind,
file,
location,
end_location,
..
kind, file, range, ..
} = self.message;
if let Some(source_code) = file.source_code() {
let suggestion = kind.suggestion.as_deref();
let footer = if suggestion.is_some() {
vec![Annotation {
id: None,
label: suggestion,
annotation_type: AnnotationType::Help,
}]
} else {
Vec::new()
};
let suggestion = kind.suggestion.as_deref();
let footer = if suggestion.is_some() {
vec![Annotation {
id: None,
label: suggestion,
annotation_type: AnnotationType::Help,
}]
} else {
Vec::new()
};
let mut start_index =
OneIndexed::new(cmp::max(1, location.row().saturating_sub(2))).unwrap();
let content_start_index = OneIndexed::new(location.row()).unwrap();
let source_code = file.to_source_code();
let content_start_index = source_code.line_index(range.start());
let mut start_index = content_start_index.saturating_sub(2);
// Trim leading empty lines.
while start_index < content_start_index {
if !source_code.line_text(start_index).trim().is_empty() {
break;
}
start_index = start_index.saturating_add(1);
// Trim leading empty lines.
while start_index < content_start_index {
if !source_code.line_text(start_index).trim().is_empty() {
break;
}
start_index = start_index.saturating_add(1);
}
let content_end_index = source_code.line_index(range.end());
let mut end_index = content_end_index
.saturating_add(2)
.min(OneIndexed::from_zero_indexed(source_code.line_count()));
// Trim trailing empty lines
while end_index > content_end_index {
if !source_code.line_text(end_index).trim().is_empty() {
break;
}
let mut end_index = OneIndexed::new(cmp::min(
end_location.row().saturating_add(2),
source_code.line_count() + 1,
))
.unwrap();
end_index = end_index.saturating_sub(1);
}
let content_end_index = OneIndexed::new(end_location.row()).unwrap();
let start_offset = source_code.line_start(start_index);
let end_offset = source_code.line_end(end_index);
// Trim trailing empty lines
while end_index > content_end_index {
if !source_code.line_text(end_index).trim().is_empty() {
break;
}
let source_text = source_code.slice(TextRange::new(start_offset, end_offset));
end_index = end_index.saturating_sub(1);
}
let annotation_start_offset = range.start() - start_offset;
let annotation_end_offset = range.end() - start_offset;
let start_offset = source_code.line_start(start_index);
let end_offset = source_code.line_end(end_index);
let source_text = &source_code.text()[TextRange::new(start_offset, end_offset)];
let annotation_start_offset =
// Message columns are one indexed
source_code.offset(location.with_col_offset(-1)) - start_offset;
let annotation_end_offset =
source_code.offset(end_location.with_col_offset(-1)) - start_offset;
let start_char = source_text[TextRange::up_to(annotation_start_offset)]
.chars()
.count();
let char_length = source_text
[TextRange::new(annotation_start_offset, annotation_end_offset)]
let start_char = source_text[TextRange::up_to(annotation_start_offset)]
.chars()
.count();
let label = kind.rule().noqa_code().to_string();
let char_length = source_text
[TextRange::new(annotation_start_offset, annotation_end_offset)]
.chars()
.count();
let snippet = Snippet {
title: None,
slices: vec![Slice {
source: source_text,
line_start: location.row(),
annotations: vec![SourceAnnotation {
label: &label,
annotation_type: AnnotationType::Error,
range: (start_char, start_char + char_length),
}],
// The origin (file name, line number, and column number) is already encoded
// in the `label`.
origin: None,
fold: false,
let label = kind.rule().noqa_code().to_string();
let snippet = Snippet {
title: None,
slices: vec![Slice {
source: source_text,
line_start: content_start_index.get(),
annotations: vec![SourceAnnotation {
label: &label,
annotation_type: AnnotationType::Error,
range: (start_char, start_char + char_length),
}],
footer,
opt: FormatOptions {
#[cfg(test)]
color: false,
#[cfg(not(test))]
color: colored::control::SHOULD_COLORIZE.should_colorize(),
..FormatOptions::default()
},
};
// The origin (file name, line number, and column number) is already encoded
// in the `label`.
origin: None,
fold: false,
}],
footer,
opt: FormatOptions {
#[cfg(test)]
color: false,
#[cfg(not(test))]
color: colored::control::SHOULD_COLORIZE.should_colorize(),
..FormatOptions::default()
},
};
writeln!(f, "{message}", message = DisplayList::from(snippet))?;
}
Ok(())
writeln!(f, "{message}", message = DisplayList::from(snippet))
}
}
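The chars().count() calls exist because annotate-snippets expects character indices while the new ranges are byte offsets. The per-offset conversion, as a standalone sketch:

// Sketch: `byte_offset` must lie on a char boundary within `text`.
fn byte_to_char_index(text: &str, byte_offset: usize) -> usize {
    text[..byte_offset].chars().count()
}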
@@ -245,7 +253,7 @@ mod tests {
#[test]
fn default() {
let mut emitter = TextEmitter::default();
let mut emitter = TextEmitter::default().with_show_source(true);
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
@@ -253,7 +261,9 @@ mod tests {
#[test]
fn fix_status() {
let mut emitter = TextEmitter::default().with_show_fix_status(true);
let mut emitter = TextEmitter::default()
.with_show_fix_status(true)
.with_show_source(true);
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);

View file

@@ -1,3 +1,4 @@
use std::collections::BTreeMap;
use std::fmt::{Display, Write};
use std::fs;
use std::path::Path;
@@ -5,16 +6,12 @@ use std::path::Path;
use anyhow::Result;
use itertools::Itertools;
use log::warn;
use nohash_hasher::IntMap;
use once_cell::sync::Lazy;
use regex::Regex;
use rustc_hash::FxHashMap;
use rustpython_parser::ast::Location;
use ruff_text_size::{TextLen, TextRange, TextSize};
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::newlines::StrExt;
use ruff_python_ast::source_code::{LineEnding, Locator};
use ruff_python_ast::types::Range;
use crate::codes::NoqaCode;
use crate::registry::{AsRule, Rule, RuleSet};
@@ -31,46 +28,52 @@ static SPLIT_COMMA_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"[,\s]").unwrap
#[derive(Debug)]
pub enum Directive<'a> {
None,
All(usize, usize, usize, usize),
Codes(usize, usize, usize, Vec<&'a str>, usize),
// (leading spaces, noqa_range, trailing_spaces)
All(TextSize, TextRange, TextSize),
// (leading spaces, noqa_range, codes, trailing_spaces)
Codes(TextSize, TextRange, Vec<&'a str>, TextSize),
}
/// Extract the noqa `Directive` from a line of Python source code.
pub fn extract_noqa_directive(line: &str) -> Directive {
match NOQA_LINE_REGEX.captures(line) {
Some(caps) => match caps.name("leading_spaces") {
Some(leading_spaces) => match caps.name("trailing_spaces") {
Some(trailing_spaces) => match caps.name("noqa") {
Some(noqa) => match caps.name("codes") {
Some(codes) => {
let codes: Vec<&str> = SPLIT_COMMA_REGEX
.split(codes.as_str().trim())
.map(str::trim)
.filter(|code| !code.is_empty())
.collect();
if codes.is_empty() {
warn!("Expected rule codes on `noqa` directive: \"{line}\"");
}
Directive::Codes(
leading_spaces.as_str().chars().count(),
noqa.start(),
noqa.end(),
codes,
trailing_spaces.as_str().chars().count(),
)
}
None => Directive::All(
leading_spaces.as_str().chars().count(),
noqa.start(),
noqa.end(),
trailing_spaces.as_str().chars().count(),
),
},
None => Directive::None,
},
None => Directive::None,
},
None => Directive::None,
pub fn extract_noqa_directive<'a>(range: TextRange, locator: &'a Locator) -> Directive<'a> {
let text = &locator.contents()[range];
match NOQA_LINE_REGEX.captures(text) {
Some(caps) => match (
caps.name("leading_spaces"),
caps.name("noqa"),
caps.name("codes"),
caps.name("trailing_spaces"),
) {
(Some(leading_spaces), Some(noqa), Some(codes), Some(trailing_spaces)) => {
let codes: Vec<&str> = SPLIT_COMMA_REGEX
.split(codes.as_str().trim())
.map(str::trim)
.filter(|code| !code.is_empty())
.collect();
let start = range.start() + TextSize::try_from(noqa.start()).unwrap();
if codes.is_empty() {
#[allow(deprecated)]
let line = locator.compute_line_index(start);
warn!("Expected rule codes on `noqa` directive: \"{line}\"");
}
Directive::Codes(
leading_spaces.as_str().text_len(),
TextRange::at(start, noqa.as_str().text_len()),
codes,
trailing_spaces.as_str().text_len(),
)
}
(Some(leading_spaces), Some(noqa), None, Some(trailing_spaces)) => Directive::All(
leading_spaces.as_str().text_len(),
TextRange::at(
range.start() + TextSize::try_from(noqa.start()).unwrap(),
noqa.as_str().text_len(),
),
trailing_spaces.as_str().text_len(),
),
_ => Directive::None,
},
None => Directive::None,
}
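A much-simplified sketch of the parsing idea, with a hypothetical regex in place of NOQA_LINE_REGEX and without the offset bookkeeping (returns None when the line has no noqa comment, Some(vec![]) for a bare `# noqa`):

use regex::Regex;

fn parse_noqa(line: &str) -> Option<Vec<&str>> {
    // Hypothetical pattern, far simpler than the real NOQA_LINE_REGEX.
    let re = Regex::new(r"(?i)#\s*noqa(?::\s*(?P<codes>[A-Z0-9, ]+))?").ok()?;
    let caps = re.captures(line)?;
    Some(match caps.name("codes") {
        Some(codes) => codes
            .as_str()
            .split(|c: char| c == ',' || c == ' ')
            .map(str::trim)
            .filter(|code| !code.is_empty())
            .collect(),
        None => Vec::new(), // bare `# noqa`: suppresses everything on the line
    })
}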
@@ -129,16 +132,13 @@ pub fn includes(needle: Rule, haystack: &[&str]) -> bool {
/// Returns `true` if the given [`Rule`] is ignored at the specified `lineno`.
pub fn rule_is_ignored(
code: Rule,
lineno: usize,
noqa_line_for: &IntMap<usize, usize>,
offset: TextSize,
noqa_line_for: &NoqaMapping,
locator: &Locator,
) -> bool {
let noqa_lineno = noqa_line_for.get(&lineno).unwrap_or(&lineno);
let line = locator.slice(Range::new(
Location::new(*noqa_lineno, 0),
Location::new(noqa_lineno + 1, 0),
));
match extract_noqa_directive(line) {
let offset = noqa_line_for.resolve(offset);
let line_range = locator.line_range(offset);
match extract_noqa_directive(line_range, locator) {
Directive::None => false,
Directive::All(..) => true,
Directive::Codes(.., codes, _) => includes(code, &codes),
@@ -153,11 +153,11 @@ pub enum FileExemption {
/// Extract the [`FileExemption`] for a given Python source file, enumerating any rules that are
/// globally ignored within the file.
pub fn file_exemption(lines: &[&str], commented_lines: &[usize]) -> FileExemption {
pub fn file_exemption(contents: &str, comment_ranges: &[TextRange]) -> FileExemption {
let mut exempt_codes: Vec<NoqaCode> = vec![];
for lineno in commented_lines {
match parse_file_exemption(lines[lineno - 1]) {
for range in comment_ranges {
match parse_file_exemption(&contents[*range]) {
ParsedExemption::All => {
return FileExemption::All;
}
@@ -182,17 +182,18 @@ pub fn file_exemption(lines: &[&str], commented_lines: &[usize]) -> FileExemptio
}
}
/// Adds noqa comments to suppress all diagnostics of a file.
pub fn add_noqa(
path: &Path,
diagnostics: &[Diagnostic],
contents: &str,
commented_lines: &[usize],
noqa_line_for: &IntMap<usize, usize>,
locator: &Locator,
commented_lines: &[TextRange],
noqa_line_for: &NoqaMapping,
line_ending: LineEnding,
) -> Result<usize> {
let (count, output) = add_noqa_inner(
diagnostics,
contents,
locator,
commented_lines,
noqa_line_for,
line_ending,
@@ -203,19 +204,19 @@ pub fn add_noqa(
fn add_noqa_inner(
diagnostics: &[Diagnostic],
contents: &str,
commented_lines: &[usize],
noqa_line_for: &IntMap<usize, usize>,
locator: &Locator,
commented_ranges: &[TextRange],
noqa_line_for: &NoqaMapping,
line_ending: LineEnding,
) -> (usize, String) {
// Map of line number to set of (non-ignored) diagnostic codes that are triggered on that line.
let mut matches_by_line: FxHashMap<usize, RuleSet> = FxHashMap::default();
let lines: Vec<&str> = contents.universal_newlines().collect();
// Map of line start offset to set of (non-ignored) diagnostic codes that are triggered on that line.
let mut matches_by_line: BTreeMap<TextSize, (RuleSet, Option<&Directive>)> =
BTreeMap::default();
// Whether the file is exempted from all checks.
// Codes that are globally exempted (within the current file).
let exemption = file_exemption(&lines, commented_lines);
let exemption = file_exemption(locator.contents(), commented_ranges);
let directives = NoqaDirectives::from_commented_ranges(commented_ranges, locator);
// Mark any non-ignored diagnostics.
for diagnostic in diagnostics {
@@ -233,116 +234,122 @@ fn add_noqa_inner(
FileExemption::None => {}
}
let diagnostic_lineno = diagnostic.location.row();
// Is the violation ignored by a `noqa` directive on the parent line?
if let Some(parent_lineno) = diagnostic.parent.map(|location| location.row()) {
if parent_lineno != diagnostic_lineno {
let noqa_lineno = noqa_line_for.get(&parent_lineno).unwrap_or(&parent_lineno);
if commented_lines.contains(noqa_lineno) {
match extract_noqa_directive(lines[noqa_lineno - 1]) {
Directive::All(..) => {
if let Some(parent) = diagnostic.parent {
if let Some(directive_line) =
directives.find_line_with_directive(noqa_line_for.resolve(parent))
{
match &directive_line.directive {
Directive::All(..) => {
continue;
}
Directive::Codes(.., codes, _) => {
if includes(diagnostic.kind.rule(), codes) {
continue;
}
Directive::Codes(.., codes, _) => {
if includes(diagnostic.kind.rule(), &codes) {
continue;
}
}
Directive::None => {}
}
Directive::None => {}
}
}
}
// Is the diagnostic ignored by a `noqa` directive on the same line?
let noqa_lineno = noqa_line_for
.get(&diagnostic_lineno)
.unwrap_or(&diagnostic_lineno);
if commented_lines.contains(noqa_lineno) {
match extract_noqa_directive(lines[noqa_lineno - 1]) {
let noqa_offset = noqa_line_for.resolve(diagnostic.start());
// Or ignored by the directive itself
if let Some(directive_line) = directives.find_line_with_directive(noqa_offset) {
match &directive_line.directive {
Directive::All(..) => {
continue;
}
Directive::Codes(.., codes, _) => {
if includes(diagnostic.kind.rule(), &codes) {
continue;
let rule = diagnostic.kind.rule();
if !includes(rule, codes) {
matches_by_line
.entry(directive_line.range.start())
.or_insert_with(|| {
(RuleSet::default(), Some(&directive_line.directive))
})
.0
.insert(rule);
}
continue;
}
Directive::None => {}
}
}
// The diagnostic is not ignored by any `noqa` directive; add it to the list.
let lineno = diagnostic.location.row() - 1;
let noqa_lineno = noqa_line_for.get(&(lineno + 1)).unwrap_or(&(lineno + 1)) - 1;
// There's no existing noqa directive that suppresses the diagnostic.
matches_by_line
.entry(noqa_lineno)
.or_default()
.entry(locator.line_start(noqa_offset))
.or_insert_with(|| (RuleSet::default(), None))
.0
.insert(diagnostic.kind.rule());
}
let mut count: usize = 0;
let mut output = String::new();
for (lineno, line) in lines.into_iter().enumerate() {
match matches_by_line.get(&lineno) {
None => {
output.push_str(line);
let mut count = 0;
let mut output = String::with_capacity(locator.len());
let mut prev_end = TextSize::default();
for (offset, (rules, directive)) in matches_by_line {
output.push_str(&locator.contents()[TextRange::new(prev_end, offset)]);
let line = locator.full_line(offset);
match directive {
None | Some(Directive::None) => {
// Add existing content.
output.push_str(line.trim_end());
// Add `noqa` directive.
output.push_str(" # noqa: ");
// Add codes.
push_codes(&mut output, rules.iter().map(|rule| rule.noqa_code()));
output.push_str(&line_ending);
count += 1;
}
Some(Directive::All(..)) => {
// Does not get inserted into the map.
}
Some(Directive::Codes(_, noqa_range, existing, _)) => {
// Reconstruct the line based on the preserved rule codes.
// This enables us to tally the number of edits.
let output_start = output.len();
// Add existing content.
output.push_str(
locator
.slice(TextRange::new(offset, noqa_range.start()))
.trim_end(),
);
// Add `noqa` directive.
output.push_str(" # noqa: ");
// Add codes.
push_codes(
&mut output,
rules
.iter()
.map(|r| r.noqa_code().to_string())
.chain(existing.iter().map(ToString::to_string))
.sorted_unstable(),
);
// Only count if the new line is an actual edit.
if &output[output_start..] != line.trim_end() {
count += 1;
}
output.push_str(&line_ending);
}
Some(rules) => {
match extract_noqa_directive(line) {
Directive::None => {
// Add existing content.
output.push_str(line.trim_end());
// Add `noqa` directive.
output.push_str(" # noqa: ");
// Add codes.
push_codes(&mut output, rules.iter().map(|rule| rule.noqa_code()));
output.push_str(&line_ending);
count += 1;
}
Directive::All(..) => {
// Leave the line as-is.
output.push_str(line);
output.push_str(&line_ending);
}
Directive::Codes(_, start_byte, _, existing, _) => {
// Reconstruct the line based on the preserved rule codes.
// This enables us to tally the number of edits.
let mut formatted = String::with_capacity(line.len());
// Add existing content.
formatted.push_str(line[..start_byte].trim_end());
// Add `noqa` directive.
formatted.push_str(" # noqa: ");
// Add codes.
push_codes(
&mut formatted,
rules
.iter()
.map(|r| r.noqa_code().to_string())
.chain(existing.into_iter().map(ToString::to_string))
.sorted_unstable(),
);
output.push_str(&formatted);
output.push_str(&line_ending);
// Only count if the new line is an actual edit.
if formatted != line {
count += 1;
}
}
};
}
}
prev_end = offset + line.text_len();
}
output.push_str(&locator.contents()[usize::from(prev_end)..]);
(count, output)
}
@@ -352,21 +359,161 @@ fn push_codes<I: Display>(str: &mut String, codes: impl Iterator<Item = I>) {
if !first {
str.push_str(", ");
}
_ = write!(str, "{code}");
write!(str, "{code}").unwrap();
first = false;
}
}
#[derive(Debug)]
pub(crate) struct NoqaDirectiveLine<'a> {
// The range of the text line for which the noqa directive applies.
pub range: TextRange,
pub directive: Directive<'a>,
pub matches: Vec<NoqaCode>,
}
#[derive(Debug, Default)]
pub(crate) struct NoqaDirectives<'a> {
inner: Vec<NoqaDirectiveLine<'a>>,
}
impl<'a> NoqaDirectives<'a> {
pub fn from_commented_ranges(comment_ranges: &[TextRange], locator: &'a Locator<'a>) -> Self {
let mut directives = Vec::new();
for comment_range in comment_ranges {
let line_range = locator.line_range(comment_range.start());
let directive = match extract_noqa_directive(line_range, locator) {
Directive::None => {
continue;
}
directive @ (Directive::All(..) | Directive::Codes(..)) => directive,
};
// noqa comments are guaranteed to be single line.
directives.push(NoqaDirectiveLine {
range: line_range,
directive,
matches: Vec::new(),
});
}
// Extend a mapping at the end of the file to also include the EOF token.
if let Some(last) = directives.last_mut() {
if last.range.end() == locator.contents().text_len() {
last.range = last.range.add_end(TextSize::from(1));
}
}
Self { inner: directives }
}
pub fn find_line_with_directive(&self, offset: TextSize) -> Option<&NoqaDirectiveLine> {
self.find_line_index(offset).map(|index| &self.inner[index])
}
pub fn find_line_with_directive_mut(
&mut self,
offset: TextSize,
) -> Option<&mut NoqaDirectiveLine<'a>> {
if let Some(index) = self.find_line_index(offset) {
Some(&mut self.inner[index])
} else {
None
}
}
fn find_line_index(&self, offset: TextSize) -> Option<usize> {
self.inner
.binary_search_by(|directive| {
if directive.range.end() < offset {
std::cmp::Ordering::Less
} else if directive.range.contains(offset) {
std::cmp::Ordering::Equal
} else {
std::cmp::Ordering::Greater
}
})
.ok()
}
pub fn lines(&self) -> &[NoqaDirectiveLine] {
&self.inner
}
}
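The comparator implements "find the range containing this offset" over a sorted, non-overlapping list; an offset equal to a range's exclusive end is not a hit, which is why the EOF case above widens the last range by one. A self-contained sketch over std ranges:

// Sketch of the same binary search over half-open u32 ranges.
fn find_containing(ranges: &[std::ops::Range<u32>], offset: u32) -> Option<usize> {
    ranges
        .binary_search_by(|range| {
            if range.end < offset {
                std::cmp::Ordering::Less    // range lies before the offset
            } else if range.contains(&offset) {
                std::cmp::Ordering::Equal   // offset falls inside this range
            } else {
                std::cmp::Ordering::Greater // range lies after the offset
            }
        })
        .ok()
}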
/// Remaps offsets falling into one of the ranges to instead check for a noqa comment on the
/// line specified by the offset.
#[derive(Debug, Default, PartialEq, Eq)]
pub struct NoqaMapping {
ranges: Vec<TextRange>,
}
impl NoqaMapping {
pub(crate) fn with_capacity(capacity: usize) -> Self {
Self {
ranges: Vec::with_capacity(capacity),
}
}
/// Returns the re-mapped offset, or `offset` if no mapping exists.
pub fn resolve(&self, offset: TextSize) -> TextSize {
let index = self.ranges.binary_search_by(|range| {
if range.end() < offset {
std::cmp::Ordering::Less
} else if range.contains(offset) {
std::cmp::Ordering::Equal
} else {
std::cmp::Ordering::Greater
}
});
if let Ok(index) = index {
self.ranges[index].end()
} else {
offset
}
}
pub fn push_mapping(&mut self, range: TextRange) {
if let Some(last_range) = self.ranges.last_mut() {
// Strictly sorted insertion
if last_range.end() <= range.start() {
// OK
}
// Try merging with the last inserted range
else if let Some(intersected) = last_range.intersect(range) {
*last_range = intersected;
return;
} else {
panic!("Ranges must be inserted in sorted order")
}
}
self.ranges.push(range);
}
}
impl FromIterator<TextRange> for NoqaMapping {
fn from_iter<T: IntoIterator<Item = TextRange>>(iter: T) -> Self {
let mut mappings = NoqaMapping::default();
for range in iter {
mappings.push_mapping(range);
}
mappings
}
}
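A usage sketch with arbitrary offsets: if a multi-line statement spans bytes 10..40, interior offsets resolve to its end, where a trailing `# noqa` comment would live:

use ruff_text_size::{TextRange, TextSize};

fn demo() {
    let mapping = NoqaMapping::from_iter([TextRange::new(
        TextSize::from(10),
        TextSize::from(40),
    )]);
    // Inside the pushed range: redirected to the range's end.
    assert_eq!(mapping.resolve(TextSize::from(25)), TextSize::from(40));
    // Outside every range: passes through unchanged.
    assert_eq!(mapping.resolve(TextSize::from(5)), TextSize::from(5));
}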
#[cfg(test)]
mod tests {
use nohash_hasher::IntMap;
use rustpython_parser::ast::Location;
use ruff_text_size::{TextRange, TextSize};
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::source_code::LineEnding;
use ruff_python_ast::types::Range;
use ruff_python_ast::source_code::{LineEnding, Locator};
use crate::noqa::{add_noqa_inner, NOQA_LINE_REGEX};
use crate::noqa::{add_noqa_inner, NoqaMapping, NOQA_LINE_REGEX};
use crate::rules::pycodestyle::rules::AmbiguousVariableName;
use crate::rules::pyflakes;
@@ -386,87 +533,83 @@ mod tests {
#[test]
fn modification() {
let diagnostics = vec![];
let contents = "x = 1";
let commented_lines = vec![];
let noqa_line_for = IntMap::default();
let noqa_line_for = NoqaMapping::default();
let (count, output) = add_noqa_inner(
&diagnostics,
contents,
&commented_lines,
&[],
&Locator::new(contents),
&[],
&noqa_line_for,
LineEnding::Lf,
);
assert_eq!(count, 0);
assert_eq!(output, format!("{contents}\n"));
assert_eq!(output, format!("{contents}"));
let diagnostics = vec![Diagnostic::new(
let diagnostics = [Diagnostic::new(
pyflakes::rules::UnusedVariable {
name: "x".to_string(),
},
Range::new(Location::new(1, 0), Location::new(1, 0)),
TextRange::new(TextSize::from(0), TextSize::from(0)),
)];
let contents = "x = 1";
let commented_lines = vec![1];
let noqa_line_for = IntMap::default();
let noqa_line_for = NoqaMapping::default();
let (count, output) = add_noqa_inner(
&diagnostics,
contents,
&commented_lines,
&Locator::new(contents),
&[],
&noqa_line_for,
LineEnding::Lf,
);
assert_eq!(count, 1);
assert_eq!(output, "x = 1 # noqa: F841\n");
let diagnostics = vec![
let diagnostics = [
Diagnostic::new(
AmbiguousVariableName("x".to_string()),
Range::new(Location::new(1, 0), Location::new(1, 0)),
TextRange::new(TextSize::from(0), TextSize::from(0)),
),
Diagnostic::new(
pyflakes::rules::UnusedVariable {
name: "x".to_string(),
},
Range::new(Location::new(1, 0), Location::new(1, 0)),
TextRange::new(TextSize::from(0), TextSize::from(0)),
),
];
let contents = "x = 1 # noqa: E741\n";
let commented_lines = vec![1];
let noqa_line_for = IntMap::default();
let noqa_line_for = NoqaMapping::default();
let (count, output) = add_noqa_inner(
&diagnostics,
contents,
&commented_lines,
&Locator::new(contents),
&[TextRange::new(TextSize::from(7), TextSize::from(19))],
&noqa_line_for,
LineEnding::Lf,
);
assert_eq!(count, 1);
assert_eq!(output, "x = 1 # noqa: E741, F841\n");
let diagnostics = vec![
let diagnostics = [
Diagnostic::new(
AmbiguousVariableName("x".to_string()),
Range::new(Location::new(1, 0), Location::new(1, 0)),
TextRange::new(TextSize::from(0), TextSize::from(0)),
),
Diagnostic::new(
pyflakes::rules::UnusedVariable {
name: "x".to_string(),
},
Range::new(Location::new(1, 0), Location::new(1, 0)),
TextRange::new(TextSize::from(0), TextSize::from(0)),
),
];
let contents = "x = 1 # noqa";
let commented_lines = vec![1];
let noqa_line_for = IntMap::default();
let noqa_line_for = NoqaMapping::default();
let (count, output) = add_noqa_inner(
&diagnostics,
contents,
&commented_lines,
&Locator::new(contents),
&[TextRange::new(TextSize::from(7), TextSize::from(13))],
&noqa_line_for,
LineEnding::Lf,
);
assert_eq!(count, 0);
assert_eq!(output, "x = 1 # noqa\n");
assert_eq!(output, "x = 1 # noqa");
}
}

View file

@@ -1,9 +1,8 @@
use rustpython_parser::ast::Location;
use ruff_text_size::{TextLen, TextRange};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::source_code::Locator;
use ruff_python_ast::types::Range;
use crate::registry::Rule;
use crate::settings::{flags, Settings};
@@ -49,20 +48,20 @@ fn is_standalone_comment(line: &str) -> bool {
/// ERA001
pub fn commented_out_code(
locator: &Locator,
start: Location,
end: Location,
range: TextRange,
settings: &Settings,
autofix: flags::Autofix,
) -> Option<Diagnostic> {
let location = Location::new(start.row(), 0);
let end_location = Location::new(end.row() + 1, 0);
let line = locator.slice(Range::new(location, end_location));
let line = locator.full_lines(range);
// Verify that the comment is on its own line, and that it contains code.
if is_standalone_comment(line) && comment_contains_code(line, &settings.task_tags[..]) {
let mut diagnostic = Diagnostic::new(CommentedOutCode, Range::new(start, end));
let mut diagnostic = Diagnostic::new(CommentedOutCode, range);
if autofix.into() && settings.rules.should_fix(Rule::CommentedOutCode) {
diagnostic.set_fix(Edit::deletion(location, end_location));
diagnostic.set_fix(Edit::range_deletion(TextRange::at(
range.start(),
line.text_len(),
)));
}
Some(diagnostic)
} else {

View file

@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::Rule;
@@ -141,13 +140,13 @@ pub fn subscript(checker: &mut Checker, value: &Expr, slice: &Expr) {
{
checker
.diagnostics
.push(Diagnostic::new(SysVersionSlice1, Range::from(value)));
.push(Diagnostic::new(SysVersionSlice1, value.range()));
} else if *i == BigInt::from(3)
&& checker.settings.rules.enabled(Rule::SysVersionSlice3)
{
checker
.diagnostics
.push(Diagnostic::new(SysVersionSlice3, Range::from(value)));
.push(Diagnostic::new(SysVersionSlice3, value.range()));
}
}
}
@@ -159,12 +158,12 @@ pub fn subscript(checker: &mut Checker, value: &Expr, slice: &Expr) {
if *i == BigInt::from(2) && checker.settings.rules.enabled(Rule::SysVersion2) {
checker
.diagnostics
.push(Diagnostic::new(SysVersion2, Range::from(value)));
.push(Diagnostic::new(SysVersion2, value.range()));
} else if *i == BigInt::from(0) && checker.settings.rules.enabled(Rule::SysVersion0)
{
checker
.diagnostics
.push(Diagnostic::new(SysVersion0, Range::from(value)));
.push(Diagnostic::new(SysVersion0, value.range()));
}
}
@@ -200,7 +199,7 @@ pub fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &
{
checker
.diagnostics
.push(Diagnostic::new(SysVersionInfo0Eq3, Range::from(left)));
.push(Diagnostic::new(SysVersionInfo0Eq3, left.range()));
}
}
} else if *i == BigInt::from(1) {
@@ -219,7 +218,7 @@ pub fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &
if checker.settings.rules.enabled(Rule::SysVersionInfo1CmpInt) {
checker
.diagnostics
.push(Diagnostic::new(SysVersionInfo1CmpInt, Range::from(left)));
.push(Diagnostic::new(SysVersionInfo1CmpInt, left.range()));
}
}
}
@@ -246,10 +245,9 @@ pub fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &
.rules
.enabled(Rule::SysVersionInfoMinorCmpInt)
{
checker.diagnostics.push(Diagnostic::new(
SysVersionInfoMinorCmpInt,
Range::from(left),
));
checker
.diagnostics
.push(Diagnostic::new(SysVersionInfoMinorCmpInt, left.range()));
}
}
}
@@ -274,12 +272,12 @@ pub fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &
if checker.settings.rules.enabled(Rule::SysVersionCmpStr10) {
checker
.diagnostics
.push(Diagnostic::new(SysVersionCmpStr10, Range::from(left)));
.push(Diagnostic::new(SysVersionCmpStr10, left.range()));
}
} else if checker.settings.rules.enabled(Rule::SysVersionCmpStr3) {
checker
.diagnostics
.push(Diagnostic::new(SysVersionCmpStr3, Range::from(left)));
.push(Diagnostic::new(SysVersionCmpStr3, left.range()));
}
}
}
@@ -294,6 +292,6 @@ pub fn name_or_attribute(checker: &mut Checker, expr: &Expr) {
{
checker
.diagnostics
.push(Diagnostic::new(SixPY3, Range::from(expr)));
.push(Diagnostic::new(SixPY3, expr.range()));
}
}

View file

@@ -4,21 +4,19 @@ use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Edit;
use ruff_python_ast::source_code::Locator;
use ruff_python_ast::types::Range;
/// ANN204
pub fn add_return_annotation(locator: &Locator, stmt: &Stmt, annotation: &str) -> Result<Edit> {
let range = Range::from(stmt);
let contents = locator.slice(range);
let contents = &locator.contents()[stmt.range()];
// Find the colon (following the `def` keyword).
let mut seen_lpar = false;
let mut seen_rpar = false;
let mut count: usize = 0;
for (start, tok, ..) in lexer::lex_located(contents, Mode::Module, range.location).flatten() {
for (tok, range) in lexer::lex_located(contents, Mode::Module, stmt.start()).flatten() {
if seen_lpar && seen_rpar {
if matches!(tok, Tok::Colon) {
return Ok(Edit::insertion(format!(" -> {annotation}"), start));
return Ok(Edit::insertion(format!(" -> {annotation}"), range.start()));
}
}

View file

@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Stmt};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::ReturnStatementVisitor;
use ruff_python_ast::types::Range;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{cast, helpers};
use ruff_python_semantic::analyze::visibility;
@@ -446,7 +445,7 @@ fn check_dynamically_typed<F>(
if checker.ctx.match_typing_expr(annotation, "Any") {
diagnostics.push(Diagnostic::new(
AnyType { name: func() },
Range::from(annotation),
annotation.range(),
));
};
}
@@ -513,7 +512,7 @@ pub fn definition(
MissingTypeFunctionArgument {
name: arg.node.arg.to_string(),
},
Range::from(arg),
arg.range(),
));
}
}
@@ -544,7 +543,7 @@ pub fn definition(
MissingTypeArgs {
name: arg.node.arg.to_string(),
},
Range::from(arg),
arg.range(),
));
}
}
@@ -575,7 +574,7 @@ pub fn definition(
MissingTypeKwargs {
name: arg.node.arg.to_string(),
},
Range::from(arg),
arg.range(),
));
}
}
@@ -592,7 +591,7 @@ pub fn definition(
MissingTypeCls {
name: arg.node.arg.to_string(),
},
Range::from(arg),
arg.range(),
));
}
} else {
@@ -601,7 +600,7 @@ pub fn definition(
MissingTypeSelf {
name: arg.node.arg.to_string(),
},
Range::from(arg),
arg.range(),
));
}
}

View file

@@ -1,8 +1,8 @@
use ruff_text_size::{TextLen, TextRange};
use rustpython_parser::ast::Stmt;
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
/// ## What it does
/// Checks for uses of the `assert` keyword.
@@ -37,8 +37,5 @@ impl Violation for Assert {
/// S101
pub fn assert_used(stmt: &Stmt) -> Diagnostic {
Diagnostic::new(
Assert,
Range::new(stmt.location, stmt.location.with_col_offset("assert".len())),
)
Diagnostic::new(Assert, TextRange::at(stmt.start(), "assert".text_len()))
}
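TextRange::at(start, len) spans the half-open interval [start, start + len), so the diagnostic covers exactly the `assert` keyword. A sketch for a statement at offset 0:

use ruff_text_size::{TextLen, TextRange, TextSize};

fn assert_keyword_range() {
    let range = TextRange::at(TextSize::from(0), "assert".text_len());
    assert_eq!(range, TextRange::new(TextSize::from(0), TextSize::from(6)));
}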

View file

@@ -7,7 +7,6 @@ use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::call_path::compose_call_path;
use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@@ -114,7 +113,7 @@ pub fn bad_file_permissions(
if (int_value & WRITE_WORLD > 0) || (int_value & EXECUTE_GROUP > 0) {
checker.diagnostics.push(Diagnostic::new(
BadFilePermissions { mask: int_value },
Range::from(mode_arg),
mode_arg.range(),
));
}
}

View file

@@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
#[violation]
pub struct ExecBuiltin;
@@ -22,5 +21,5 @@ pub fn exec_used(expr: &Expr, func: &Expr) -> Option<Diagnostic> {
if id != "exec" {
return None;
}
Some(Diagnostic::new(ExecBuiltin, Range::from(expr)))
Some(Diagnostic::new(ExecBuiltin, expr.range()))
}

View file

@@ -1,6 +1,6 @@
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_text_size::TextRange;
#[violation]
pub struct HardcodedBindAllInterfaces;
@@ -13,9 +13,9 @@
}
/// S104
pub fn hardcoded_bind_all_interfaces(value: &str, range: &Range) -> Option<Diagnostic> {
pub fn hardcoded_bind_all_interfaces(value: &str, range: TextRange) -> Option<Diagnostic> {
if value == "0.0.0.0" {
Some(Diagnostic::new(HardcodedBindAllInterfaces, *range))
Some(Diagnostic::new(HardcodedBindAllInterfaces, range))
} else {
None
}

View file

@@ -2,7 +2,6 @@ use rustpython_parser::ast::{Arg, Arguments, Expr};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use super::super::helpers::{matches_password_name, string_literal};
@@ -29,7 +28,7 @@ fn check_password_kwarg(arg: &Arg, default: &Expr) -> Option<Diagnostic> {
HardcodedPasswordDefault {
string: string.to_string(),
},
Range::from(default),
default.range(),
))
}

View file

@@ -2,7 +2,6 @@ use rustpython_parser::ast::Keyword;
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use super::super::helpers::{matches_password_name, string_literal};
@@ -33,7 +32,7 @@ pub fn hardcoded_password_func_arg(keywords: &[Keyword]) -> Vec<Diagnostic> {
HardcodedPasswordFuncArg {
string: string.to_string(),
},
Range::from(keyword),
keyword.range(),
))
})
.collect()

View file

@@ -2,7 +2,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use super::super::helpers::{matches_password_name, string_literal};
@@ -52,7 +51,7 @@ pub fn compare_to_hardcoded_password_string(left: &Expr, comparators: &[Expr]) -
HardcodedPasswordString {
string: string.to_string(),
},
Range::from(comp),
comp.range(),
))
})
.collect()
@@ -67,7 +66,7 @@ pub fn assign_hardcoded_password_string(value: &Expr, targets: &[Expr]) -> Optio
HardcodedPasswordString {
string: string.to_string(),
},
Range::from(value),
value.range(),
));
}
}

View file

@@ -5,7 +5,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Operator};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::{any_over_expr, unparse_expr};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@@ -98,7 +97,7 @@ pub fn hardcoded_sql_expression(checker: &mut Checker, expr: &Expr) {
Some(string) if matches_sql_statement(&string) => {
checker
.diagnostics
.push(Diagnostic::new(HardcodedSQLExpression, Range::from(expr)));
.push(Diagnostic::new(HardcodedSQLExpression, expr.range()));
}
_ => (),
}

View file

@@ -2,7 +2,6 @@ use rustpython_parser::ast::Expr;
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
#[violation]
pub struct HardcodedTempFile {
@@ -31,7 +30,7 @@ pub fn hardcoded_tmp_directory(
HardcodedTempFile {
string: value.to_string(),
},
Range::from(expr),
expr.range(),
))
} else {
None

View file

@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@@ -74,7 +73,7 @@ pub fn hashlib_insecure_hash_functions(
HashlibInsecureHashFunction {
string: hash_func_name.to_string(),
},
Range::from(name_arg),
name_arg.range(),
));
}
}
@@ -91,7 +90,7 @@ pub fn hashlib_insecure_hash_functions(
HashlibInsecureHashFunction {
string: (*func_name).to_string(),
},
Range::from(func),
func.range(),
));
}
}

View file

@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@@ -57,20 +56,20 @@ pub fn jinja2_autoescape_false(
if id.as_str() != "select_autoescape" {
checker.diagnostics.push(Diagnostic::new(
Jinja2AutoescapeFalse { value: true },
Range::from(autoescape_arg),
autoescape_arg.range(),
));
}
}
}
_ => checker.diagnostics.push(Diagnostic::new(
Jinja2AutoescapeFalse { value: true },
Range::from(autoescape_arg),
autoescape_arg.range(),
)),
}
} else {
checker.diagnostics.push(Diagnostic::new(
Jinja2AutoescapeFalse { value: false },
Range::from(func),
func.range(),
));
}
}

View file

@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@@ -34,10 +33,9 @@ pub fn logging_config_insecure_listen(
let call_args = SimpleCallArgs::new(args, keywords);
if call_args.keyword_argument("verify").is_none() {
checker.diagnostics.push(Diagnostic::new(
LoggingConfigInsecureListen,
Range::from(func),
));
checker
.diagnostics
.push(Diagnostic::new(LoggingConfigInsecureListen, func.range()));
}
}
}

View file

@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@@ -66,7 +65,7 @@ pub fn request_with_no_cert_validation(
RequestWithNoCertValidation {
string: target.to_string(),
},
Range::from(verify_arg),
verify_arg.range(),
));
}
}

View file

@@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::{unparse_constant, SimpleCallArgs};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@@ -56,13 +55,13 @@ pub fn request_without_timeout(
RequestWithoutTimeout {
timeout: Some(timeout),
},
Range::from(timeout_arg),
timeout_arg.range(),
));
}
} else {
checker.diagnostics.push(Diagnostic::new(
RequestWithoutTimeout { timeout: None },
Range::from(func),
func.range(),
));
}
}

View file

@ -7,7 +7,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::Truthiness;
use ruff_python_ast::types::Range;
use ruff_python_semantic::context::Context;
use crate::{
@ -202,7 +201,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
SubprocessPopenWithShellEqualsTrue {
seems_safe: shell_call_seems_safe(arg),
},
Range::from(keyword),
keyword.range(),
));
}
}
@ -218,7 +217,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
{
checker.diagnostics.push(Diagnostic::new(
SubprocessWithoutShellEqualsTrue,
Range::from(keyword),
keyword.range(),
));
}
}
@ -231,7 +230,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
{
checker.diagnostics.push(Diagnostic::new(
SubprocessWithoutShellEqualsTrue,
Range::from(arg),
arg.range(),
));
}
}
@ -248,10 +247,9 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
.rules
.enabled(Rule::CallWithShellEqualsTrue)
{
checker.diagnostics.push(Diagnostic::new(
CallWithShellEqualsTrue,
Range::from(keyword),
));
checker
.diagnostics
.push(Diagnostic::new(CallWithShellEqualsTrue, keyword.range()));
}
}
@ -263,7 +261,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
StartProcessWithAShell {
seems_safe: shell_call_seems_safe(arg),
},
Range::from(arg),
arg.range(),
));
}
}
@ -278,7 +276,7 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
{
checker
.diagnostics
.push(Diagnostic::new(StartProcessWithNoShell, Range::from(func)));
.push(Diagnostic::new(StartProcessWithNoShell, func.range()));
}
}
@ -292,10 +290,9 @@ pub fn shell_injection(checker: &mut Checker, func: &Expr, args: &[Expr], keywor
{
if let Some(value) = try_string_literal(arg) {
if FULL_PATH_REGEX.find(value).is_none() {
checker.diagnostics.push(Diagnostic::new(
StartProcessWithPartialPath,
Range::from(arg),
));
checker
.diagnostics
.push(Diagnostic::new(StartProcessWithPartialPath, arg.range()));
}
}
}

View file

@ -4,7 +4,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -40,10 +39,9 @@ pub fn snmp_insecure_version(
} = &mp_model_arg.node
{
if value.is_zero() || value.is_one() {
checker.diagnostics.push(Diagnostic::new(
SnmpInsecureVersion,
Range::from(mp_model_arg),
));
checker
.diagnostics
.push(Diagnostic::new(SnmpInsecureVersion, mp_model_arg.range()));
}
}
}

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -38,7 +37,7 @@ pub fn snmp_weak_cryptography(
if call_args.len() < 3 {
checker
.diagnostics
.push(Diagnostic::new(SnmpWeakCryptography, Range::from(func)));
.push(Diagnostic::new(SnmpWeakCryptography, func.range()));
}
}
}

View file

@ -5,7 +5,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::AsRule;
@ -512,7 +511,7 @@ pub fn suspicious_function_call(checker: &mut Checker, expr: &Expr) {
Reason::Telnet => SuspiciousTelnetUsage.into(),
Reason::FTPLib => SuspiciousFTPLibUsage.into(),
};
let diagnostic = Diagnostic::new::<DiagnosticKind>(diagnostic_kind, Range::from(expr));
let diagnostic = Diagnostic::new::<DiagnosticKind>(diagnostic_kind, expr.range());
if checker.settings.rules.enabled(diagnostic.kind.rule()) {
checker.diagnostics.push(diagnostic);
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Excepthandler, Expr, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::rules::flake8_bandit::helpers::is_untyped_exception;
@ -30,9 +29,8 @@ pub fn try_except_continue(
&& body[0].node == StmtKind::Continue
&& (check_typed_exception || is_untyped_exception(type_, checker))
{
checker.diagnostics.push(Diagnostic::new(
TryExceptContinue,
Range::from(excepthandler),
));
checker
.diagnostics
.push(Diagnostic::new(TryExceptContinue, excepthandler.range()));
}
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Excepthandler, Expr, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::rules::flake8_bandit::helpers::is_untyped_exception;
@ -32,6 +31,6 @@ pub fn try_except_pass(
{
checker
.diagnostics
.push(Diagnostic::new(TryExceptPass, Range::from(excepthandler)));
.push(Diagnostic::new(TryExceptPass, excepthandler.range()));
}
}

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -55,13 +54,13 @@ pub fn unsafe_yaml_load(checker: &mut Checker, func: &Expr, args: &[Expr], keywo
};
checker.diagnostics.push(Diagnostic::new(
UnsafeYAMLLoad { loader },
Range::from(loader_arg),
loader_arg.range(),
));
}
} else {
checker.diagnostics.push(Diagnostic::new(
UnsafeYAMLLoad { loader: None },
Range::from(func),
func.range(),
));
}
}

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::{find_keyword, is_const_true};
use ruff_python_ast::types::Range;
use ruff_python_semantic::analyze::logging;
use crate::checkers::ast::Checker;
@ -84,7 +83,7 @@ pub fn blind_except(
BlindExcept {
name: id.to_string(),
},
Range::from(type_),
type_.range(),
));
}
}

View file

@ -4,7 +4,6 @@ use ruff_diagnostics::Violation;
use ruff_diagnostics::{Diagnostic, DiagnosticKind};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::call_path::collect_call_path;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -90,9 +89,7 @@ const fn is_boolean_arg(arg: &Expr) -> bool {
fn add_if_boolean(checker: &mut Checker, arg: &Expr, kind: DiagnosticKind) {
if is_boolean_arg(arg) {
checker
.diagnostics
.push(Diagnostic::new(kind, Range::from(arg)));
checker.diagnostics.push(Diagnostic::new(kind, arg.range()));
}
}
@ -134,7 +131,7 @@ pub fn check_positional_boolean_in_def(
}
checker.diagnostics.push(Diagnostic::new(
BooleanPositionalArgInFunctionDefinition,
Range::from(arg),
arg.range(),
));
}
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_semantic::analyze::visibility::{is_abstract, is_overload};
use crate::checkers::ast::Checker;
@ -128,7 +127,7 @@ pub fn abstract_base_class(
EmptyMethodWithoutAbstractDecorator {
name: format!("{name}.{method_name}"),
},
Range::from(stmt),
stmt.range(),
));
}
}
@ -142,7 +141,7 @@ pub fn abstract_base_class(
AbstractBaseClassWithoutAbstractMethod {
name: name.to_string(),
},
Range::from(stmt),
stmt.range(),
));
}
}

View file

@ -1,9 +1,9 @@
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Location, Stmt, StmtKind};
use ruff_text_size::TextSize;
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::unparse_stmt;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::AsRule;
@ -24,16 +24,16 @@ impl AlwaysAutofixableViolation for AssertFalse {
fn assertion_error(msg: Option<&Expr>) -> Stmt {
Stmt::new(
Location::default(),
Location::default(),
TextSize::default(),
TextSize::default(),
StmtKind::Raise {
exc: Some(Box::new(Expr::new(
Location::default(),
Location::default(),
TextSize::default(),
TextSize::default(),
ExprKind::Call {
func: Box::new(Expr::new(
Location::default(),
Location::default(),
TextSize::default(),
TextSize::default(),
ExprKind::Name {
id: "AssertionError".to_string(),
ctx: ExprContext::Load,
@ -61,12 +61,11 @@ pub fn assert_false(checker: &mut Checker, stmt: &Stmt, test: &Expr, msg: Option
return;
};
let mut diagnostic = Diagnostic::new(AssertFalse, Range::from(test));
let mut diagnostic = Diagnostic::new(AssertFalse, test.range());
if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement(
diagnostic.set_fix(Edit::range_replacement(
unparse_stmt(&assertion_error(msg), checker.stylist),
stmt.location,
stmt.end_location.unwrap(),
stmt.range(),
));
}
checker.diagnostics.push(diagnostic);
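
Two recurring shapes appear in this hunk: synthesized AST nodes now carry dummy `TextSize::default()` offsets (they have no source span to point at), and the three-argument `Edit::replacement(content, start, end)`, whose end came from `expr.end_location.unwrap()`, collapses into `Edit::range_replacement(content, range)`. A sketch of the edit shape with stand-in types (the real `Edit` lives in `ruff_diagnostics`; everything below is illustrative):

#[derive(Clone, Copy)]
struct TextRange { start: u32, end: u32 }

// Stand-in for ruff's autofix edit: replacement text plus the byte span
// it overwrites.
struct Edit {
    content: String,
    range: TextRange,
}

impl Edit {
    // Old shape: start and end threaded separately, with the end unwrapped
    // from an `Option` at every call site.
    fn replacement(content: String, start: u32, end: u32) -> Self {
        Self { content, range: TextRange { start, end } }
    }

    // New shape: the node's `range()` passes through as a single value.
    fn range_replacement(content: String, range: TextRange) -> Self {
        Self { content, range }
    }
}

fn apply(edit: &Edit, source: &str) -> String {
    let mut out = String::with_capacity(source.len());
    out.push_str(&source[..edit.range.start as usize]);
    out.push_str(&edit.content);
    out.push_str(&source[edit.range.end as usize..]);
    out
}

fn main() {
    let source = "assert False, \"oops\"";
    let range = TextRange { start: 0, end: source.len() as u32 };
    let fix = "raise AssertionError(\"oops\")";
    let old = Edit::replacement(fix.to_string(), range.start, range.end);
    let new = Edit::range_replacement(fix.to_string(), range);
    assert_eq!(apply(&old, source), apply(&new, source));
}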

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{ExprKind, Stmt, Withitem};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -99,6 +98,6 @@ pub fn assert_raises_exception(checker: &mut Checker, stmt: &Stmt, items: &[With
checker.diagnostics.push(Diagnostic::new(
AssertRaisesException { kind },
Range::from(stmt),
stmt.range(),
));
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -35,5 +34,5 @@ pub fn assignment_to_os_environ(checker: &mut Checker, targets: &[Expr]) {
}
checker
.diagnostics
.push(Diagnostic::new(AssignmentToOsEnviron, Range::from(target)));
.push(Diagnostic::new(AssignmentToOsEnviron, target.range()));
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_semantic::scope::ScopeKind;
use crate::checkers::ast::Checker;
@ -51,10 +50,9 @@ pub fn cached_instance_method(checker: &mut Checker, decorator_list: &[Expr]) {
_ => decorator,
},
) {
checker.diagnostics.push(Diagnostic::new(
CachedInstanceMethod,
Range::from(decorator),
));
checker
.diagnostics
.push(Diagnostic::new(CachedInstanceMethod, decorator.range()));
}
}
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -23,5 +22,5 @@ pub fn cannot_raise_literal(checker: &mut Checker, expr: &Expr) {
};
checker
.diagnostics
.push(Diagnostic::new(CannotRaiseLiteral, Range::from(expr)));
.push(Diagnostic::new(CannotRaiseLiteral, expr.range()));
}

View file

@ -1,8 +1,7 @@
use itertools::Itertools;
use ruff_text_size::TextSize;
use rustc_hash::{FxHashMap, FxHashSet};
use rustpython_parser::ast::{
Excepthandler, ExcepthandlerKind, Expr, ExprContext, ExprKind, Location,
};
use rustpython_parser::ast::{Excepthandler, ExcepthandlerKind, Expr, ExprContext, ExprKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Violation};
use ruff_diagnostics::{Diagnostic, Edit};
@ -10,7 +9,6 @@ use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::call_path;
use ruff_python_ast::call_path::CallPath;
use ruff_python_ast::helpers::unparse_expr;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::{AsRule, Rule};
@ -52,8 +50,8 @@ impl AlwaysAutofixableViolation for DuplicateHandlerException {
fn type_pattern(elts: Vec<&Expr>) -> Expr {
Expr::new(
Location::default(),
Location::default(),
TextSize::default(),
TextSize::default(),
ExprKind::Tuple {
elts: elts.into_iter().cloned().collect(),
ctx: ExprContext::Load,
@ -95,17 +93,16 @@ fn duplicate_handler_exceptions<'a>(
.sorted()
.collect::<Vec<String>>(),
},
Range::from(expr),
expr.range(),
);
if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement(
diagnostic.set_fix(Edit::range_replacement(
if unique_elts.len() == 1 {
unparse_expr(unique_elts[0], checker.stylist)
} else {
unparse_expr(&type_pattern(unique_elts), checker.stylist)
},
expr.location,
expr.end_location.unwrap(),
expr.range(),
));
}
checker.diagnostics.push(diagnostic);
@ -156,7 +153,7 @@ pub fn duplicate_exceptions(checker: &mut Checker, handlers: &[Excepthandler]) {
DuplicateTryBlockException {
name: name.join("."),
},
Range::from(expr),
expr.range(),
));
}
}

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::{ExcepthandlerKind, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -27,9 +26,8 @@ pub fn except_with_empty_tuple(checker: &mut Checker, excepthandler: &Excepthand
return;
};
if elts.is_empty() {
checker.diagnostics.push(Diagnostic::new(
ExceptWithEmptyTuple,
Range::from(excepthandler),
));
checker
.diagnostics
.push(Diagnostic::new(ExceptWithEmptyTuple, excepthandler.range()));
}
}

View file

@ -4,7 +4,6 @@ use rustpython_parser::ast::{Excepthandler, ExcepthandlerKind, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -55,10 +54,9 @@ pub fn except_with_non_exception_classes(checker: &mut Checker, excepthandler: &
| ExprKind::Name { .. }
| ExprKind::Call { .. },
) {
checker.diagnostics.push(Diagnostic::new(
ExceptWithNonExceptionClasses,
Range::from(expr),
));
checker
.diagnostics
.push(Diagnostic::new(ExceptWithNonExceptionClasses, expr.range()));
}
}
}

View file

@ -1,3 +1,4 @@
use ruff_text_size::TextRange;
use rustpython_parser::ast::{Arguments, Constant, Expr, ExprKind};
use ruff_diagnostics::Violation;
@ -5,7 +6,6 @@ use ruff_diagnostics::{Diagnostic, DiagnosticKind};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::call_path::from_qualified_name;
use ruff_python_ast::call_path::{compose_call_path, CallPath};
use ruff_python_ast::types::Range;
use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor;
@ -61,7 +61,7 @@ fn is_immutable_func(checker: &Checker, func: &Expr, extend_immutable_calls: &[C
struct ArgumentDefaultVisitor<'a> {
checker: &'a Checker<'a>,
diagnostics: Vec<(DiagnosticKind, Range)>,
diagnostics: Vec<(DiagnosticKind, TextRange)>,
extend_immutable_calls: Vec<CallPath<'a>>,
}
@ -81,7 +81,7 @@ where
name: compose_call_path(func),
}
.into(),
Range::from(expr),
expr.range(),
));
}
visitor::walk_expr(self, expr);

View file

@ -1,10 +1,11 @@
use ruff_text_size::TextRange;
use rustc_hash::FxHashSet;
use rustpython_parser::ast::{Comprehension, Expr, ExprContext, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::collect_arg_names;
use ruff_python_ast::types::{Node, Range};
use ruff_python_ast::types::Node;
use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor;
@ -26,9 +27,9 @@ impl Violation for FunctionUsesLoopVariable {
#[derive(Default)]
struct LoadedNamesVisitor<'a> {
// Tuple of: name, defining expression, and defining range.
loaded: Vec<(&'a str, &'a Expr, Range)>,
loaded: Vec<(&'a str, &'a Expr, TextRange)>,
// Tuple of: name, defining expression, and defining range.
stored: Vec<(&'a str, &'a Expr, Range)>,
stored: Vec<(&'a str, &'a Expr, TextRange)>,
}
/// `Visitor` to collect all used identifiers in a statement.
@ -39,8 +40,8 @@ where
fn visit_expr(&mut self, expr: &'b Expr) {
match &expr.node {
ExprKind::Name { id, ctx } => match ctx {
ExprContext::Load => self.loaded.push((id, expr, Range::from(expr))),
ExprContext::Store => self.stored.push((id, expr, Range::from(expr))),
ExprContext::Load => self.loaded.push((id, expr, expr.range())),
ExprContext::Store => self.stored.push((id, expr, expr.range())),
ExprContext::Del => {}
},
_ => visitor::walk_expr(self, expr),
@ -50,7 +51,7 @@ where
#[derive(Default)]
struct SuspiciousVariablesVisitor<'a> {
names: Vec<(&'a str, &'a Expr, Range)>,
names: Vec<(&'a str, &'a Expr, TextRange)>,
safe_functions: Vec<&'a Expr>,
}
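
Visitor state in this file swaps `Range` (a pair of row/column `Location`s) for `TextRange`. A byte range is just two integers, so it is a cheap `Copy` value that can be recorded alongside each collected name. A minimal sketch of the collection pattern with stand-in types (the real visitor walks a full AST):

#[derive(Clone, Copy, Debug)]
struct TextRange { start: u32, end: u32 }

// Stand-in for a name expression together with its span.
struct Name<'a> {
    id: &'a str,
    range: TextRange,
}

#[derive(Default)]
struct LoadedNamesVisitor<'a> {
    loaded: Vec<(&'a str, TextRange)>,
    stored: Vec<(&'a str, TextRange)>,
}

impl<'a> LoadedNamesVisitor<'a> {
    fn visit_load(&mut self, name: &Name<'a>) {
        // Copying the range is trivially cheap; no lifetime tie back to the
        // expression node is needed just to remember where it was.
        self.loaded.push((name.id, name.range));
    }
}

fn main() {
    let mut visitor = LoadedNamesVisitor::default();
    let name = Name { id: "i", range: TextRange { start: 4, end: 5 } };
    visitor.visit_load(&name);
    assert_eq!(visitor.loaded.len(), 1);
    assert!(visitor.stored.is_empty());
}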

View file

@ -1,9 +1,9 @@
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Location};
use ruff_text_size::TextSize;
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::unparse_expr;
use ruff_python_ast::types::Range;
use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private};
use crate::checkers::ast::Checker;
@ -27,8 +27,8 @@ impl AlwaysAutofixableViolation for GetAttrWithConstant {
}
fn attribute(value: &Expr, attr: &str) -> Expr {
Expr::new(
Location::default(),
Location::default(),
TextSize::default(),
TextSize::default(),
ExprKind::Attribute {
value: Box::new(value.clone()),
attr: attr.to_string(),
@ -61,13 +61,12 @@ pub fn getattr_with_constant(checker: &mut Checker, expr: &Expr, func: &Expr, ar
return;
}
let mut diagnostic = Diagnostic::new(GetAttrWithConstant, Range::from(expr));
let mut diagnostic = Diagnostic::new(GetAttrWithConstant, expr.range());
if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement(
diagnostic.set_fix(Edit::range_replacement(
unparse_expr(&attribute(obj, value), checker.stylist),
expr.location,
expr.end_location.unwrap(),
expr.range(),
));
}
checker.diagnostics.push(diagnostic);

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -33,7 +32,7 @@ fn walk_stmt(checker: &mut Checker, body: &[Stmt], f: fn(&Stmt) -> bool) {
),
},
},
Range::from(stmt),
stmt.range(),
));
}
match &stmt.node {

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor;
@ -74,7 +73,7 @@ pub fn loop_variable_overrides_iterator(checker: &mut Checker, target: &Expr, it
LoopVariableOverridesIterator {
name: name.to_string(),
},
Range::from(expr),
expr.range(),
));
}
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Arguments, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_semantic::analyze::typing::is_immutable_annotation;
use crate::checkers::ast::Checker;
@ -74,10 +73,9 @@ pub fn mutable_argument_default(checker: &mut Checker, arguments: &Arguments) {
.as_ref()
.map_or(false, |expr| is_immutable_annotation(&checker.ctx, expr))
{
checker.diagnostics.push(Diagnostic::new(
MutableArgumentDefault,
Range::from(default),
));
checker
.diagnostics
.push(Diagnostic::new(MutableArgumentDefault, default.range()));
}
}
}

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::{Expr, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::SimpleCallArgs;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -64,5 +63,5 @@ pub fn no_explicit_stacklevel(
checker
.diagnostics
.push(Diagnostic::new(NoExplicitStacklevel, Range::from(func)));
.push(Diagnostic::new(NoExplicitStacklevel, func.range()));
}

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::{Excepthandler, ExcepthandlerKind, ExprKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::unparse_expr;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::AsRule;
@ -45,13 +44,12 @@ pub fn redundant_tuple_in_exception_handler(checker: &mut Checker, handlers: &[E
RedundantTupleInExceptionHandler {
name: unparse_expr(elt, checker.stylist),
},
Range::from(type_),
type_.range(),
);
if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement(
diagnostic.set_fix(Edit::range_replacement(
unparse_expr(elt, checker.stylist),
type_.location,
type_.end_location.unwrap(),
type_.range(),
));
}
checker.diagnostics.push(diagnostic);

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Comprehension, Expr, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_ast::visitor::{self, Visitor};
use crate::checkers::ast::Checker;
@ -339,6 +338,6 @@ pub fn reuse_of_groupby_generator(
for expr in finder.exprs {
checker
.diagnostics
.push(Diagnostic::new(ReuseOfGroupbyGenerator, Range::from(expr)));
.push(Diagnostic::new(ReuseOfGroupbyGenerator, expr.range()));
}
}

View file

@ -1,10 +1,10 @@
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Location, Stmt, StmtKind};
use ruff_text_size::TextSize;
use rustpython_parser::ast::{Constant, Expr, ExprContext, ExprKind, Stmt, StmtKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::unparse_stmt;
use ruff_python_ast::source_code::Stylist;
use ruff_python_ast::types::Range;
use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private};
use crate::checkers::ast::Checker;
@ -29,12 +29,12 @@ impl AlwaysAutofixableViolation for SetAttrWithConstant {
fn assignment(obj: &Expr, name: &str, value: &Expr, stylist: &Stylist) -> String {
let stmt = Stmt::new(
Location::default(),
Location::default(),
TextSize::default(),
TextSize::default(),
StmtKind::Assign {
targets: vec![Expr::new(
Location::default(),
Location::default(),
TextSize::default(),
TextSize::default(),
ExprKind::Attribute {
value: Box::new(obj.clone()),
attr: name.to_string(),
@ -76,13 +76,12 @@ pub fn setattr_with_constant(checker: &mut Checker, expr: &Expr, func: &Expr, ar
// (i.e., it's directly within an `StmtKind::Expr`).
if let StmtKind::Expr { value: child } = &checker.ctx.current_stmt().node {
if expr == child.as_ref() {
let mut diagnostic = Diagnostic::new(SetAttrWithConstant, Range::from(expr));
let mut diagnostic = Diagnostic::new(SetAttrWithConstant, expr.range());
if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Edit::replacement(
diagnostic.set_fix(Edit::range_replacement(
assignment(obj, name, value, checker.stylist),
expr.location,
expr.end_location.unwrap(),
expr.range(),
));
}
checker.diagnostics.push(diagnostic);

View file

@ -11,7 +11,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Keyword};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -38,12 +37,12 @@ pub fn star_arg_unpacking_after_keyword_arg(
let ExprKind::Starred { .. } = arg.node else {
continue;
};
if arg.location <= keyword.location {
if arg.start() <= keyword.start() {
continue;
}
checker.diagnostics.push(Diagnostic::new(
StarArgUnpackingAfterKeywordArg,
Range::from(arg),
arg.range(),
));
}
}
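
The position comparison above also changes meaning: `arg.location <= keyword.location` compared `(row, column)` pairs lexicographically, while `arg.start() <= keyword.start()` compares plain byte offsets. A sketch contrasting the two orderings (stand-in types; the offsets are hypothetical):

// Old scheme: a source position was a (row, column) pair, ordered
// lexicographically (row first, then column).
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct Location { row: u32, column: u32 }

// New scheme: a position is a single byte offset, so `<=` is plain integer
// comparison with no tuple-ordering rules.
type TextSize = u32;

fn main() {
    let old_arg = Location { row: 3, column: 7 };
    let old_keyword = Location { row: 4, column: 0 };
    assert!(old_arg <= old_keyword); // lexicographic on (row, column)

    let new_arg: TextSize = 58; // byte offset of the starred argument
    let new_keyword: TextSize = 71; // byte offset of the keyword argument
    assert!(new_arg <= new_keyword); // plain integer comparison
}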

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -40,6 +39,6 @@ pub fn strip_with_multi_characters(checker: &mut Checker, expr: &Expr, func: &Ex
if num_chars > 1 && num_chars != value.chars().unique().count() {
checker
.diagnostics
.push(Diagnostic::new(StripWithMultiCharacters, Range::from(expr)));
.push(Diagnostic::new(StripWithMultiCharacters, expr.range()));
}
}

View file

@ -21,7 +21,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Unaryop};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -48,5 +47,5 @@ pub fn unary_prefix_increment(checker: &mut Checker, expr: &Expr, op: &Unaryop,
}
checker
.diagnostics
.push(Diagnostic::new(UnaryPrefixIncrement, Range::from(expr)));
.push(Diagnostic::new(UnaryPrefixIncrement, expr.range()));
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind, Stmt};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -47,19 +46,17 @@ pub fn unintentional_type_annotation(
match &target.node {
ExprKind::Subscript { value, .. } => {
if matches!(&value.node, ExprKind::Name { .. }) {
checker.diagnostics.push(Diagnostic::new(
UnintentionalTypeAnnotation,
Range::from(stmt),
));
checker
.diagnostics
.push(Diagnostic::new(UnintentionalTypeAnnotation, stmt.range()));
}
}
ExprKind::Attribute { value, .. } => {
if let ExprKind::Name { id, .. } = &value.node {
if id != "self" {
checker.diagnostics.push(Diagnostic::new(
UnintentionalTypeAnnotation,
Range::from(stmt),
));
checker
.diagnostics
.push(Diagnostic::new(UnintentionalTypeAnnotation, stmt.range()));
}
}
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -42,5 +41,5 @@ pub fn unreliable_callable_check(checker: &mut Checker, expr: &Expr, func: &Expr
}
checker
.diagnostics
.push(Diagnostic::new(UnreliableCallableCheck, Range::from(expr)));
.push(Diagnostic::new(UnreliableCallableCheck, expr.range()));
}

View file

@ -24,7 +24,7 @@ use serde::{Deserialize, Serialize};
use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::{Range, RefEquality};
use ruff_python_ast::types::RefEquality;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{helpers, visitor};
@ -160,7 +160,7 @@ pub fn unused_loop_control_variable(
rename: rename.clone(),
certainty,
},
Range::from(expr),
expr.range(),
);
if let Some(rename) = rename {
if certainty.into() && checker.patch(diagnostic.kind.rule()) {
@ -176,11 +176,7 @@ pub fn unused_loop_control_variable(
if let Some(binding) = binding {
if binding.kind.is_loop_var() {
if !binding.used() {
diagnostic.set_fix(Edit::replacement(
rename,
expr.location,
expr.end_location.unwrap(),
));
diagnostic.set_fix(Edit::range_replacement(rename, expr.range()));
}
}
}

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -24,6 +23,6 @@ pub fn useless_comparison(checker: &mut Checker, expr: &Expr) {
if matches!(expr.node, ExprKind::Compare { .. }) {
checker
.diagnostics
.push(Diagnostic::new(UselessComparison, Range::from(expr)));
.push(Diagnostic::new(UselessComparison, expr.range()));
}
}

View file

@ -1,10 +1,8 @@
use rustpython_parser::ast::Expr;
use crate::checkers::ast::Checker;
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
#[violation]
pub struct UselessContextlibSuppress;
@ -29,9 +27,8 @@ pub fn useless_contextlib_suppress(checker: &mut Checker, expr: &Expr, func: &Ex
call_path.as_slice() == ["contextlib", "suppress"]
})
{
checker.diagnostics.push(Diagnostic::new(
UselessContextlibSuppress,
Range::from(expr),
));
checker
.diagnostics
.push(Diagnostic::new(UselessContextlibSuppress, expr.range()));
}
}

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::contains_effect;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
@ -62,7 +61,7 @@ pub fn useless_expression(checker: &mut Checker, value: &Expr) {
UselessExpression {
kind: Kind::Attribute,
},
Range::from(value),
value.range(),
));
}
return;
@ -72,6 +71,6 @@ pub fn useless_expression(checker: &mut Checker, value: &Expr) {
UselessExpression {
kind: Kind::Expression,
},
Range::from(value),
value.range(),
));
}

View file

@ -1,10 +1,8 @@
use rustpython_parser::ast::{Expr, ExprKind, Keyword};
use crate::checkers::ast::Checker;
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
#[violation]
pub struct ZipWithoutExplicitStrict;
@ -36,7 +34,7 @@ pub fn zip_without_explicit_strict(
{
checker
.diagnostics
.push(Diagnostic::new(ZipWithoutExplicitStrict, Range::from(expr)));
.push(Diagnostic::new(ZipWithoutExplicitStrict, expr.range()));
}
}
}

View file

@ -3,7 +3,6 @@ use rustpython_parser::ast::Located;
use ruff_diagnostics::Violation;
use ruff_diagnostics::{Diagnostic, DiagnosticKind};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use ruff_python_stdlib::builtins::BUILTINS;
use super::types::ShadowingType;
@ -191,7 +190,7 @@ pub fn builtin_shadowing<T>(
}
.into(),
},
Range::from(located),
located.range(),
))
} else {
None

View file

@ -1,4 +1,5 @@
use itertools::Itertools;
use ruff_text_size::TextRange;
use rustpython_parser::lexer::{LexResult, Spanned};
use rustpython_parser::Tok;
@ -6,7 +7,6 @@ use ruff_diagnostics::{AlwaysAutofixableViolation, Violation};
use ruff_diagnostics::{Diagnostic, Edit};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::source_code::Locator;
use ruff_python_ast::types::Range;
use crate::registry::Rule;
use crate::settings::{flags, Settings};
@ -46,7 +46,7 @@ impl<'tok> Token<'tok> {
}
const fn from_spanned(spanned: &'tok Spanned) -> Token<'tok> {
let type_ = match &spanned.1 {
let type_ = match &spanned.0 {
Tok::NonLogicalNewline => TokenType::NonLogicalNewline,
Tok::Newline => TokenType::Newline,
Tok::For => TokenType::For,
@ -161,7 +161,7 @@ pub fn trailing_commas(
.iter()
.flatten()
// Completely ignore comments -- they just interfere with the logic.
.filter(|&r| !matches!(r, (_, Tok::Comment(_), _)))
.filter(|&r| !matches!(r, (Tok::Comment(_), _)))
.map(Token::from_spanned);
let tokens = [Token::irrelevant(), Token::irrelevant()]
.into_iter()
@ -253,15 +253,9 @@ pub fn trailing_commas(
};
if comma_prohibited {
let comma = prev.spanned.unwrap();
let mut diagnostic = Diagnostic::new(
ProhibitedTrailingComma,
Range {
location: comma.0,
end_location: comma.2,
},
);
let mut diagnostic = Diagnostic::new(ProhibitedTrailingComma, comma.1);
if autofix.into() && settings.rules.should_fix(Rule::ProhibitedTrailingComma) {
diagnostic.set_fix(Edit::deletion(comma.0, comma.2));
diagnostic.set_fix(Edit::range_deletion(diagnostic.range()));
}
diagnostics.push(diagnostic);
}
@ -272,13 +266,7 @@ pub fn trailing_commas(
prev.type_ == TokenType::Comma && token.type_ == TokenType::Newline;
if bare_comma_prohibited {
let comma = prev.spanned.unwrap();
diagnostics.push(Diagnostic::new(
TrailingCommaOnBareTuple,
Range {
location: comma.0,
end_location: comma.2,
},
));
diagnostics.push(Diagnostic::new(TrailingCommaOnBareTuple, comma.1));
}
// Comma is required if:
@ -299,21 +287,17 @@ pub fn trailing_commas(
let missing_comma = prev_prev.spanned.unwrap();
let mut diagnostic = Diagnostic::new(
MissingTrailingComma,
Range {
location: missing_comma.2,
end_location: missing_comma.2,
},
TextRange::empty(missing_comma.1.end()),
);
if autofix.into() && settings.rules.should_fix(Rule::MissingTrailingComma) {
// Create a replacement that includes the final bracket (or other token),
// rather than just inserting a comma at the end. This prevents the UP034 autofix
// removing any brackets in the same linter pass - doing both at the same time could
// lead to a syntax error.
let contents = locator.slice(Range::new(missing_comma.0, missing_comma.2));
diagnostic.set_fix(Edit::replacement(
let contents = locator.slice(missing_comma.1);
diagnostic.set_fix(Edit::range_replacement(
format!("{contents},"),
missing_comma.0,
missing_comma.2,
missing_comma.1,
));
}
diagnostics.push(diagnostic);
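
This file shows the lexer-facing side of the migration: the spanned token goes from a three-tuple `(Location, Tok, Location)` (hence the old `spanned.1` and `comma.0`/`comma.2` accesses) to `(Tok, TextRange)` (hence the new `spanned.0` and `comma.1`). A missing trailing comma is reported at the zero-width range `TextRange::empty(missing_comma.1.end())`, and the prohibited-comma fix becomes `Edit::range_deletion(diagnostic.range())`, deleting exactly the diagnostic's span. A sketch of the new token shape with stand-in types:

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct TextRange { start: u32, end: u32 }

impl TextRange {
    // An empty range marks a pure insertion point, as used by the
    // missing-comma fix: zero-width, anchored at `offset`.
    fn empty(offset: u32) -> Self {
        Self { start: offset, end: offset }
    }
    fn end(&self) -> u32 { self.end }
}

// Stand-in for the lexer's spanned token: the token paired with one range,
// rather than bracketed by two locations.
type Spanned<Tok> = (Tok, TextRange);

fn main() {
    let comma: Spanned<char> = (',', TextRange { start: 10, end: 11 });
    // A missing trailing comma is reported at a zero-width range right
    // after the previous token.
    let insertion = TextRange::empty(comma.1.end());
    assert_eq!(insertion, TextRange { start: 11, end: 11 });
}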

View file

@ -35,7 +35,7 @@ pub fn fix_unnecessary_generator_list(
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
// Expr(Call(GeneratorExp)))) -> Expr(ListComp)))
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -67,11 +67,7 @@ pub fn fix_unnecessary_generator_list(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C401) Convert `set(x for x in y)` to `{x for x in y}`.
@ -82,7 +78,7 @@ pub fn fix_unnecessary_generator_set(
parent: Option<&rustpython_parser::ast::Expr>,
) -> Result<Edit> {
// Expr(Call(GeneratorExp)))) -> Expr(SetComp)))
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -124,11 +120,7 @@ pub fn fix_unnecessary_generator_set(
}
}
Ok(Edit::replacement(
content,
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(content, expr.range()))
}
/// (C402) Convert `dict((x, x) for x in range(3))` to `{x: x for x in
@ -139,7 +131,7 @@ pub fn fix_unnecessary_generator_dict(
expr: &rustpython_parser::ast::Expr,
parent: Option<&rustpython_parser::ast::Expr>,
) -> Result<Edit> {
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -198,11 +190,7 @@ pub fn fix_unnecessary_generator_dict(
}
}
Ok(Edit::replacement(
content,
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(content, expr.range()))
}
/// (C403) Convert `set([x for x in y])` to `{x for x in y}`.
@ -213,7 +201,7 @@ pub fn fix_unnecessary_list_comprehension_set(
) -> Result<Edit> {
// Expr(Call(ListComp)))) ->
// Expr(SetComp)))
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -243,11 +231,7 @@ pub fn fix_unnecessary_list_comprehension_set(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C404) Convert `dict([(i, i) for i in range(3)])` to `{i: i for i in
@ -257,7 +241,7 @@ pub fn fix_unnecessary_list_comprehension_dict(
stylist: &Stylist,
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -299,11 +283,7 @@ pub fn fix_unnecessary_list_comprehension_dict(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// Drop a trailing comma from a list of tuple elements.
@ -356,7 +336,7 @@ pub fn fix_unnecessary_literal_set(
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
// Expr(Call(List|Tuple)))) -> Expr(Set)))
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let mut call = match_call(body)?;
@ -393,11 +373,7 @@ pub fn fix_unnecessary_literal_set(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C406) Convert `dict([(1, 2)])` to `{1: 2}`.
@ -407,7 +383,7 @@ pub fn fix_unnecessary_literal_dict(
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
// Expr(Call(List|Tuple)))) -> Expr(Dict)))
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -466,11 +442,7 @@ pub fn fix_unnecessary_literal_dict(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C408)
@ -480,7 +452,7 @@ pub fn fix_unnecessary_collection_call(
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
// Expr(Call("list" | "tuple" | "dict")))) -> Expr(List|Tuple|Dict)
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -582,11 +554,7 @@ pub fn fix_unnecessary_collection_call(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C409) Convert `tuple([1, 2])` to `tuple(1, 2)`
@ -595,7 +563,7 @@ pub fn fix_unnecessary_literal_within_tuple_call(
stylist: &Stylist,
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -641,11 +609,7 @@ pub fn fix_unnecessary_literal_within_tuple_call(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C410) Convert `list([1, 2])` to `[1, 2]`
@ -654,7 +618,7 @@ pub fn fix_unnecessary_literal_within_list_call(
stylist: &Stylist,
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -702,11 +666,7 @@ pub fn fix_unnecessary_literal_within_list_call(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C411) Convert `list([i * i for i in x])` to `[i * i for i in x]`.
@ -716,7 +676,7 @@ pub fn fix_unnecessary_list_call(
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
// Expr(Call(List|Tuple)))) -> Expr(List|Tuple)))
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -731,11 +691,7 @@ pub fn fix_unnecessary_list_call(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C413) Convert `list(sorted([2, 3, 1]))` to `sorted([2, 3, 1])`.
@ -746,7 +702,7 @@ pub fn fix_unnecessary_call_around_sorted(
stylist: &Stylist,
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let outer_call = match_call(body)?;
@ -860,11 +816,7 @@ pub fn fix_unnecessary_call_around_sorted(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C414) Convert `sorted(list(foo))` to `sorted(foo)`
@ -873,7 +825,7 @@ pub fn fix_unnecessary_double_cast_or_process(
stylist: &Stylist,
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let body = match_expr(&mut tree)?;
let mut outer_call = match_call(body)?;
@ -901,11 +853,7 @@ pub fn fix_unnecessary_double_cast_or_process(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C416) Convert `[i for i in x]` to `list(x)`.
@ -914,7 +862,7 @@ pub fn fix_unnecessary_comprehension(
stylist: &Stylist,
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
@ -997,11 +945,7 @@ pub fn fix_unnecessary_comprehension(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C417) Convert `map(lambda x: x * 2, bar)` to `(x * 2 for x in bar)`.
@ -1012,7 +956,7 @@ pub fn fix_unnecessary_map(
parent: Option<&rustpython_parser::ast::Expr>,
kind: &str,
) -> Result<Edit> {
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -1164,11 +1108,7 @@ pub fn fix_unnecessary_map(
}
}
Ok(Edit::replacement(
content,
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(content, expr.range()))
} else {
bail!("Should have two arguments");
}
@ -1180,7 +1120,7 @@ pub fn fix_unnecessary_literal_within_dict_call(
stylist: &Stylist,
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let mut body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -1195,11 +1135,7 @@ pub fn fix_unnecessary_literal_within_dict_call(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
/// (C419) Convert `[i for i in a]` into `i for i in a`
@ -1209,7 +1145,7 @@ pub fn fix_unnecessary_comprehension_any_all(
expr: &rustpython_parser::ast::Expr,
) -> Result<Edit> {
// Expr(ListComp) -> Expr(GeneratorExp)
let module_text = locator.slice(expr);
let module_text = locator.slice(expr.range());
let mut tree = match_module(module_text)?;
let body = match_expr(&mut tree)?;
let call = match_call(body)?;
@ -1239,9 +1175,5 @@ pub fn fix_unnecessary_comprehension_any_all(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
expr.location,
expr.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), expr.range()))
}
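
Throughout these fixes, `locator.slice(expr)` becomes `locator.slice(expr.range())`: the locator hands out source text for an explicit byte range rather than converting row/column positions first. A minimal sketch of that slicing contract (stand-in types; the span below is hypothetical):

#[derive(Clone, Copy)]
struct TextRange { start: u32, end: u32 }

// Stand-in for ruff's `Locator`: a thin wrapper over the source text that
// returns `&str` slices for byte ranges.
struct Locator<'a> {
    contents: &'a str,
}

impl<'a> Locator<'a> {
    fn slice(&self, range: TextRange) -> &'a str {
        &self.contents[range.start as usize..range.end as usize]
    }
}

fn main() {
    let locator = Locator { contents: "x = list([1, 2])" };
    // Hypothetical span of the `list([1, 2])` call expression.
    let expr_range = TextRange { start: 4, end: 16 };
    assert_eq!(locator.slice(expr_range), "list([1, 2])");
}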

View file

@ -1,12 +1,10 @@
use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::AsRule;
use crate::rules::flake8_comprehensions::fixes;
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
use ruff_macros::{derive_message_formats, violation};
use super::helpers;
@ -83,7 +81,7 @@ pub fn unnecessary_call_around_sorted(
UnnecessaryCallAroundSorted {
func: outer.to_string(),
},
Range::from(expr),
expr.range(),
);
if checker.patch(diagnostic.kind.rule()) {
diagnostic.try_set_fix(|| {

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, Keyword};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::AsRule;
@ -87,7 +86,7 @@ pub fn unnecessary_collection_call(
UnnecessaryCollectionCall {
obj_type: id.to_string(),
},
Range::from(expr),
expr.range(),
);
if checker.patch(diagnostic.kind.rule()) {
diagnostic.try_set_fix(|| {

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Comprehension, Expr, ExprKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::AsRule;
@ -64,7 +63,7 @@ fn add_diagnostic(checker: &mut Checker, expr: &Expr) {
UnnecessaryComprehension {
obj_type: id.to_string(),
},
Range::from(expr),
expr.range(),
);
if checker.patch(diagnostic.kind.rule()) {
diagnostic.try_set_fix(|| {

View file

@ -4,7 +4,6 @@ use ruff_diagnostics::AlwaysAutofixableViolation;
use ruff_diagnostics::Diagnostic;
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::any_over_expr;
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::AsRule;
@ -78,7 +77,7 @@ pub fn unnecessary_comprehension_any_all(
if !checker.ctx.is_builtin(id) {
return;
}
let mut diagnostic = Diagnostic::new(UnnecessaryComprehensionAnyAll, Range::from(&args[0]));
let mut diagnostic = Diagnostic::new(UnnecessaryComprehensionAnyAll, args[0].range());
if checker.patch(diagnostic.kind.rule()) {
diagnostic.try_set_fix(|| {
fixes::fix_unnecessary_comprehension_any_all(checker.locator, checker.stylist, expr)

View file

@ -2,7 +2,6 @@ use rustpython_parser::ast::{Expr, ExprKind};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::AsRule;
@ -108,7 +107,7 @@ pub fn unnecessary_double_cast_or_process(
inner: inner.to_string(),
outer: outer.to_string(),
},
Range::from(expr),
expr.range(),
);
if checker.patch(diagnostic.kind.rule()) {
diagnostic.try_set_fix(|| {
