Rename ruff crate to ruff_linter (#7529)

Charlie Marsh 2023-09-20 02:38:27 -04:00 committed by GitHub
parent dcbd8eacd8
commit 5849a75223
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4397 changed files with 93921 additions and 93915 deletions

@@ -0,0 +1,166 @@
//! Interface for editing code snippets. These functions take statements or expressions as input,
//! and return the modified code snippet as output.
use anyhow::{bail, Result};
use libcst_native::{
Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement,
};
use ruff_python_ast::Stmt;
use ruff_python_codegen::Stylist;
use ruff_source_file::Locator;
use crate::cst::helpers::compose_module_path;
use crate::cst::matchers::match_statement;
/// Glue code to make libcst codegen work with ruff's Stylist
pub(crate) trait CodegenStylist<'a>: Codegen<'a> {
fn codegen_stylist(&self, stylist: &'a Stylist) -> String;
}
impl<'a, T: Codegen<'a>> CodegenStylist<'a> for T {
fn codegen_stylist(&self, stylist: &'a Stylist) -> String {
let mut state = CodegenState {
default_newline: stylist.line_ending().as_str(),
default_indent: stylist.indentation(),
..Default::default()
};
self.codegen(&mut state);
state.to_string()
}
}
/// Given an import statement, remove any imports that are specified in the `member_names` iterator.
///
/// Returns `Ok(None)` if the statement is empty after removing the imports.
pub(crate) fn remove_imports<'a>(
member_names: impl Iterator<Item = &'a str>,
stmt: &Stmt,
locator: &Locator,
stylist: &Stylist,
) -> Result<Option<String>> {
let module_text = locator.slice(stmt);
let mut tree = match_statement(module_text)?;
let Statement::Simple(body) = &mut tree else {
bail!("Expected Statement::Simple");
};
let aliases = match body.body.first_mut() {
Some(SmallStatement::Import(import_body)) => &mut import_body.names,
Some(SmallStatement::ImportFrom(import_body)) => {
if let ImportNames::Aliases(names) = &mut import_body.names {
names
} else if let ImportNames::Star(..) = &import_body.names {
// Special-case: if the import is a `from ... import *`, then we delete the
// entire statement.
let mut found_star = false;
for member in member_names {
if member == "*" {
found_star = true;
} else {
bail!("Expected \"*\" for unused import (got: \"{}\")", member);
}
}
if !found_star {
bail!("Expected \'*\' for unused import");
}
return Ok(None);
} else {
bail!("Expected: ImportNames::Aliases | ImportNames::Star");
}
}
_ => bail!("Expected: SmallStatement::ImportFrom | SmallStatement::Import"),
};
// Preserve the trailing comma (or not) from the last entry.
let trailing_comma = aliases.last().and_then(|alias| alias.comma.clone());
for member in member_names {
let alias_index = aliases
.iter()
.position(|alias| member == compose_module_path(&alias.name));
if let Some(index) = alias_index {
aliases.remove(index);
}
}
// But avoid destroying any trailing comments.
if let Some(alias) = aliases.last_mut() {
let has_comment = if let Some(comma) = &alias.comma {
match &comma.whitespace_after {
ParenthesizableWhitespace::SimpleWhitespace(_) => false,
ParenthesizableWhitespace::ParenthesizedWhitespace(whitespace) => {
whitespace.first_line.comment.is_some()
}
}
} else {
false
};
if !has_comment {
alias.comma = trailing_comma;
}
}
if aliases.is_empty() {
return Ok(None);
}
Ok(Some(tree.codegen_stylist(stylist)))
}
/// Given an import statement, remove any imports that are not specified in the `member_names` slice.
///
/// Returns the modified import statement.
pub(crate) fn retain_imports(
member_names: &[&str],
stmt: &Stmt,
locator: &Locator,
stylist: &Stylist,
) -> Result<String> {
let module_text = locator.slice(stmt);
let mut tree = match_statement(module_text)?;
let Statement::Simple(body) = &mut tree else {
bail!("Expected Statement::Simple");
};
let aliases = match body.body.first_mut() {
Some(SmallStatement::Import(import_body)) => &mut import_body.names,
Some(SmallStatement::ImportFrom(import_body)) => {
if let ImportNames::Aliases(names) = &mut import_body.names {
names
} else {
bail!("Expected: ImportNames::Aliases");
}
}
_ => bail!("Expected: SmallStatement::ImportFrom | SmallStatement::Import"),
};
// Preserve the trailing comma (or not) from the last entry.
let trailing_comma = aliases.last().and_then(|alias| alias.comma.clone());
aliases.retain(|alias| {
member_names
.iter()
.any(|member| *member == compose_module_path(&alias.name))
});
// But avoid destroying any trailing comments.
if let Some(alias) = aliases.last_mut() {
let has_comment = if let Some(comma) = &alias.comma {
match &comma.whitespace_after {
ParenthesizableWhitespace::SimpleWhitespace(_) => false,
ParenthesizableWhitespace::ParenthesizedWhitespace(whitespace) => {
whitespace.first_line.comment.is_some()
}
}
} else {
false
};
if !has_comment {
alias.comma = trailing_comma;
}
}
Ok(tree.codegen_stylist(stylist))
}
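// Illustrative usage sketch for the helpers above: exercises `remove_imports`
// end to end. The `tokenize` and `Stylist::from_tokens` signatures are assumed
// from the sibling `ruff_python_parser` and `ruff_python_codegen` crates.
#[cfg(test)]
mod tests {
    use anyhow::Result;
    use ruff_python_codegen::Stylist;
    use ruff_python_parser::{parse_suite, tokenize, Mode};
    use ruff_source_file::Locator;

    use super::remove_imports;

    #[test]
    fn remove_single_member() -> Result<()> {
        let contents = "import os, sys";
        let program = parse_suite(contents, "<filename>")?;
        let stmt = program.first().unwrap();
        let locator = Locator::new(contents);
        let tokens = tokenize(contents, Mode::Module);
        let stylist = Stylist::from_tokens(&tokens, &locator);
        // `os` is dropped; `sys` keeps the statement alive.
        assert_eq!(
            remove_imports(std::iter::once("os"), stmt, &locator, &stylist)?.as_deref(),
            Some("import sys")
        );
        Ok(())
    }
}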

@@ -0,0 +1,367 @@
//! Interface for generating autofix edits from higher-level actions (e.g., "remove an argument").
use anyhow::{Context, Result};
use ruff_diagnostics::Edit;
use ruff_python_ast::{self as ast, Arguments, ExceptHandler, Stmt};
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_python_trivia::{
has_leading_content, is_python_whitespace, PythonWhitespace, SimpleTokenKind, SimpleTokenizer,
};
use ruff_source_file::{Locator, NewlineWithTrailingNewline};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::autofix::codemods;
/// Return the `Edit` to use when deleting a `Stmt`.
///
/// In some cases, this is as simple as deleting the `Range` of the `Stmt`
/// itself. However, there are a few exceptions:
/// - If the `Stmt` is _not_ the terminal statement in a multi-statement line,
/// we need to delete up to the start of the next statement (and avoid
/// deleting any content that precedes the statement).
/// - If the `Stmt` is the terminal statement in a multi-statement line, we need
/// to avoid deleting any content that precedes the statement.
/// - If the `Stmt` has no leading or trailing content, then it's convenient to
/// remove the entire start and end lines.
/// - If the `Stmt` is the last statement in its parent body, replace it with a
/// `pass` instead.
pub(crate) fn delete_stmt(
stmt: &Stmt,
parent: Option<&Stmt>,
locator: &Locator,
indexer: &Indexer,
) -> Edit {
if parent
.map(|parent| is_lone_child(stmt, parent))
.unwrap_or_default()
{
// If removing this node would lead to an invalid syntax tree, replace
// it with a `pass`.
Edit::range_replacement("pass".to_string(), stmt.range())
} else if let Some(semicolon) = trailing_semicolon(stmt.end(), locator) {
let next = next_stmt_break(semicolon, locator);
Edit::deletion(stmt.start(), next)
} else if has_leading_content(stmt.start(), locator) {
Edit::range_deletion(stmt.range())
} else if let Some(start) = indexer.preceded_by_continuations(stmt.start(), locator) {
Edit::deletion(start, stmt.end())
} else {
let range = locator.full_lines_range(stmt.range());
Edit::range_deletion(range)
}
}
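// Worked example: in `x = 1; y = 2`, deleting `x = 1` removes everything up to
// the start of `y` (semicolon and padding included), deleting `y = 2` removes
// only its own range (leaving `x = 1; `), and deleting the sole statement of a
// function body replaces it with `pass` to keep the tree valid.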
/// Generate an `Edit` to remove the specified imports from an `import` statement.
pub(crate) fn remove_unused_imports<'a>(
member_names: impl Iterator<Item = &'a str>,
stmt: &Stmt,
parent: Option<&Stmt>,
locator: &Locator,
stylist: &Stylist,
indexer: &Indexer,
) -> Result<Edit> {
match codemods::remove_imports(member_names, stmt, locator, stylist)? {
None => Ok(delete_stmt(stmt, parent, locator, indexer)),
Some(content) => Ok(Edit::range_replacement(content, stmt.range())),
}
}
#[derive(Debug, Copy, Clone)]
pub(crate) enum Parentheses {
/// Remove parentheses, if the removed argument is the only argument left.
Remove,
/// Preserve parentheses, even if the removed argument is the only argument left.
Preserve,
}
/// Generic function to remove arguments or keyword arguments from function
/// calls and class definitions. (For classes, `args` should be considered
/// `bases`.)
///
/// Supports removing the parentheses when the removed argument is the only
/// one left; for that behavior, pass `Parentheses::Remove`.
pub(crate) fn remove_argument<T: Ranged>(
argument: &T,
arguments: &Arguments,
parentheses: Parentheses,
source: &str,
) -> Result<Edit> {
// Partition into arguments before and after the argument to remove.
let (before, after): (Vec<_>, Vec<_>) = arguments
.arguments_source_order()
.map(|arg| arg.range())
.filter(|range| argument.range() != *range)
.partition(|range| range.start() < argument.start());
if !after.is_empty() {
// Case 1: argument or keyword is _not_ the last node, so delete from the start of the
// argument to the end of the subsequent comma.
let mut tokenizer = SimpleTokenizer::starts_at(argument.end(), source);
// Find the trailing comma.
tokenizer
.find(|token| token.kind == SimpleTokenKind::Comma)
.context("Unable to find trailing comma")?;
// Find the next non-whitespace token.
let next = tokenizer
.find(|token| {
token.kind != SimpleTokenKind::Whitespace && token.kind != SimpleTokenKind::Newline
})
.context("Unable to find next token")?;
Ok(Edit::deletion(argument.start(), next.start()))
} else if let Some(previous) = before.iter().map(Ranged::end).max() {
// Case 2: argument or keyword is the last node, so delete from the start of the
// previous comma to the end of the argument.
let mut tokenizer = SimpleTokenizer::starts_at(previous, source);
// Find the trailing comma.
let comma = tokenizer
.find(|token| token.kind == SimpleTokenKind::Comma)
.context("Unable to find trailing comma")?;
Ok(Edit::deletion(comma.start(), argument.end()))
} else {
// Case 3: argument or keyword is the only node, so delete the arguments (but preserve
// parentheses, if needed).
Ok(match parentheses {
Parentheses::Remove => Edit::range_deletion(arguments.range()),
Parentheses::Preserve => Edit::range_replacement("()".to_string(), arguments.range()),
})
}
}
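// Worked examples: removing `x` from `f(x, y)` is Case 1 and deletes through
// the following comma, yielding `f(y)`; removing `y` is Case 2 and deletes
// `, y`, yielding `f(x)`; removing the sole `x` from `f(x)` is Case 3,
// yielding `f()` under `Parentheses::Preserve` or `f` under
// `Parentheses::Remove`.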
/// Determine if a vector contains only one, specific element.
fn is_only<T: PartialEq>(vec: &[T], value: &T) -> bool {
vec.len() == 1 && vec[0] == *value
}
/// Determine if a child is the only statement in its body.
fn is_lone_child(child: &Stmt, parent: &Stmt) -> bool {
match parent {
Stmt::FunctionDef(ast::StmtFunctionDef { body, .. })
| Stmt::ClassDef(ast::StmtClassDef { body, .. })
| Stmt::With(ast::StmtWith { body, .. }) => {
if is_only(body, child) {
return true;
}
}
Stmt::For(ast::StmtFor { body, orelse, .. })
| Stmt::While(ast::StmtWhile { body, orelse, .. }) => {
if is_only(body, child) || is_only(orelse, child) {
return true;
}
}
Stmt::If(ast::StmtIf {
body,
elif_else_clauses,
..
}) => {
if is_only(body, child)
|| elif_else_clauses
.iter()
.any(|ast::ElifElseClause { body, .. }| is_only(body, child))
{
return true;
}
}
Stmt::Try(ast::StmtTry {
body,
handlers,
orelse,
finalbody,
..
}) => {
if is_only(body, child)
|| is_only(orelse, child)
|| is_only(finalbody, child)
|| handlers.iter().any(|handler| match handler {
ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler {
body, ..
}) => is_only(body, child),
})
{
return true;
}
}
Stmt::Match(ast::StmtMatch { cases, .. }) => {
if cases.iter().any(|case| is_only(&case.body, child)) {
return true;
}
}
_ => {}
}
false
}
/// Return the location of a trailing semicolon following a `Stmt`, if it's part
/// of a multi-statement line.
fn trailing_semicolon(offset: TextSize, locator: &Locator) -> Option<TextSize> {
let contents = locator.after(offset);
for line in NewlineWithTrailingNewline::from(contents) {
let trimmed = line.trim_whitespace_start();
if trimmed.starts_with(';') {
let semicolon_offset = line.text_len() - trimmed.text_len();
return Some(offset + line.start() + semicolon_offset);
}
if !trimmed.starts_with('\\') {
break;
}
}
None
}
/// Find the next valid break for a `Stmt` after a semicolon.
fn next_stmt_break(semicolon: TextSize, locator: &Locator) -> TextSize {
let start_location = semicolon + TextSize::from(1);
for line in
NewlineWithTrailingNewline::with_offset(locator.after(start_location), start_location)
{
let trimmed = line.trim_whitespace();
// Skip past any continuations.
if trimmed.starts_with('\\') {
continue;
}
return if trimmed.is_empty() {
// If the line is empty, then despite the previous statement ending in a
// semicolon, we know that it's not a multi-statement line.
line.start()
} else {
// Otherwise, find the start of the next statement. (Or, anything that isn't
// whitespace.)
let relative_offset = line.find(|c: char| !is_python_whitespace(c)).unwrap();
line.start() + TextSize::try_from(relative_offset).unwrap()
};
}
locator.line_end(start_location)
}
/// Add leading whitespace to a snippet, if it's immediately preceded by an identifier or keyword.
pub(crate) fn pad_start(mut content: String, start: TextSize, locator: &Locator) -> String {
// Ex) When converting `except(ValueError,)` from a tuple to a single argument, we need to
// insert a space before the fix, to achieve `except ValueError`.
if locator
.up_to(start)
.chars()
.last()
.is_some_and(|char| char.is_ascii_alphabetic())
{
content.insert(0, ' ');
}
content
}
/// Add trailing whitespace to a snippet, if it's immediately followed by an identifier or keyword.
pub(crate) fn pad_end(mut content: String, end: TextSize, locator: &Locator) -> String {
if locator
.after(end)
.chars()
.next()
.is_some_and(|char| char.is_ascii_alphabetic())
{
content.push(' ');
}
content
}
/// Add leading or trailing whitespace to a snippet, if it's immediately preceded or followed by
/// an identifier or keyword.
pub(crate) fn pad(content: String, range: TextRange, locator: &Locator) -> String {
pad_start(
pad_end(content, range.end(), locator),
range.start(),
locator,
)
}
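// Illustrative test: padding fires only when the snippet would otherwise fuse
// with an adjacent identifier or keyword.
#[test]
fn pad_before_keyword() {
    let locator = Locator::new("except(ValueError,)");
    // Replacing `(ValueError,)` with `ValueError` needs a leading space to
    // produce `except ValueError`; nothing follows, so no trailing pad.
    let range = TextRange::new(TextSize::from(6), TextSize::from(19));
    assert_eq!(pad("ValueError".to_string(), range, &locator), " ValueError");
}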
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_python_parser::parse_suite;
use ruff_source_file::Locator;
use ruff_text_size::{Ranged, TextSize};
use crate::autofix::edits::{next_stmt_break, trailing_semicolon};
#[test]
fn find_semicolon() -> Result<()> {
let contents = "x = 1";
let program = parse_suite(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(trailing_semicolon(stmt.end(), &locator), None);
let contents = "x = 1; y = 1";
let program = parse_suite(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(
trailing_semicolon(stmt.end(), &locator),
Some(TextSize::from(5))
);
let contents = "x = 1 ; y = 1";
let program = parse_suite(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(
trailing_semicolon(stmt.end(), &locator),
Some(TextSize::from(6))
);
let contents = r"
x = 1 \
; y = 1
"
.trim();
let program = parse_suite(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(
trailing_semicolon(stmt.end(), &locator),
Some(TextSize::from(10))
);
Ok(())
}
#[test]
fn find_next_stmt_break() {
let contents = "x = 1; y = 1";
let locator = Locator::new(contents);
assert_eq!(
next_stmt_break(TextSize::from(4), &locator),
TextSize::from(5)
);
let contents = "x = 1 ; y = 1";
let locator = Locator::new(contents);
assert_eq!(
next_stmt_break(TextSize::from(5), &locator),
TextSize::from(6)
);
let contents = r"
x = 1 \
; y = 1
"
.trim();
let locator = Locator::new(contents);
assert_eq!(
next_stmt_break(TextSize::from(10), &locator),
TextSize::from(12)
);
}
}

@@ -0,0 +1,357 @@
use itertools::Itertools;
use std::collections::BTreeSet;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use rustc_hash::{FxHashMap, FxHashSet};
use ruff_diagnostics::{Diagnostic, Edit, Fix, IsolationLevel, SourceMap};
use ruff_source_file::Locator;
use crate::linter::FixTable;
use crate::registry::{AsRule, Rule};
pub(crate) mod codemods;
pub(crate) mod edits;
pub(crate) mod snippet;
pub(crate) struct FixResult {
/// The resulting source code, after applying all fixes.
pub(crate) code: String,
/// The number of fixes applied for each [`Rule`].
pub(crate) fixes: FixTable,
/// Source map for the fixed source code.
pub(crate) source_map: SourceMap,
}
/// Auto-fix errors in a file, returning the fixed source code and fix counts, or `None` if no fixes were applied.
pub(crate) fn fix_file(diagnostics: &[Diagnostic], locator: &Locator) -> Option<FixResult> {
let mut with_fixes = diagnostics
.iter()
.filter(|diag| diag.fix.is_some())
.peekable();
if with_fixes.peek().is_none() {
None
} else {
Some(apply_fixes(with_fixes, locator))
}
}
/// Apply a series of fixes.
fn apply_fixes<'a>(
diagnostics: impl Iterator<Item = &'a Diagnostic>,
locator: &'a Locator<'a>,
) -> FixResult {
let mut output = String::with_capacity(locator.len());
let mut last_pos: Option<TextSize> = None;
let mut applied: BTreeSet<&Edit> = BTreeSet::default();
let mut isolated: FxHashSet<u32> = FxHashSet::default();
let mut fixed = FxHashMap::default();
let mut source_map = SourceMap::default();
for (rule, fix) in diagnostics
.filter_map(|diagnostic| {
diagnostic
.fix
.as_ref()
.map(|fix| (diagnostic.kind.rule(), fix))
})
.sorted_by(|(rule1, fix1), (rule2, fix2)| cmp_fix(*rule1, *rule2, fix1, fix2))
{
let mut edits = fix
.edits()
.iter()
.filter(|edit| !applied.contains(edit))
.peekable();
// If the fix contains at least one new edit, enforce isolation and positional requirements.
if let Some(first) = edits.peek() {
// If this fix requires isolation, and we've already applied another fix in the
// same isolation group, skip it.
if let IsolationLevel::Group(id) = fix.isolation() {
if !isolated.insert(id) {
continue;
}
}
// If this fix overlaps with a fix we've already applied, skip it.
if last_pos.is_some_and(|last_pos| last_pos >= first.start()) {
continue;
}
}
let mut applied_edits = Vec::with_capacity(fix.edits().len());
for edit in edits {
// Add all contents from `last_pos` to the start of this edit.
let slice = locator.slice(TextRange::new(last_pos.unwrap_or_default(), edit.start()));
output.push_str(slice);
// Add the start source marker for the patch.
source_map.push_start_marker(edit, output.text_len());
// Add the patch itself.
output.push_str(edit.content().unwrap_or_default());
// Add the end source marker for the added patch.
source_map.push_end_marker(edit, output.text_len());
// Track that the edit was applied.
last_pos = Some(edit.end());
applied_edits.push(edit);
}
applied.extend(applied_edits.drain(..));
*fixed.entry(rule).or_default() += 1;
}
// Add the remaining content.
let slice = locator.after(last_pos.unwrap_or_default());
output.push_str(slice);
FixResult {
code: output,
fixes: fixed,
source_map,
}
}
/// Compare two fixes.
fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Ordering {
fix1.min_start()
.cmp(&fix2.min_start())
.then_with(|| match (&rule1, &rule2) {
// Apply `EndsInPeriod` fixes before `NewLineAfterLastParagraph` fixes.
(Rule::EndsInPeriod, Rule::NewLineAfterLastParagraph) => std::cmp::Ordering::Less,
(Rule::NewLineAfterLastParagraph, Rule::EndsInPeriod) => std::cmp::Ordering::Greater,
// Apply `IfElseBlockInsteadOfDictGet` fixes before `IfElseBlockInsteadOfIfExp` fixes.
(Rule::IfElseBlockInsteadOfDictGet, Rule::IfElseBlockInsteadOfIfExp) => {
std::cmp::Ordering::Less
}
(Rule::IfElseBlockInsteadOfIfExp, Rule::IfElseBlockInsteadOfDictGet) => {
std::cmp::Ordering::Greater
}
_ => std::cmp::Ordering::Equal,
})
}
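// Example: when an `EndsInPeriod` fix and a `NewLineAfterLastParagraph` fix
// start at the same offset, this ordering applies the period first;
// `apply_fixes` then skips the overlapping fix, which is picked up on a
// subsequent lint pass.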
#[cfg(test)]
mod tests {
use ruff_text_size::{Ranged, TextSize};
use ruff_diagnostics::{Diagnostic, Edit, Fix, SourceMarker};
use ruff_source_file::Locator;
use crate::autofix::{apply_fixes, FixResult};
use crate::rules::pycodestyle::rules::MissingNewlineAtEndOfFile;
#[allow(deprecated)]
fn create_diagnostics(edit: impl IntoIterator<Item = Edit>) -> Vec<Diagnostic> {
edit.into_iter()
.map(|edit| Diagnostic {
// The choice of rule here is arbitrary.
kind: MissingNewlineAtEndOfFile.into(),
range: edit.range(),
fix: Some(Fix::unspecified(edit)),
parent: None,
})
.collect()
}
#[test]
fn empty_file() {
let locator = Locator::new(r#""#);
let diagnostics = create_diagnostics([]);
let FixResult {
code,
fixes,
source_map,
} = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(code, "");
assert_eq!(fixes.values().sum::<usize>(), 0);
assert!(source_map.markers().is_empty());
}
#[test]
fn apply_one_insertion() {
let locator = Locator::new(
r#"
import os
print("hello world")
"#
.trim(),
);
let diagnostics = create_diagnostics([Edit::insertion(
"import sys\n".to_string(),
TextSize::new(10),
)]);
let FixResult {
code,
fixes,
source_map,
} = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(
code,
r#"
import os
import sys
print("hello world")
"#
.trim()
);
assert_eq!(fixes.values().sum::<usize>(), 1);
assert_eq!(
source_map.markers(),
&[
SourceMarker::new(10.into(), 10.into(),),
SourceMarker::new(10.into(), 21.into(),),
]
);
}
#[test]
fn apply_one_replacement() {
let locator = Locator::new(
r#"
class A(object):
...
"#
.trim(),
);
let diagnostics = create_diagnostics([Edit::replacement(
"Bar".to_string(),
TextSize::new(8),
TextSize::new(14),
)]);
let FixResult {
code,
fixes,
source_map,
} = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(
code,
r#"
class A(Bar):
...
"#
.trim(),
);
assert_eq!(fixes.values().sum::<usize>(), 1);
assert_eq!(
source_map.markers(),
&[
SourceMarker::new(8.into(), 8.into(),),
SourceMarker::new(14.into(), 11.into(),),
]
);
}
#[test]
fn apply_one_removal() {
let locator = Locator::new(
r#"
class A(object):
...
"#
.trim(),
);
let diagnostics = create_diagnostics([Edit::deletion(TextSize::new(7), TextSize::new(15))]);
let FixResult {
code,
fixes,
source_map,
} = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(
code,
r#"
class A:
...
"#
.trim()
);
assert_eq!(fixes.values().sum::<usize>(), 1);
assert_eq!(
source_map.markers(),
&[
SourceMarker::new(7.into(), 7.into()),
SourceMarker::new(15.into(), 7.into()),
]
);
}
#[test]
fn apply_two_removals() {
let locator = Locator::new(
r#"
class A(object, object, object):
...
"#
.trim(),
);
let diagnostics = create_diagnostics([
Edit::deletion(TextSize::from(8), TextSize::from(16)),
Edit::deletion(TextSize::from(22), TextSize::from(30)),
]);
let FixResult {
code,
fixes,
source_map,
} = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(
code,
r#"
class A(object):
...
"#
.trim()
);
assert_eq!(fixes.values().sum::<usize>(), 2);
assert_eq!(
source_map.markers(),
&[
SourceMarker::new(8.into(), 8.into()),
SourceMarker::new(16.into(), 8.into()),
SourceMarker::new(22.into(), 14.into(),),
SourceMarker::new(30.into(), 14.into(),),
]
);
}
#[test]
fn ignore_overlapping_fixes() {
let locator = Locator::new(
r#"
class A(object):
...
"#
.trim(),
);
let diagnostics = create_diagnostics([
Edit::deletion(TextSize::from(7), TextSize::from(15)),
Edit::replacement("ignored".to_string(), TextSize::from(9), TextSize::from(11)),
]);
let FixResult {
code,
fixes,
source_map,
} = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(
code,
r#"
class A:
...
"#
.trim(),
);
assert_eq!(fixes.values().sum::<usize>(), 1);
assert_eq!(
source_map.markers(),
&[
SourceMarker::new(7.into(), 7.into(),),
SourceMarker::new(15.into(), 7.into(),),
]
);
}
}

@@ -0,0 +1,40 @@
use unicode_width::UnicodeWidthStr;
/// A snippet of source code for user-facing display, as in a diagnostic.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct SourceCodeSnippet(String);
impl SourceCodeSnippet {
pub(crate) fn new(source_code: String) -> Self {
Self(source_code)
}
pub(crate) fn from_str(source_code: &str) -> Self {
Self(source_code.to_string())
}
/// Return the full snippet for user-facing display, or `None` if the snippet should be
/// truncated.
pub(crate) fn full_display(&self) -> Option<&str> {
if Self::should_truncate(&self.0) {
None
} else {
Some(&self.0)
}
}
/// Return a truncated snippet for user-facing display.
pub(crate) fn truncated_display(&self) -> &str {
if Self::should_truncate(&self.0) {
"..."
} else {
&self.0
}
}
/// Returns `true` if the source code should be truncated when included in a user-facing
/// diagnostic.
fn should_truncate(source_code: &str) -> bool {
source_code.width() > 50 || source_code.contains(['\r', '\n'])
}
}
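// Illustrative tests: snippets wider than 50 columns, or spanning multiple
// lines, are withheld from full display and truncated to `...`.
#[cfg(test)]
mod tests {
    use super::SourceCodeSnippet;

    #[test]
    fn truncation() {
        let short = SourceCodeSnippet::from_str("x = 1");
        assert_eq!(short.full_display(), Some("x = 1"));
        assert_eq!(short.truncated_display(), "x = 1");

        let multiline = SourceCodeSnippet::from_str("x = 1\ny = 2");
        assert_eq!(multiline.full_display(), None);
        assert_eq!(multiline.truncated_display(), "...");
    }
}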

@@ -0,0 +1,74 @@
use ruff_diagnostics::{Diagnostic, Fix};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_import_conventions, flake8_pyi, pyflakes, pylint};
/// Run lint rules over the [`Binding`]s.
pub(crate) fn bindings(checker: &mut Checker) {
if !checker.any_enabled(&[
Rule::InvalidAllFormat,
Rule::InvalidAllObject,
Rule::UnaliasedCollectionsAbcSetImport,
Rule::UnconventionalImportAlias,
Rule::UnusedVariable,
]) {
return;
}
for binding in &*checker.semantic.bindings {
if checker.enabled(Rule::UnusedVariable) {
if binding.kind.is_bound_exception()
&& !binding.is_used()
&& !checker
.settings
.dummy_variable_rgx
.is_match(binding.name(checker.locator))
{
let mut diagnostic = Diagnostic::new(
pyflakes::rules::UnusedVariable {
name: binding.name(checker.locator).to_string(),
},
binding.range(),
);
if checker.patch(Rule::UnusedVariable) {
diagnostic.try_set_fix(|| {
pyflakes::fixes::remove_exception_handler_assignment(
binding,
checker.locator,
)
.map(Fix::automatic)
});
}
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::InvalidAllFormat) {
if let Some(diagnostic) = pylint::rules::invalid_all_format(binding) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::InvalidAllObject) {
if let Some(diagnostic) = pylint::rules::invalid_all_object(binding) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::UnconventionalImportAlias) {
if let Some(diagnostic) = flake8_import_conventions::rules::unconventional_import_alias(
checker,
binding,
&checker.settings.flake8_import_conventions.aliases,
) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::UnaliasedCollectionsAbcSetImport) {
if let Some(diagnostic) =
flake8_pyi::rules::unaliased_collections_abc_set_import(checker, binding)
{
checker.diagnostics.push(diagnostic);
}
}
}
}

@@ -0,0 +1,12 @@
use ruff_python_ast::Comprehension;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::flake8_simplify;
/// Run lint rules over a [`Comprehension`] syntax node.
pub(crate) fn comprehension(comprehension: &Comprehension, checker: &mut Checker) {
if checker.enabled(Rule::InDictKeys) {
flake8_simplify::rules::key_in_dict_comprehension(checker, comprehension);
}
}

@@ -0,0 +1,32 @@
use ruff_python_ast::Stmt;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_bugbear, perflint, pyupgrade, refurb};
/// Run lint rules over all deferred for-loops in the [`SemanticModel`].
pub(crate) fn deferred_for_loops(checker: &mut Checker) {
while !checker.deferred.for_loops.is_empty() {
let for_loops = std::mem::take(&mut checker.deferred.for_loops);
for snapshot in for_loops {
checker.semantic.restore(snapshot);
let Stmt::For(stmt_for) = checker.semantic.current_statement() else {
unreachable!("Expected Stmt::For");
};
if checker.enabled(Rule::UnusedLoopControlVariable) {
flake8_bugbear::rules::unused_loop_control_variable(checker, stmt_for);
}
if checker.enabled(Rule::IncorrectDictIterator) {
perflint::rules::incorrect_dict_iterator(checker, stmt_for);
}
if checker.enabled(Rule::YieldInForLoop) {
pyupgrade::rules::yield_in_for_loop(checker, stmt_for);
}
if checker.enabled(Rule::UnnecessaryEnumerate) {
refurb::rules::unnecessary_enumerate(checker, stmt_for);
}
}
}
}

@@ -0,0 +1,314 @@
use ruff_diagnostics::Diagnostic;
use ruff_python_semantic::analyze::visibility;
use ruff_python_semantic::{Binding, BindingKind, ScopeKind};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_pyi, flake8_type_checking, flake8_unused_arguments, pyflakes, pylint};
/// Run lint rules over all deferred scopes in the [`SemanticModel`].
pub(crate) fn deferred_scopes(checker: &mut Checker) {
if !checker.any_enabled(&[
Rule::GlobalVariableNotAssigned,
Rule::ImportShadowedByLoopVar,
Rule::RedefinedWhileUnused,
Rule::RuntimeImportInTypeCheckingBlock,
Rule::TypingOnlyFirstPartyImport,
Rule::TypingOnlyStandardLibraryImport,
Rule::TypingOnlyThirdPartyImport,
Rule::UndefinedLocal,
Rule::UnusedAnnotation,
Rule::UnusedClassMethodArgument,
Rule::UnusedFunctionArgument,
Rule::UnusedImport,
Rule::UnusedLambdaArgument,
Rule::UnusedMethodArgument,
Rule::UnusedPrivateProtocol,
Rule::UnusedPrivateTypeAlias,
Rule::UnusedPrivateTypeVar,
Rule::UnusedPrivateTypedDict,
Rule::UnusedStaticMethodArgument,
Rule::UnusedVariable,
Rule::NoSelfUse,
]) {
return;
}
// Identify any valid runtime imports. If a module is imported at runtime and
// used at runtime, then by default, we avoid flagging any other
// imports from that module as typing-only.
let enforce_typing_imports = !checker.source_type.is_stub()
&& checker.any_enabled(&[
Rule::RuntimeImportInTypeCheckingBlock,
Rule::TypingOnlyFirstPartyImport,
Rule::TypingOnlyStandardLibraryImport,
Rule::TypingOnlyThirdPartyImport,
]);
let runtime_imports: Vec<Vec<&Binding>> = if enforce_typing_imports {
checker
.semantic
.scopes
.iter()
.map(|scope| {
scope
.binding_ids()
.map(|binding_id| checker.semantic.binding(binding_id))
.filter(|binding| {
flake8_type_checking::helpers::is_valid_runtime_import(
binding,
&checker.semantic,
)
})
.collect()
})
.collect::<Vec<_>>()
} else {
vec![]
};
let mut diagnostics: Vec<Diagnostic> = vec![];
for scope_id in checker.deferred.scopes.iter().rev().copied() {
let scope = &checker.semantic.scopes[scope_id];
if checker.enabled(Rule::UndefinedLocal) {
pyflakes::rules::undefined_local(checker, scope_id, scope, &mut diagnostics);
}
if checker.enabled(Rule::GlobalVariableNotAssigned) {
for (name, binding_id) in scope.bindings() {
let binding = checker.semantic.binding(binding_id);
if binding.kind.is_global() {
diagnostics.push(Diagnostic::new(
pylint::rules::GlobalVariableNotAssigned {
name: (*name).to_string(),
},
binding.range(),
));
}
}
}
if checker.enabled(Rule::ImportShadowedByLoopVar) {
for (name, binding_id) in scope.bindings() {
for shadow in checker.semantic.shadowed_bindings(scope_id, binding_id) {
// If the shadowing binding isn't a loop variable, abort.
let binding = &checker.semantic.bindings[shadow.binding_id()];
if !binding.kind.is_loop_var() {
continue;
}
// If the shadowed binding isn't an import, abort.
let shadowed = &checker.semantic.bindings[shadow.shadowed_id()];
if !matches!(
shadowed.kind,
BindingKind::Import(..)
| BindingKind::FromImport(..)
| BindingKind::SubmoduleImport(..)
| BindingKind::FutureImport
) {
continue;
}
// If the bindings are in different forks, abort.
if shadowed.source.map_or(true, |left| {
binding.source.map_or(true, |right| {
checker.semantic.different_branches(left, right)
})
}) {
continue;
}
#[allow(deprecated)]
let line = checker.locator.compute_line_index(shadowed.start());
checker.diagnostics.push(Diagnostic::new(
pyflakes::rules::ImportShadowedByLoopVar {
name: name.to_string(),
line,
},
binding.range(),
));
}
}
}
if checker.enabled(Rule::RedefinedWhileUnused) {
for (name, binding_id) in scope.bindings() {
for shadow in checker.semantic.shadowed_bindings(scope_id, binding_id) {
// If the shadowing binding is a loop variable, abort, to avoid overlap
// with F402.
let binding = &checker.semantic.bindings[shadow.binding_id()];
if binding.kind.is_loop_var() {
continue;
}
// If the shadowed binding is used, abort.
let shadowed = &checker.semantic.bindings[shadow.shadowed_id()];
if shadowed.is_used() {
continue;
}
// If the shadowing binding isn't considered a "redefinition" of the
// shadowed binding, abort.
if !binding.redefines(shadowed) {
continue;
}
if shadow.same_scope() {
// If the symbol is a dummy variable, abort, unless the shadowed
// binding is an import.
if !matches!(
shadowed.kind,
BindingKind::Import(..)
| BindingKind::FromImport(..)
| BindingKind::SubmoduleImport(..)
| BindingKind::FutureImport
) && checker.settings.dummy_variable_rgx.is_match(name)
{
continue;
}
let Some(node_id) = shadowed.source else {
continue;
};
// If this is an overloaded function, abort.
if shadowed.kind.is_function_definition() {
if checker
.semantic
.statement(node_id)
.as_function_def_stmt()
.is_some_and(|function| {
visibility::is_overload(
&function.decorator_list,
&checker.semantic,
)
})
{
continue;
}
}
} else {
// Only enforce cross-scope shadowing for imports.
if !matches!(
shadowed.kind,
BindingKind::Import(..)
| BindingKind::FromImport(..)
| BindingKind::SubmoduleImport(..)
| BindingKind::FutureImport
) {
continue;
}
}
// If the bindings are in different forks, abort.
if shadowed.source.map_or(true, |left| {
binding.source.map_or(true, |right| {
checker.semantic.different_branches(left, right)
})
}) {
continue;
}
#[allow(deprecated)]
let line = checker.locator.compute_line_index(shadowed.start());
let mut diagnostic = Diagnostic::new(
pyflakes::rules::RedefinedWhileUnused {
name: (*name).to_string(),
line,
},
binding.range(),
);
if let Some(range) = binding.parent_range(&checker.semantic) {
diagnostic.set_parent(range.start());
}
diagnostics.push(diagnostic);
}
}
}
if checker.enabled(Rule::UnusedPrivateTypeVar) {
flake8_pyi::rules::unused_private_type_var(checker, scope, &mut diagnostics);
}
if checker.enabled(Rule::UnusedPrivateProtocol) {
flake8_pyi::rules::unused_private_protocol(checker, scope, &mut diagnostics);
}
if checker.enabled(Rule::UnusedPrivateTypeAlias) {
flake8_pyi::rules::unused_private_type_alias(checker, scope, &mut diagnostics);
}
if checker.enabled(Rule::UnusedPrivateTypedDict) {
flake8_pyi::rules::unused_private_typed_dict(checker, scope, &mut diagnostics);
}
if matches!(scope.kind, ScopeKind::Function(_) | ScopeKind::Lambda(_)) {
if checker.enabled(Rule::UnusedVariable) {
pyflakes::rules::unused_variable(checker, scope, &mut diagnostics);
}
if checker.enabled(Rule::UnusedAnnotation) {
pyflakes::rules::unused_annotation(checker, scope, &mut diagnostics);
}
if !checker.source_type.is_stub() {
if checker.any_enabled(&[
Rule::UnusedClassMethodArgument,
Rule::UnusedFunctionArgument,
Rule::UnusedLambdaArgument,
Rule::UnusedMethodArgument,
Rule::UnusedStaticMethodArgument,
]) {
flake8_unused_arguments::rules::unused_arguments(
checker,
scope,
&mut diagnostics,
);
}
}
}
if matches!(scope.kind, ScopeKind::Function(_) | ScopeKind::Module) {
if enforce_typing_imports {
let runtime_imports: Vec<&Binding> = checker
.semantic
.scopes
.ancestor_ids(scope_id)
.flat_map(|scope_id| runtime_imports[scope_id.as_usize()].iter())
.copied()
.collect();
if checker.enabled(Rule::RuntimeImportInTypeCheckingBlock) {
flake8_type_checking::rules::runtime_import_in_type_checking_block(
checker,
scope,
&mut diagnostics,
);
}
if checker.any_enabled(&[
Rule::TypingOnlyFirstPartyImport,
Rule::TypingOnlyStandardLibraryImport,
Rule::TypingOnlyThirdPartyImport,
]) {
flake8_type_checking::rules::typing_only_runtime_import(
checker,
scope,
&runtime_imports,
&mut diagnostics,
);
}
}
if checker.enabled(Rule::UnusedImport) {
pyflakes::rules::unused_import(checker, scope, &mut diagnostics);
}
}
if scope.kind.is_function() {
if checker.enabled(Rule::NoSelfUse) {
pylint::rules::no_self_use(checker, scope, &mut diagnostics);
}
}
}
checker.diagnostics.extend(diagnostics);
}

@@ -0,0 +1,292 @@
use ruff_python_ast::str::raw_contents_range;
use ruff_text_size::{Ranged, TextRange};
use ruff_python_semantic::{BindingKind, ContextualizedDefinition, Export};
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::docstrings::Docstring;
use crate::fs::relativize_path;
use crate::rules::{flake8_annotations, flake8_pyi, pydocstyle};
use crate::{docstrings, warn_user};
/// Run lint rules over all [`Definition`] nodes in the [`SemanticModel`].
///
/// This phase is expected to run after the AST has been traversed in its entirety; as such,
/// it is expected that all [`Definition`] nodes have been visited by this point, and that this
/// method will not recurse into any other nodes.
pub(crate) fn definitions(checker: &mut Checker) {
let enforce_annotations = checker.any_enabled(&[
Rule::AnyType,
Rule::MissingReturnTypeClassMethod,
Rule::MissingReturnTypePrivateFunction,
Rule::MissingReturnTypeSpecialMethod,
Rule::MissingReturnTypeStaticMethod,
Rule::MissingReturnTypeUndocumentedPublicFunction,
Rule::MissingTypeArgs,
Rule::MissingTypeCls,
Rule::MissingTypeFunctionArgument,
Rule::MissingTypeKwargs,
Rule::MissingTypeSelf,
]);
let enforce_stubs = checker.source_type.is_stub() && checker.enabled(Rule::DocstringInStub);
let enforce_stubs_and_runtime = checker.enabled(Rule::IterMethodReturnIterable);
let enforce_docstrings = checker.any_enabled(&[
Rule::BlankLineAfterLastSection,
Rule::BlankLineAfterSummary,
Rule::BlankLineBeforeClass,
Rule::BlankLinesBetweenHeaderAndContent,
Rule::CapitalizeSectionName,
Rule::DashedUnderlineAfterSection,
Rule::DocstringStartsWithThis,
Rule::EmptyDocstring,
Rule::EmptyDocstringSection,
Rule::EndsInPeriod,
Rule::EndsInPunctuation,
Rule::EscapeSequenceInDocstring,
Rule::FirstLineCapitalized,
Rule::FitsOnOneLine,
Rule::IndentWithSpaces,
Rule::MultiLineSummaryFirstLine,
Rule::MultiLineSummarySecondLine,
Rule::NewLineAfterLastParagraph,
Rule::NewLineAfterSectionName,
Rule::NoBlankLineAfterFunction,
Rule::NoBlankLineAfterSection,
Rule::NoBlankLineBeforeFunction,
Rule::NoBlankLineBeforeSection,
Rule::NoSignature,
Rule::NonImperativeMood,
Rule::OneBlankLineAfterClass,
Rule::OneBlankLineBeforeClass,
Rule::OverIndentation,
Rule::OverloadWithDocstring,
Rule::SectionNameEndsInColon,
Rule::SectionNotOverIndented,
Rule::SectionUnderlineAfterName,
Rule::SectionUnderlineMatchesSectionLength,
Rule::SectionUnderlineNotOverIndented,
Rule::SurroundingWhitespace,
Rule::TripleSingleQuotes,
Rule::UnderIndentation,
Rule::UndocumentedMagicMethod,
Rule::UndocumentedParam,
Rule::UndocumentedPublicClass,
Rule::UndocumentedPublicFunction,
Rule::UndocumentedPublicInit,
Rule::UndocumentedPublicMethod,
Rule::UndocumentedPublicModule,
Rule::UndocumentedPublicNestedClass,
Rule::UndocumentedPublicPackage,
]);
if !enforce_annotations && !enforce_docstrings && !enforce_stubs && !enforce_stubs_and_runtime {
return;
}
// Compute visibility of all definitions.
let exports: Option<Vec<&str>> = {
checker
.semantic
.global_scope()
.get_all("__all__")
.map(|binding_id| &checker.semantic.bindings[binding_id])
.filter_map(|binding| match &binding.kind {
BindingKind::Export(Export { names }) => Some(names.iter().copied()),
_ => None,
})
.fold(None, |acc, names| {
Some(acc.into_iter().flatten().chain(names).collect())
})
};
let definitions = std::mem::take(&mut checker.semantic.definitions);
let mut overloaded_name: Option<String> = None;
for ContextualizedDefinition {
definition,
visibility,
} in definitions.resolve(exports.as_deref()).iter()
{
let docstring = docstrings::extraction::extract_docstring(definition);
// flake8-annotations
if enforce_annotations {
// TODO(charlie): This should be even stricter, in that an overload
// implementation should come immediately after the overloaded
// interfaces, without any AST nodes in between. Right now, we
// only error when traversing definition boundaries (functions,
// classes, etc.).
if !overloaded_name.is_some_and(|overloaded_name| {
flake8_annotations::helpers::is_overload_impl(
definition,
&overloaded_name,
&checker.semantic,
)
}) {
checker
.diagnostics
.extend(flake8_annotations::rules::definition(
checker,
definition,
*visibility,
));
}
overloaded_name =
flake8_annotations::helpers::overloaded_name(definition, &checker.semantic);
}
// flake8-pyi
if enforce_stubs {
if checker.enabled(Rule::DocstringInStub) {
flake8_pyi::rules::docstring_in_stubs(checker, docstring);
}
}
if enforce_stubs_and_runtime {
if checker.enabled(Rule::IterMethodReturnIterable) {
flake8_pyi::rules::iter_method_return_iterable(checker, definition);
}
}
// pydocstyle
if enforce_docstrings {
if pydocstyle::helpers::should_ignore_definition(
definition,
&checker.settings.pydocstyle.ignore_decorators,
&checker.semantic,
) {
continue;
}
// Extract a `Docstring` from a `Definition`.
let Some(expr) = docstring else {
pydocstyle::rules::not_missing(checker, definition, *visibility);
continue;
};
let contents = checker.locator().slice(expr);
let indentation = checker.locator().slice(TextRange::new(
checker.locator.line_start(expr.start()),
expr.start(),
));
if pydocstyle::helpers::should_ignore_docstring(expr) {
#[allow(deprecated)]
let location = checker.locator.compute_source_location(expr.start());
warn_user!(
"Docstring at {}:{}:{} contains implicit string concatenation; ignoring...",
relativize_path(checker.path),
location.row,
location.column
);
continue;
}
// SAFETY: Safe for any docstring that isn't an implicit string concatenation (those are skipped above).
let body_range = raw_contents_range(contents).unwrap();
let docstring = Docstring {
definition,
expr,
contents,
body_range,
indentation,
};
if !pydocstyle::rules::not_empty(checker, &docstring) {
continue;
}
if checker.enabled(Rule::FitsOnOneLine) {
pydocstyle::rules::one_liner(checker, &docstring);
}
if checker.any_enabled(&[
Rule::NoBlankLineAfterFunction,
Rule::NoBlankLineBeforeFunction,
]) {
pydocstyle::rules::blank_before_after_function(checker, &docstring);
}
if checker.any_enabled(&[
Rule::BlankLineBeforeClass,
Rule::OneBlankLineAfterClass,
Rule::OneBlankLineBeforeClass,
]) {
pydocstyle::rules::blank_before_after_class(checker, &docstring);
}
if checker.enabled(Rule::BlankLineAfterSummary) {
pydocstyle::rules::blank_after_summary(checker, &docstring);
}
if checker.any_enabled(&[
Rule::IndentWithSpaces,
Rule::OverIndentation,
Rule::UnderIndentation,
]) {
pydocstyle::rules::indent(checker, &docstring);
}
if checker.enabled(Rule::NewLineAfterLastParagraph) {
pydocstyle::rules::newline_after_last_paragraph(checker, &docstring);
}
if checker.enabled(Rule::SurroundingWhitespace) {
pydocstyle::rules::no_surrounding_whitespace(checker, &docstring);
}
if checker.any_enabled(&[
Rule::MultiLineSummaryFirstLine,
Rule::MultiLineSummarySecondLine,
]) {
pydocstyle::rules::multi_line_summary_start(checker, &docstring);
}
if checker.enabled(Rule::TripleSingleQuotes) {
pydocstyle::rules::triple_quotes(checker, &docstring);
}
if checker.enabled(Rule::EscapeSequenceInDocstring) {
pydocstyle::rules::backslashes(checker, &docstring);
}
if checker.enabled(Rule::EndsInPeriod) {
pydocstyle::rules::ends_with_period(checker, &docstring);
}
if checker.enabled(Rule::NonImperativeMood) {
pydocstyle::rules::non_imperative_mood(
checker,
&docstring,
&checker.settings.pydocstyle.property_decorators,
);
}
if checker.enabled(Rule::NoSignature) {
pydocstyle::rules::no_signature(checker, &docstring);
}
if checker.enabled(Rule::FirstLineCapitalized) {
pydocstyle::rules::capitalized(checker, &docstring);
}
if checker.enabled(Rule::DocstringStartsWithThis) {
pydocstyle::rules::starts_with_this(checker, &docstring);
}
if checker.enabled(Rule::EndsInPunctuation) {
pydocstyle::rules::ends_with_punctuation(checker, &docstring);
}
if checker.enabled(Rule::OverloadWithDocstring) {
pydocstyle::rules::if_needed(checker, &docstring);
}
if checker.any_enabled(&[
Rule::BlankLineAfterLastSection,
Rule::BlankLinesBetweenHeaderAndContent,
Rule::CapitalizeSectionName,
Rule::DashedUnderlineAfterSection,
Rule::EmptyDocstringSection,
Rule::MultiLineSummaryFirstLine,
Rule::NewLineAfterSectionName,
Rule::NoBlankLineAfterSection,
Rule::NoBlankLineBeforeSection,
Rule::SectionNameEndsInColon,
Rule::SectionNotOverIndented,
Rule::SectionUnderlineAfterName,
Rule::SectionUnderlineMatchesSectionLength,
Rule::SectionUnderlineNotOverIndented,
Rule::UndocumentedParam,
]) {
pydocstyle::rules::sections(
checker,
&docstring,
checker.settings.pydocstyle.convention.as_ref(),
);
}
}
}
}

@@ -0,0 +1,89 @@
use ruff_python_ast::{self as ast, ExceptHandler};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::registry::Rule;
use crate::rules::{
flake8_bandit, flake8_blind_except, flake8_bugbear, flake8_builtins, pycodestyle, pylint,
tryceratops,
};
/// Run lint rules over an [`ExceptHandler`] syntax node.
pub(crate) fn except_handler(except_handler: &ExceptHandler, checker: &mut Checker) {
match except_handler {
ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler {
type_,
name,
body,
range: _,
}) => {
if checker.enabled(Rule::BareExcept) {
if let Some(diagnostic) = pycodestyle::rules::bare_except(
type_.as_deref(),
body,
except_handler,
checker.locator,
) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::RaiseWithoutFromInsideExcept) {
flake8_bugbear::rules::raise_without_from_inside_except(
checker,
name.as_deref(),
body,
);
}
if checker.enabled(Rule::BlindExcept) {
flake8_blind_except::rules::blind_except(
checker,
type_.as_deref(),
name.as_deref(),
body,
);
}
if checker.enabled(Rule::TryExceptPass) {
flake8_bandit::rules::try_except_pass(
checker,
except_handler,
type_.as_deref(),
body,
checker.settings.flake8_bandit.check_typed_exception,
);
}
if checker.enabled(Rule::TryExceptContinue) {
flake8_bandit::rules::try_except_continue(
checker,
except_handler,
type_.as_deref(),
body,
checker.settings.flake8_bandit.check_typed_exception,
);
}
if checker.enabled(Rule::ExceptWithEmptyTuple) {
flake8_bugbear::rules::except_with_empty_tuple(checker, except_handler);
}
if checker.enabled(Rule::ExceptWithNonExceptionClasses) {
flake8_bugbear::rules::except_with_non_exception_classes(checker, except_handler);
}
if checker.enabled(Rule::ReraiseNoCause) {
tryceratops::rules::reraise_no_cause(checker, body);
}
if checker.enabled(Rule::BinaryOpException) {
pylint::rules::binary_op_exception(checker, except_handler);
}
if let Some(name) = name {
if checker.enabled(Rule::AmbiguousVariableName) {
if let Some(diagnostic) =
pycodestyle::rules::ambiguous_variable_name(name.as_str(), name.range())
{
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::BuiltinVariableShadowing) {
flake8_builtins::rules::builtin_variable_shadowing(checker, name, name.range());
}
}
}
}
}

File diff suppressed because it is too large.

@@ -0,0 +1,27 @@
pub(super) use bindings::bindings;
pub(super) use comprehension::comprehension;
pub(super) use deferred_for_loops::deferred_for_loops;
pub(super) use deferred_scopes::deferred_scopes;
pub(super) use definitions::definitions;
pub(super) use except_handler::except_handler;
pub(super) use expression::expression;
pub(super) use module::module;
pub(super) use parameter::parameter;
pub(super) use parameters::parameters;
pub(super) use statement::statement;
pub(super) use suite::suite;
pub(super) use unresolved_references::unresolved_references;
mod bindings;
mod comprehension;
mod deferred_for_loops;
mod deferred_scopes;
mod definitions;
mod except_handler;
mod expression;
mod module;
mod parameter;
mod parameters;
mod statement;
mod suite;
mod unresolved_references;

@@ -0,0 +1,12 @@
use ruff_python_ast::Suite;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::flake8_bugbear;
/// Run lint rules over a module.
pub(crate) fn module(suite: &Suite, checker: &mut Checker) {
if checker.enabled(Rule::FStringDocstring) {
flake8_bugbear::rules::f_string_docstring(checker, suite);
}
}

@@ -0,0 +1,29 @@
use ruff_python_ast::Parameter;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_builtins, pep8_naming, pycodestyle};
/// Run lint rules over a [`Parameter`] syntax node.
pub(crate) fn parameter(parameter: &Parameter, checker: &mut Checker) {
if checker.enabled(Rule::AmbiguousVariableName) {
if let Some(diagnostic) =
pycodestyle::rules::ambiguous_variable_name(&parameter.name, parameter.range())
{
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::InvalidArgumentName) {
if let Some(diagnostic) = pep8_naming::rules::invalid_argument_name(
&parameter.name,
parameter,
&checker.settings.pep8_naming.ignore_names,
) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::BuiltinArgumentShadowing) {
flake8_builtins::rules::builtin_argument_shadowing(checker, parameter);
}
}

@@ -0,0 +1,23 @@
use ruff_python_ast::Parameters;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_bugbear, flake8_pyi, ruff};
/// Run lint rules over a [`Parameters`] syntax node.
pub(crate) fn parameters(parameters: &Parameters, checker: &mut Checker) {
if checker.enabled(Rule::FunctionCallInDefaultArgument) {
flake8_bugbear::rules::function_call_in_argument_default(checker, parameters);
}
if checker.settings.rules.enabled(Rule::ImplicitOptional) {
ruff::rules::implicit_optional(checker, parameters);
}
if checker.source_type.is_stub() {
if checker.enabled(Rule::TypedArgumentDefaultInStub) {
flake8_pyi::rules::typed_argument_simple_defaults(checker, parameters);
}
if checker.enabled(Rule::ArgumentDefaultInStub) {
flake8_pyi::rules::argument_simple_defaults(checker, parameters);
}
}
}

File diff suppressed because it is too large.

@@ -0,0 +1,12 @@
use ruff_python_ast::Stmt;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::flake8_pie;
/// Run lint rules over a suite of [`Stmt`] syntax nodes.
pub(crate) fn suite(suite: &[Stmt], checker: &mut Checker) {
if checker.enabled(Rule::UnnecessaryPass) {
flake8_pie::rules::no_unnecessary_pass(checker, suite);
}
}

@@ -0,0 +1,47 @@
use ruff_diagnostics::Diagnostic;
use ruff_python_semantic::Exceptions;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::pyflakes;
/// Run lint rules over all [`UnresolvedReference`] entities in the [`SemanticModel`].
pub(crate) fn unresolved_references(checker: &mut Checker) {
if !checker.any_enabled(&[Rule::UndefinedLocalWithImportStarUsage, Rule::UndefinedName]) {
return;
}
for reference in checker.semantic.unresolved_references() {
if reference.is_wildcard_import() {
if checker.enabled(Rule::UndefinedLocalWithImportStarUsage) {
checker.diagnostics.push(Diagnostic::new(
pyflakes::rules::UndefinedLocalWithImportStarUsage {
name: reference.name(checker.locator).to_string(),
},
reference.range(),
));
}
} else {
if checker.enabled(Rule::UndefinedName) {
// Avoid flagging if `NameError` is handled.
if reference.exceptions().contains(Exceptions::NAME_ERROR) {
continue;
}
// Allow __path__.
if checker.path.ends_with("__init__.py") {
if reference.name(checker.locator) == "__path__" {
continue;
}
}
checker.diagnostics.push(Diagnostic::new(
pyflakes::rules::UndefinedName {
name: reference.name(checker.locator).to_string(),
},
reference.range(),
));
}
}
}
}

@@ -0,0 +1,17 @@
use ruff_python_ast::{Expr, TypeParam};
use ruff_python_semantic::{ScopeId, Snapshot};
use ruff_text_size::TextRange;
/// A collection of AST nodes that are deferred for later analysis.
/// Used to, e.g., store functions whose bodies shouldn't be analyzed until all
/// module-level definitions have been analyzed.
#[derive(Debug, Default)]
pub(crate) struct Deferred<'a> {
pub(crate) scopes: Vec<ScopeId>,
pub(crate) string_type_definitions: Vec<(TextRange, &'a str, Snapshot)>,
pub(crate) future_type_definitions: Vec<(&'a Expr, Snapshot)>,
pub(crate) type_param_definitions: Vec<(&'a TypeParam, Snapshot)>,
pub(crate) functions: Vec<Snapshot>,
pub(crate) lambdas: Vec<(&'a Expr, Snapshot)>,
pub(crate) for_loops: Vec<Snapshot>,
}

File diff suppressed because it is too large.

@@ -0,0 +1,36 @@
use std::path::Path;
use ruff_diagnostics::Diagnostic;
use crate::registry::Rule;
use crate::rules::flake8_no_pep420::rules::implicit_namespace_package;
use crate::rules::pep8_naming::rules::invalid_module_name;
use crate::settings::Settings;
pub(crate) fn check_file_path(
path: &Path,
package: Option<&Path>,
settings: &Settings,
) -> Vec<Diagnostic> {
let mut diagnostics: Vec<Diagnostic> = vec![];
// flake8-no-pep420
if settings.rules.enabled(Rule::ImplicitNamespacePackage) {
if let Some(diagnostic) =
implicit_namespace_package(path, package, &settings.project_root, &settings.src)
{
diagnostics.push(diagnostic);
}
}
// pep8-naming
if settings.rules.enabled(Rule::InvalidModuleName) {
if let Some(diagnostic) =
invalid_module_name(path, package, &settings.pep8_naming.ignore_names)
{
diagnostics.push(diagnostic);
}
}
diagnostics
}

@@ -0,0 +1,133 @@
//! Lint rules based on import analysis.
use std::borrow::Cow;
use std::path::Path;
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::helpers::to_module_path;
use ruff_python_ast::imports::{ImportMap, ModuleImport};
use ruff_python_ast::statement_visitor::StatementVisitor;
use ruff_python_ast::{self as ast, PySourceType, Stmt, Suite};
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_source_file::Locator;
use ruff_text_size::Ranged;
use crate::directives::IsortDirectives;
use crate::registry::Rule;
use crate::rules::isort;
use crate::rules::isort::block::{Block, BlockBuilder};
use crate::settings::Settings;
use crate::source_kind::SourceKind;
fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) -> Option<ImportMap> {
let Some(package) = package else {
return None;
};
let Some(module_path) = to_module_path(package, path) else {
return None;
};
let num_imports = blocks.iter().map(|block| block.imports.len()).sum();
let mut module_imports = Vec::with_capacity(num_imports);
for stmt in blocks.iter().flat_map(|block| &block.imports) {
match stmt {
Stmt::Import(ast::StmtImport { names, range: _ }) => {
module_imports.extend(
names
.iter()
.map(|name| ModuleImport::new(name.name.to_string(), stmt.range())),
);
}
Stmt::ImportFrom(ast::StmtImportFrom {
module,
names,
level,
range: _,
}) => {
let level = level.map_or(0, |level| level.to_usize());
let module = if let Some(module) = module {
let module: &String = module.as_ref();
if level == 0 {
Cow::Borrowed(module)
} else {
if module_path.len() <= level {
continue;
}
let prefix = module_path[..module_path.len() - level].join(".");
Cow::Owned(format!("{prefix}.{module}"))
}
} else {
if module_path.len() <= level {
continue;
}
Cow::Owned(module_path[..module_path.len() - level].join("."))
};
module_imports.extend(names.iter().map(|name| {
ModuleImport::new(format!("{}.{}", module, name.name), name.range())
}));
}
_ => panic!("Expected Stmt::Import | Stmt::ImportFrom"),
}
}
let mut import_map = ImportMap::default();
import_map.insert(module_path.join("."), module_imports);
Some(import_map)
}
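// Worked example: for a file at `pkg/sub/leaf.py`, `module_path` is
// `["pkg", "sub", "leaf"]`. The statement `from ..util import helper` has
// `level == 2`, so the prefix is `module_path[..1].join(".")`, i.e. `pkg`;
// the resolved module is `pkg.util`, and the recorded import is
// `pkg.util.helper`.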
#[allow(clippy::too_many_arguments)]
pub(crate) fn check_imports(
python_ast: &Suite,
locator: &Locator,
indexer: &Indexer,
directives: &IsortDirectives,
settings: &Settings,
stylist: &Stylist,
path: &Path,
package: Option<&Path>,
source_kind: &SourceKind,
source_type: PySourceType,
) -> (Vec<Diagnostic>, Option<ImportMap>) {
// Extract all import blocks from the AST.
let tracker = {
let mut tracker =
BlockBuilder::new(locator, directives, source_type.is_stub(), source_kind);
tracker.visit_body(python_ast);
tracker
};
let blocks: Vec<&Block> = tracker.iter().collect();
// Enforce import rules.
let mut diagnostics = vec![];
if settings.rules.enabled(Rule::UnsortedImports) {
for block in &blocks {
if !block.imports.is_empty() {
if let Some(diagnostic) = isort::rules::organize_imports(
block,
locator,
stylist,
indexer,
settings,
package,
source_type,
) {
diagnostics.push(diagnostic);
}
}
}
}
if settings.rules.enabled(Rule::MissingRequiredImport) {
diagnostics.extend(isort::rules::add_required_imports(
python_ast,
locator,
stylist,
settings,
source_type,
));
}
// Extract import map.
let imports = extract_import_map(path, package, &blocks);
(diagnostics, imports)
}

@@ -0,0 +1,164 @@
use ruff_diagnostics::{Diagnostic, DiagnosticKind};
use ruff_python_codegen::Stylist;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::TokenKind;
use ruff_source_file::Locator;
use ruff_text_size::{Ranged, TextRange};
use crate::registry::{AsRule, Rule};
use crate::rules::pycodestyle::rules::logical_lines::{
extraneous_whitespace, indentation, missing_whitespace, missing_whitespace_after_keyword,
missing_whitespace_around_operator, space_after_comma, space_around_operator,
whitespace_around_keywords, whitespace_around_named_parameter_equals,
whitespace_before_comment, whitespace_before_parameters, LogicalLines, TokenFlags,
};
use crate::settings::Settings;
/// Return the amount of indentation, expanding tabs to the next multiple of 8.
fn expand_indent(line: &str) -> usize {
let line = line.trim_end_matches(['\n', '\r']);
let mut indent = 0;
for c in line.bytes() {
match c {
b'\t' => indent = (indent / 8) * 8 + 8,
b' ' => indent += 1,
_ => break,
}
}
indent
}
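// Illustrative test: tabs advance the indent to the next multiple of eight
// columns, mirroring pycodestyle's `expand_indent`.
#[test]
fn expand_indent_expands_tabs() {
    assert_eq!(expand_indent("    x = 1"), 4);
    assert_eq!(expand_indent("\t x = 1"), 9); // tab to column 8, then a space
    assert_eq!(expand_indent(" \tx = 1"), 8); // the tab jumps from 1 to 8
}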
pub(crate) fn check_logical_lines(
tokens: &[LexResult],
locator: &Locator,
stylist: &Stylist,
settings: &Settings,
) -> Vec<Diagnostic> {
let mut context = LogicalLinesContext::new(settings);
let should_fix_missing_whitespace = settings.rules.should_fix(Rule::MissingWhitespace);
let should_fix_whitespace_before_parameters =
settings.rules.should_fix(Rule::WhitespaceBeforeParameters);
let should_fix_whitespace_after_open_bracket =
settings.rules.should_fix(Rule::WhitespaceAfterOpenBracket);
let should_fix_whitespace_before_close_bracket = settings
.rules
.should_fix(Rule::WhitespaceBeforeCloseBracket);
let should_fix_whitespace_before_punctuation =
settings.rules.should_fix(Rule::WhitespaceBeforePunctuation);
let mut prev_line = None;
let mut prev_indent_level = None;
let indent_char = stylist.indentation().as_char();
for line in &LogicalLines::from_tokens(tokens, locator) {
if line.flags().contains(TokenFlags::OPERATOR) {
space_around_operator(&line, &mut context);
whitespace_around_named_parameter_equals(&line, &mut context);
missing_whitespace_around_operator(&line, &mut context);
missing_whitespace(&line, should_fix_missing_whitespace, &mut context);
}
if line.flags().contains(TokenFlags::PUNCTUATION) {
space_after_comma(&line, &mut context);
}
if line
.flags()
.intersects(TokenFlags::OPERATOR | TokenFlags::BRACKET | TokenFlags::PUNCTUATION)
{
extraneous_whitespace(
&line,
&mut context,
should_fix_whitespace_after_open_bracket,
should_fix_whitespace_before_close_bracket,
should_fix_whitespace_before_punctuation,
);
}
if line.flags().contains(TokenFlags::KEYWORD) {
whitespace_around_keywords(&line, &mut context);
missing_whitespace_after_keyword(&line, &mut context);
}
if line.flags().contains(TokenFlags::COMMENT) {
whitespace_before_comment(&line, locator, &mut context);
}
if line.flags().contains(TokenFlags::BRACKET) {
whitespace_before_parameters(
&line,
should_fix_whitespace_before_parameters,
&mut context,
);
}
// Extract the indentation level.
let Some(first_token) = line.first_token() else {
continue;
};
let range = if first_token.kind() == TokenKind::Indent {
first_token.range()
} else {
TextRange::new(locator.line_start(first_token.start()), first_token.start())
};
let indent_level = expand_indent(locator.slice(range));
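        // pycodestyle assumes an indent size of four spaces; E111 and related rules flag
        // indentation that is not a multiple of this size.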
let indent_size = 4;
for kind in indentation(
&line,
prev_line.as_ref(),
indent_char,
indent_level,
prev_indent_level,
indent_size,
) {
if settings.rules.enabled(kind.rule()) {
context.push(kind, range);
}
}
if !line.is_comment_only() {
prev_line = Some(line);
prev_indent_level = Some(indent_level);
}
}
context.diagnostics
}
#[derive(Debug, Clone)]
pub(crate) struct LogicalLinesContext<'a> {
settings: &'a Settings,
diagnostics: Vec<Diagnostic>,
}
impl<'a> LogicalLinesContext<'a> {
fn new(settings: &'a Settings) -> Self {
Self {
settings,
diagnostics: Vec::new(),
}
}
pub(crate) fn push<K: Into<DiagnosticKind>>(&mut self, kind: K, range: TextRange) {
let kind = kind.into();
if self.settings.rules.enabled(kind.rule()) {
self.diagnostics.push(Diagnostic {
kind,
range,
fix: None,
parent: None,
});
}
}
pub(crate) fn push_diagnostic(&mut self, diagnostic: Diagnostic) {
if self.settings.rules.enabled(diagnostic.kind.rule()) {
self.diagnostics.push(diagnostic);
}
}
}

@@ -0,0 +1,7 @@
pub(crate) mod ast;
pub(crate) mod filesystem;
pub(crate) mod imports;
pub(crate) mod logical_lines;
pub(crate) mod noqa;
pub(crate) mod physical_lines;
pub(crate) mod tokens;

@@ -0,0 +1,243 @@
//! `NoQA` enforcement and validation.
use std::path::Path;
use itertools::Itertools;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use ruff_diagnostics::{Diagnostic, Edit, Fix};
use ruff_python_trivia::CommentRanges;
use ruff_source_file::Locator;
use crate::noqa;
use crate::noqa::{Directive, FileExemption, NoqaDirectives, NoqaMapping};
use crate::registry::{AsRule, Rule};
use crate::rule_redirects::get_redirect_target;
use crate::rules::ruff::rules::{UnusedCodes, UnusedNOQA};
use crate::settings::Settings;
pub(crate) fn check_noqa(
diagnostics: &mut Vec<Diagnostic>,
path: &Path,
locator: &Locator,
comment_ranges: &CommentRanges,
noqa_line_for: &NoqaMapping,
analyze_directives: bool,
settings: &Settings,
) -> Vec<usize> {
// Identify any codes that are globally exempted (within the current file).
let exemption = FileExemption::try_extract(locator.contents(), comment_ranges, path, locator);
// Extract all `noqa` directives.
let mut noqa_directives = NoqaDirectives::from_commented_ranges(comment_ranges, path, locator);
// Indices of diagnostics that were ignored by a `noqa` directive.
let mut ignored_diagnostics = vec![];
// Remove any ignored diagnostics.
'outer: for (index, diagnostic) in diagnostics.iter().enumerate() {
if matches!(diagnostic.kind.rule(), Rule::BlanketNOQA) {
continue;
}
match &exemption {
Some(FileExemption::All) => {
// If the file is exempted, ignore all diagnostics.
ignored_diagnostics.push(index);
continue;
}
Some(FileExemption::Codes(codes)) => {
// If the diagnostic is ignored by a global exemption, ignore it.
if codes.contains(&diagnostic.kind.rule().noqa_code()) {
ignored_diagnostics.push(index);
continue;
}
}
None => {}
}
let noqa_offsets = diagnostic
.parent
.into_iter()
.chain(std::iter::once(diagnostic.start()))
.map(|position| noqa_line_for.resolve(position))
.unique();
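        // A diagnostic can be suppressed by a directive on its own line or on the line of
        // its parent expression; `resolve` maps each offset to the line on which a `noqa`
        // would have to appear (e.g., the last line of a multi-line statement).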
for noqa_offset in noqa_offsets {
if let Some(directive_line) = noqa_directives.find_line_with_directive_mut(noqa_offset)
{
let suppressed = match &directive_line.directive {
Directive::All(_) => {
directive_line
.matches
.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
true
}
Directive::Codes(directive) => {
if noqa::includes(diagnostic.kind.rule(), directive.codes()) {
directive_line
.matches
.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
true
} else {
false
}
}
};
if suppressed {
continue 'outer;
}
}
}
}
// Enforce that the noqa directive was actually used (RUF100), unless RUF100 was itself
// suppressed.
if settings.rules.enabled(Rule::UnusedNOQA)
&& analyze_directives
&& !exemption.is_some_and(|exemption| match exemption {
FileExemption::All => true,
FileExemption::Codes(codes) => codes.contains(&Rule::UnusedNOQA.noqa_code()),
})
{
for line in noqa_directives.lines() {
match &line.directive {
Directive::All(directive) => {
if line.matches.is_empty() {
let mut diagnostic =
Diagnostic::new(UnusedNOQA { codes: None }, directive.range());
if settings.rules.should_fix(diagnostic.kind.rule()) {
diagnostic
.set_fix(Fix::suggested(delete_noqa(directive.range(), locator)));
}
diagnostics.push(diagnostic);
}
}
Directive::Codes(directive) => {
let mut disabled_codes = vec![];
let mut unknown_codes = vec![];
let mut unmatched_codes = vec![];
let mut valid_codes = vec![];
let mut self_ignore = false;
for code in directive.codes() {
let code = get_redirect_target(code).unwrap_or(code);
if Rule::UnusedNOQA.noqa_code() == code {
self_ignore = true;
break;
}
if line.matches.iter().any(|match_| *match_ == code)
|| settings.external.contains(code)
{
valid_codes.push(code);
                            } else if let Ok(rule) = Rule::from_code(code) {
                                if settings.rules.enabled(rule) {
                                    unmatched_codes.push(code);
                                } else {
                                    disabled_codes.push(code);
                                }
                            } else {
                                unknown_codes.push(code);
                            }
}
if self_ignore {
continue;
}
if !(disabled_codes.is_empty()
&& unknown_codes.is_empty()
&& unmatched_codes.is_empty())
{
let mut diagnostic = Diagnostic::new(
UnusedNOQA {
codes: Some(UnusedCodes {
disabled: disabled_codes
.iter()
.map(|code| (*code).to_string())
.collect(),
unknown: unknown_codes
.iter()
.map(|code| (*code).to_string())
.collect(),
unmatched: unmatched_codes
.iter()
.map(|code| (*code).to_string())
.collect(),
}),
},
directive.range(),
);
if settings.rules.should_fix(diagnostic.kind.rule()) {
if valid_codes.is_empty() {
diagnostic.set_fix(Fix::suggested(delete_noqa(
directive.range(),
locator,
)));
} else {
diagnostic.set_fix(Fix::suggested(Edit::range_replacement(
format!("# noqa: {}", valid_codes.join(", ")),
directive.range(),
)));
}
}
diagnostics.push(diagnostic);
}
}
}
}
}
ignored_diagnostics.sort_unstable();
ignored_diagnostics
}
/// Generate an [`Edit`] to delete a `noqa` directive.
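///
/// Depending on what else is on the line, this removes the entire line (a bare `# noqa`),
/// the directive along with its leading whitespace (a trailing `# noqa`), or only the
/// directive text itself (when another comment or trailing text follows); see the `Ex)`
/// cases below.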
fn delete_noqa(range: TextRange, locator: &Locator) -> Edit {
let line_range = locator.line_range(range.start());
// Compute the leading space.
let prefix = locator.slice(TextRange::new(line_range.start(), range.start()));
let leading_space = prefix
.rfind(|c: char| !c.is_whitespace())
.map_or(prefix.len(), |i| prefix.len() - i - 1);
let leading_space_len = TextSize::try_from(leading_space).unwrap();
// Compute the trailing space.
let suffix = locator.slice(TextRange::new(range.end(), line_range.end()));
let trailing_space = suffix
.find(|c: char| !c.is_whitespace())
.map_or(suffix.len(), |i| i);
let trailing_space_len = TextSize::try_from(trailing_space).unwrap();
// Ex) `# noqa`
if line_range
== TextRange::new(
range.start() - leading_space_len,
range.end() + trailing_space_len,
)
{
let full_line_end = locator.full_line_end(line_range.end());
Edit::deletion(line_range.start(), full_line_end)
}
// Ex) `x = 1 # noqa`
else if range.end() + trailing_space_len == line_range.end() {
Edit::deletion(range.start() - leading_space_len, line_range.end())
}
// Ex) `x = 1 # noqa # type: ignore`
else if locator.contents()[usize::from(range.end() + trailing_space_len)..].starts_with('#') {
Edit::deletion(range.start(), range.end() + trailing_space_len)
}
// Ex) `x = 1 # noqa here`
else {
Edit::deletion(
range.start() + "# ".text_len(),
range.end() + trailing_space_len,
)
}
}

@@ -0,0 +1,130 @@
//! Lint rules based on checking physical lines.
use ruff_diagnostics::Diagnostic;
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_source_file::{Locator, UniversalNewlines};
use ruff_text_size::TextSize;
use crate::registry::Rule;
use crate::rules::flake8_copyright::rules::missing_copyright_notice;
use crate::rules::pycodestyle::rules::{
doc_line_too_long, line_too_long, mixed_spaces_and_tabs, no_newline_at_end_of_file,
trailing_whitespace,
};
use crate::rules::pylint;
use crate::settings::Settings;
pub(crate) fn check_physical_lines(
locator: &Locator,
stylist: &Stylist,
indexer: &Indexer,
doc_lines: &[TextSize],
settings: &Settings,
) -> Vec<Diagnostic> {
let mut diagnostics: Vec<Diagnostic> = vec![];
let enforce_doc_line_too_long = settings.rules.enabled(Rule::DocLineTooLong);
let enforce_line_too_long = settings.rules.enabled(Rule::LineTooLong);
let enforce_no_newline_at_end_of_file = settings.rules.enabled(Rule::MissingNewlineAtEndOfFile);
let enforce_mixed_spaces_and_tabs = settings.rules.enabled(Rule::MixedSpacesAndTabs);
let enforce_bidirectional_unicode = settings.rules.enabled(Rule::BidirectionalUnicode);
let enforce_trailing_whitespace = settings.rules.enabled(Rule::TrailingWhitespace);
let enforce_blank_line_contains_whitespace =
settings.rules.enabled(Rule::BlankLineWithWhitespace);
let enforce_copyright_notice = settings.rules.enabled(Rule::MissingCopyrightNotice);
let mut doc_lines_iter = doc_lines.iter().peekable();
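    // `doc_lines` is in source order; consume every doc line that starts within the
    // current physical line, so that W505 (doc-line-too-long) only runs on doc lines.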
for line in locator.contents().universal_newlines() {
while doc_lines_iter
.next_if(|doc_line_start| line.range().contains_inclusive(**doc_line_start))
.is_some()
{
if enforce_doc_line_too_long {
if let Some(diagnostic) = doc_line_too_long(&line, settings) {
diagnostics.push(diagnostic);
}
}
}
if enforce_mixed_spaces_and_tabs {
if let Some(diagnostic) = mixed_spaces_and_tabs(&line) {
diagnostics.push(diagnostic);
}
}
if enforce_line_too_long {
if let Some(diagnostic) = line_too_long(&line, settings) {
diagnostics.push(diagnostic);
}
}
if enforce_bidirectional_unicode {
diagnostics.extend(pylint::rules::bidirectional_unicode(&line));
}
if enforce_trailing_whitespace || enforce_blank_line_contains_whitespace {
if let Some(diagnostic) = trailing_whitespace(&line, locator, indexer, settings) {
diagnostics.push(diagnostic);
}
}
}
if enforce_no_newline_at_end_of_file {
if let Some(diagnostic) = no_newline_at_end_of_file(
locator,
stylist,
settings.rules.should_fix(Rule::MissingNewlineAtEndOfFile),
) {
diagnostics.push(diagnostic);
}
}
if enforce_copyright_notice {
if let Some(diagnostic) = missing_copyright_notice(locator, settings) {
diagnostics.push(diagnostic);
}
}
diagnostics
}
#[cfg(test)]
mod tests {
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_python_parser::lexer::lex;
use ruff_python_parser::Mode;
use ruff_source_file::Locator;
use crate::line_width::LineLength;
use crate::registry::Rule;
use crate::settings::Settings;
use super::check_physical_lines;
#[test]
fn e501_non_ascii_char() {
let line = "'\u{4e9c}' * 2"; // 7 in UTF-32, 9 in UTF-8.
let locator = Locator::new(line);
let tokens: Vec<_> = lex(line, Mode::Module).collect();
let indexer = Indexer::from_tokens(&tokens, &locator);
let stylist = Stylist::from_tokens(&tokens, &locator);
let check_with_max_line_length = |line_length: LineLength| {
check_physical_lines(
&locator,
&stylist,
&indexer,
&[],
&Settings {
line_length,
..Settings::for_rule(Rule::LineTooLong)
},
)
};
let line_length = LineLength::try_from(8).unwrap();
        assert_eq!(check_with_max_line_length(line_length), vec![]);
}
}

@@ -0,0 +1,197 @@
//! Lint rules based on token traversal.
use std::path::Path;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::Tok;
use ruff_diagnostics::Diagnostic;
use ruff_python_index::Indexer;
use ruff_source_file::Locator;
use crate::directives::TodoComment;
use crate::lex::docstring_detection::StateMachine;
use crate::registry::{AsRule, Rule};
use crate::rules::ruff::rules::Context;
use crate::rules::{
eradicate, flake8_commas, flake8_executable, flake8_fixme, flake8_implicit_str_concat,
flake8_pyi, flake8_quotes, flake8_todos, pycodestyle, pygrep_hooks, pylint, pyupgrade, ruff,
};
use crate::settings::Settings;
pub(crate) fn check_tokens(
tokens: &[LexResult],
path: &Path,
locator: &Locator,
indexer: &Indexer,
settings: &Settings,
is_stub: bool,
) -> Vec<Diagnostic> {
let mut diagnostics: Vec<Diagnostic> = vec![];
if settings.rules.enabled(Rule::BlanketNOQA) {
pygrep_hooks::rules::blanket_noqa(&mut diagnostics, indexer, locator);
}
if settings.rules.enabled(Rule::BlanketTypeIgnore) {
pygrep_hooks::rules::blanket_type_ignore(&mut diagnostics, indexer, locator);
}
if settings.rules.any_enabled(&[
Rule::AmbiguousUnicodeCharacterString,
Rule::AmbiguousUnicodeCharacterDocstring,
Rule::AmbiguousUnicodeCharacterComment,
]) {
let mut state_machine = StateMachine::default();
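        // Track whether each string token is a docstring, so the diagnostic can report
        // `Context::Docstring` rather than `Context::String` where appropriate.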
for &(ref tok, range) in tokens.iter().flatten() {
let is_docstring = state_machine.consume(tok);
if matches!(tok, Tok::String { .. } | Tok::Comment(_)) {
ruff::rules::ambiguous_unicode_character(
&mut diagnostics,
locator,
range,
if tok.is_string() {
if is_docstring {
Context::Docstring
} else {
Context::String
}
} else {
Context::Comment
},
settings,
);
}
}
}
if settings.rules.enabled(Rule::CommentedOutCode) {
eradicate::rules::commented_out_code(&mut diagnostics, locator, indexer, settings);
}
if settings.rules.enabled(Rule::UTF8EncodingDeclaration) {
pyupgrade::rules::unnecessary_coding_comment(&mut diagnostics, locator, indexer, settings);
}
if settings.rules.enabled(Rule::InvalidEscapeSequence) {
for (tok, range) in tokens.iter().flatten() {
if tok.is_string() {
pycodestyle::rules::invalid_escape_sequence(
&mut diagnostics,
locator,
*range,
settings.rules.should_fix(Rule::InvalidEscapeSequence),
);
}
}
}
if settings.rules.enabled(Rule::TabIndentation) {
pycodestyle::rules::tab_indentation(&mut diagnostics, tokens, locator, indexer);
}
if settings.rules.any_enabled(&[
Rule::InvalidCharacterBackspace,
Rule::InvalidCharacterSub,
Rule::InvalidCharacterEsc,
Rule::InvalidCharacterNul,
Rule::InvalidCharacterZeroWidthSpace,
]) {
for (tok, range) in tokens.iter().flatten() {
if tok.is_string() {
pylint::rules::invalid_string_characters(&mut diagnostics, *range, locator);
}
}
}
if settings.rules.any_enabled(&[
Rule::MultipleStatementsOnOneLineColon,
Rule::MultipleStatementsOnOneLineSemicolon,
Rule::UselessSemicolon,
]) {
pycodestyle::rules::compound_statements(
&mut diagnostics,
tokens,
locator,
indexer,
settings,
);
}
if settings.rules.any_enabled(&[
Rule::BadQuotesInlineString,
Rule::BadQuotesMultilineString,
Rule::BadQuotesDocstring,
Rule::AvoidableEscapedQuote,
]) {
flake8_quotes::rules::from_tokens(&mut diagnostics, tokens, locator, settings);
}
if settings.rules.any_enabled(&[
Rule::SingleLineImplicitStringConcatenation,
Rule::MultiLineImplicitStringConcatenation,
]) {
flake8_implicit_str_concat::rules::implicit(
&mut diagnostics,
tokens,
&settings.flake8_implicit_str_concat,
locator,
);
}
if settings.rules.any_enabled(&[
Rule::MissingTrailingComma,
Rule::TrailingCommaOnBareTuple,
Rule::ProhibitedTrailingComma,
]) {
flake8_commas::rules::trailing_commas(&mut diagnostics, tokens, locator, settings);
}
if settings.rules.enabled(Rule::ExtraneousParentheses) {
pyupgrade::rules::extraneous_parentheses(&mut diagnostics, tokens, locator, settings);
}
if is_stub && settings.rules.enabled(Rule::TypeCommentInStub) {
flake8_pyi::rules::type_comment_in_stub(&mut diagnostics, locator, indexer);
}
if settings.rules.any_enabled(&[
Rule::ShebangNotExecutable,
Rule::ShebangMissingExecutableFile,
Rule::ShebangLeadingWhitespace,
Rule::ShebangNotFirstLine,
Rule::ShebangMissingPython,
]) {
flake8_executable::rules::from_tokens(tokens, path, locator, settings, &mut diagnostics);
}
if settings.rules.any_enabled(&[
Rule::InvalidTodoTag,
Rule::MissingTodoAuthor,
Rule::MissingTodoLink,
Rule::MissingTodoColon,
Rule::MissingTodoDescription,
Rule::InvalidTodoCapitalization,
Rule::MissingSpaceAfterTodoColon,
Rule::LineContainsFixme,
Rule::LineContainsXxx,
Rule::LineContainsTodo,
Rule::LineContainsHack,
]) {
let todo_comments: Vec<TodoComment> = indexer
.comment_ranges()
.iter()
.enumerate()
.filter_map(|(i, comment_range)| {
let comment = locator.slice(*comment_range);
TodoComment::from_comment(comment, *comment_range, i)
})
.collect();
flake8_todos::rules::todos(&mut diagnostics, &todo_comments, locator, indexer, settings);
flake8_fixme::rules::todos(&mut diagnostics, &todo_comments);
}
diagnostics.retain(|diagnostic| settings.rules.enabled(diagnostic.kind.rule()));
diagnostics
}

@@ -0,0 +1,935 @@
/// In this module we generate [`Rule`], an enum of all rules, and [`RuleCodePrefix`], an enum of
/// all rule categories. A rule category is something like pyflakes or flake8-todos. Each rule
/// category contains all of its rules and their common prefixes, i.e., everything you can specify
/// in `--select`. For pylint this is e.g. C0414 and E0118 but also C and E01.
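///
/// For example, passing `--select PLC0414` on the command line enables just the
/// `useless-import-alias` rule, while `--select PLC` enables every pylint convention rule.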
use std::fmt::Formatter;
use strum_macros::{AsRefStr, EnumIter};
use ruff_diagnostics::Violation;
use crate::registry::{AsRule, Linter};
use crate::rule_selector::is_single_rule_selector;
use crate::rules;
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct NoqaCode(&'static str, &'static str);
impl NoqaCode {
/// Return the prefix for the [`NoqaCode`], e.g., `SIM` for `SIM101`.
pub fn prefix(&self) -> &str {
self.0
}
/// Return the suffix for the [`NoqaCode`], e.g., `101` for `SIM101`.
pub fn suffix(&self) -> &str {
self.1
}
}
impl std::fmt::Debug for NoqaCode {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for NoqaCode {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "{}{}", self.0, self.1)
}
}
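/// A [`NoqaCode`] compares equal to a string of the form `{prefix}{suffix}`, e.g.,
/// `NoqaCode("SIM", "101")` equals `"SIM101"`.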
impl PartialEq<&str> for NoqaCode {
fn eq(&self, other: &&str) -> bool {
match other.strip_prefix(self.0) {
Some(suffix) => suffix == self.1,
None => false,
}
}
}
#[derive(Debug, Copy, Clone)]
pub enum RuleGroup {
/// The rule has not been assigned to any specific group.
Unspecified,
/// The rule is unstable, and preview mode must be enabled for usage.
Preview,
    /// Legacy category for unstable rules; supports backwards-compatible selection.
#[deprecated(note = "Use `RuleGroup::Preview` for new rules instead")]
Nursery,
}
#[ruff_macros::map_codes]
pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
#[allow(clippy::enum_glob_use)]
use Linter::*;
#[rustfmt::skip]
Some(match (linter, code) {
// pycodestyle errors
(Pycodestyle, "E101") => (RuleGroup::Unspecified, rules::pycodestyle::rules::MixedSpacesAndTabs),
#[allow(deprecated)]
(Pycodestyle, "E111") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::IndentationWithInvalidMultiple),
#[allow(deprecated)]
(Pycodestyle, "E112") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoIndentedBlock),
#[allow(deprecated)]
(Pycodestyle, "E113") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::UnexpectedIndentation),
#[allow(deprecated)]
(Pycodestyle, "E114") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::IndentationWithInvalidMultipleComment),
#[allow(deprecated)]
(Pycodestyle, "E115") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoIndentedBlockComment),
#[allow(deprecated)]
(Pycodestyle, "E116") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::UnexpectedIndentationComment),
#[allow(deprecated)]
(Pycodestyle, "E117") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::OverIndented),
#[allow(deprecated)]
(Pycodestyle, "E201") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceAfterOpenBracket),
#[allow(deprecated)]
(Pycodestyle, "E202") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceBeforeCloseBracket),
#[allow(deprecated)]
(Pycodestyle, "E203") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceBeforePunctuation),
#[allow(deprecated)]
(Pycodestyle, "E211") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceBeforeParameters),
#[allow(deprecated)]
(Pycodestyle, "E221") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesBeforeOperator),
#[allow(deprecated)]
(Pycodestyle, "E222") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterOperator),
#[allow(deprecated)]
(Pycodestyle, "E223") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabBeforeOperator),
#[allow(deprecated)]
(Pycodestyle, "E224") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabAfterOperator),
#[allow(deprecated)]
(Pycodestyle, "E225") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundOperator),
#[allow(deprecated)]
(Pycodestyle, "E226") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundArithmeticOperator),
#[allow(deprecated)]
(Pycodestyle, "E227") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundBitwiseOrShiftOperator),
#[allow(deprecated)]
(Pycodestyle, "E228") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundModuloOperator),
#[allow(deprecated)]
(Pycodestyle, "E231") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespace),
#[allow(deprecated)]
(Pycodestyle, "E241") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterComma),
#[allow(deprecated)]
(Pycodestyle, "E242") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabAfterComma),
#[allow(deprecated)]
(Pycodestyle, "E251") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::UnexpectedSpacesAroundKeywordParameterEquals),
#[allow(deprecated)]
(Pycodestyle, "E252") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundParameterEquals),
#[allow(deprecated)]
(Pycodestyle, "E261") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TooFewSpacesBeforeInlineComment),
#[allow(deprecated)]
(Pycodestyle, "E262") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoSpaceAfterInlineComment),
#[allow(deprecated)]
(Pycodestyle, "E265") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoSpaceAfterBlockComment),
#[allow(deprecated)]
(Pycodestyle, "E266") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleLeadingHashesForBlockComment),
#[allow(deprecated)]
(Pycodestyle, "E271") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterKeyword),
#[allow(deprecated)]
(Pycodestyle, "E272") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesBeforeKeyword),
#[allow(deprecated)]
(Pycodestyle, "E273") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabAfterKeyword),
#[allow(deprecated)]
(Pycodestyle, "E274") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabBeforeKeyword),
#[allow(deprecated)]
(Pycodestyle, "E275") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAfterKeyword),
(Pycodestyle, "E401") => (RuleGroup::Unspecified, rules::pycodestyle::rules::MultipleImportsOnOneLine),
(Pycodestyle, "E402") => (RuleGroup::Unspecified, rules::pycodestyle::rules::ModuleImportNotAtTopOfFile),
(Pycodestyle, "E501") => (RuleGroup::Unspecified, rules::pycodestyle::rules::LineTooLong),
(Pycodestyle, "E701") => (RuleGroup::Unspecified, rules::pycodestyle::rules::MultipleStatementsOnOneLineColon),
(Pycodestyle, "E702") => (RuleGroup::Unspecified, rules::pycodestyle::rules::MultipleStatementsOnOneLineSemicolon),
(Pycodestyle, "E703") => (RuleGroup::Unspecified, rules::pycodestyle::rules::UselessSemicolon),
(Pycodestyle, "E711") => (RuleGroup::Unspecified, rules::pycodestyle::rules::NoneComparison),
(Pycodestyle, "E712") => (RuleGroup::Unspecified, rules::pycodestyle::rules::TrueFalseComparison),
(Pycodestyle, "E713") => (RuleGroup::Unspecified, rules::pycodestyle::rules::NotInTest),
(Pycodestyle, "E714") => (RuleGroup::Unspecified, rules::pycodestyle::rules::NotIsTest),
(Pycodestyle, "E721") => (RuleGroup::Unspecified, rules::pycodestyle::rules::TypeComparison),
(Pycodestyle, "E722") => (RuleGroup::Unspecified, rules::pycodestyle::rules::BareExcept),
(Pycodestyle, "E731") => (RuleGroup::Unspecified, rules::pycodestyle::rules::LambdaAssignment),
(Pycodestyle, "E741") => (RuleGroup::Unspecified, rules::pycodestyle::rules::AmbiguousVariableName),
(Pycodestyle, "E742") => (RuleGroup::Unspecified, rules::pycodestyle::rules::AmbiguousClassName),
(Pycodestyle, "E743") => (RuleGroup::Unspecified, rules::pycodestyle::rules::AmbiguousFunctionName),
(Pycodestyle, "E902") => (RuleGroup::Unspecified, rules::pycodestyle::rules::IOError),
(Pycodestyle, "E999") => (RuleGroup::Unspecified, rules::pycodestyle::rules::SyntaxError),
// pycodestyle warnings
(Pycodestyle, "W191") => (RuleGroup::Unspecified, rules::pycodestyle::rules::TabIndentation),
(Pycodestyle, "W291") => (RuleGroup::Unspecified, rules::pycodestyle::rules::TrailingWhitespace),
(Pycodestyle, "W292") => (RuleGroup::Unspecified, rules::pycodestyle::rules::MissingNewlineAtEndOfFile),
(Pycodestyle, "W293") => (RuleGroup::Unspecified, rules::pycodestyle::rules::BlankLineWithWhitespace),
(Pycodestyle, "W505") => (RuleGroup::Unspecified, rules::pycodestyle::rules::DocLineTooLong),
(Pycodestyle, "W605") => (RuleGroup::Unspecified, rules::pycodestyle::rules::InvalidEscapeSequence),
// pyflakes
(Pyflakes, "401") => (RuleGroup::Unspecified, rules::pyflakes::rules::UnusedImport),
(Pyflakes, "402") => (RuleGroup::Unspecified, rules::pyflakes::rules::ImportShadowedByLoopVar),
(Pyflakes, "403") => (RuleGroup::Unspecified, rules::pyflakes::rules::UndefinedLocalWithImportStar),
(Pyflakes, "404") => (RuleGroup::Unspecified, rules::pyflakes::rules::LateFutureImport),
(Pyflakes, "405") => (RuleGroup::Unspecified, rules::pyflakes::rules::UndefinedLocalWithImportStarUsage),
(Pyflakes, "406") => (RuleGroup::Unspecified, rules::pyflakes::rules::UndefinedLocalWithNestedImportStarUsage),
(Pyflakes, "407") => (RuleGroup::Unspecified, rules::pyflakes::rules::FutureFeatureNotDefined),
(Pyflakes, "501") => (RuleGroup::Unspecified, rules::pyflakes::rules::PercentFormatInvalidFormat),
(Pyflakes, "502") => (RuleGroup::Unspecified, rules::pyflakes::rules::PercentFormatExpectedMapping),
(Pyflakes, "503") => (RuleGroup::Unspecified, rules::pyflakes::rules::PercentFormatExpectedSequence),
(Pyflakes, "504") => (RuleGroup::Unspecified, rules::pyflakes::rules::PercentFormatExtraNamedArguments),
(Pyflakes, "505") => (RuleGroup::Unspecified, rules::pyflakes::rules::PercentFormatMissingArgument),
(Pyflakes, "506") => (RuleGroup::Unspecified, rules::pyflakes::rules::PercentFormatMixedPositionalAndNamed),
(Pyflakes, "507") => (RuleGroup::Unspecified, rules::pyflakes::rules::PercentFormatPositionalCountMismatch),
(Pyflakes, "508") => (RuleGroup::Unspecified, rules::pyflakes::rules::PercentFormatStarRequiresSequence),
(Pyflakes, "509") => (RuleGroup::Unspecified, rules::pyflakes::rules::PercentFormatUnsupportedFormatCharacter),
(Pyflakes, "521") => (RuleGroup::Unspecified, rules::pyflakes::rules::StringDotFormatInvalidFormat),
(Pyflakes, "522") => (RuleGroup::Unspecified, rules::pyflakes::rules::StringDotFormatExtraNamedArguments),
(Pyflakes, "523") => (RuleGroup::Unspecified, rules::pyflakes::rules::StringDotFormatExtraPositionalArguments),
(Pyflakes, "524") => (RuleGroup::Unspecified, rules::pyflakes::rules::StringDotFormatMissingArguments),
(Pyflakes, "525") => (RuleGroup::Unspecified, rules::pyflakes::rules::StringDotFormatMixingAutomatic),
(Pyflakes, "541") => (RuleGroup::Unspecified, rules::pyflakes::rules::FStringMissingPlaceholders),
(Pyflakes, "601") => (RuleGroup::Unspecified, rules::pyflakes::rules::MultiValueRepeatedKeyLiteral),
(Pyflakes, "602") => (RuleGroup::Unspecified, rules::pyflakes::rules::MultiValueRepeatedKeyVariable),
(Pyflakes, "621") => (RuleGroup::Unspecified, rules::pyflakes::rules::ExpressionsInStarAssignment),
(Pyflakes, "622") => (RuleGroup::Unspecified, rules::pyflakes::rules::MultipleStarredExpressions),
(Pyflakes, "631") => (RuleGroup::Unspecified, rules::pyflakes::rules::AssertTuple),
(Pyflakes, "632") => (RuleGroup::Unspecified, rules::pyflakes::rules::IsLiteral),
(Pyflakes, "633") => (RuleGroup::Unspecified, rules::pyflakes::rules::InvalidPrintSyntax),
(Pyflakes, "634") => (RuleGroup::Unspecified, rules::pyflakes::rules::IfTuple),
(Pyflakes, "701") => (RuleGroup::Unspecified, rules::pyflakes::rules::BreakOutsideLoop),
(Pyflakes, "702") => (RuleGroup::Unspecified, rules::pyflakes::rules::ContinueOutsideLoop),
(Pyflakes, "704") => (RuleGroup::Unspecified, rules::pyflakes::rules::YieldOutsideFunction),
(Pyflakes, "706") => (RuleGroup::Unspecified, rules::pyflakes::rules::ReturnOutsideFunction),
(Pyflakes, "707") => (RuleGroup::Unspecified, rules::pyflakes::rules::DefaultExceptNotLast),
(Pyflakes, "722") => (RuleGroup::Unspecified, rules::pyflakes::rules::ForwardAnnotationSyntaxError),
(Pyflakes, "811") => (RuleGroup::Unspecified, rules::pyflakes::rules::RedefinedWhileUnused),
(Pyflakes, "821") => (RuleGroup::Unspecified, rules::pyflakes::rules::UndefinedName),
(Pyflakes, "822") => (RuleGroup::Unspecified, rules::pyflakes::rules::UndefinedExport),
(Pyflakes, "823") => (RuleGroup::Unspecified, rules::pyflakes::rules::UndefinedLocal),
(Pyflakes, "841") => (RuleGroup::Unspecified, rules::pyflakes::rules::UnusedVariable),
(Pyflakes, "842") => (RuleGroup::Unspecified, rules::pyflakes::rules::UnusedAnnotation),
(Pyflakes, "901") => (RuleGroup::Unspecified, rules::pyflakes::rules::RaiseNotImplemented),
// pylint
(Pylint, "C0105") => (RuleGroup::Unspecified, rules::pylint::rules::TypeNameIncorrectVariance),
(Pylint, "C0131") => (RuleGroup::Unspecified, rules::pylint::rules::TypeBivariance),
(Pylint, "C0132") => (RuleGroup::Unspecified, rules::pylint::rules::TypeParamNameMismatch),
(Pylint, "C0205") => (RuleGroup::Unspecified, rules::pylint::rules::SingleStringSlots),
(Pylint, "C0208") => (RuleGroup::Unspecified, rules::pylint::rules::IterationOverSet),
(Pylint, "C0414") => (RuleGroup::Unspecified, rules::pylint::rules::UselessImportAlias),
#[allow(deprecated)]
(Pylint, "C1901") => (RuleGroup::Nursery, rules::pylint::rules::CompareToEmptyString),
(Pylint, "C3002") => (RuleGroup::Unspecified, rules::pylint::rules::UnnecessaryDirectLambdaCall),
(Pylint, "E0100") => (RuleGroup::Unspecified, rules::pylint::rules::YieldInInit),
(Pylint, "E0101") => (RuleGroup::Unspecified, rules::pylint::rules::ReturnInInit),
(Pylint, "E0116") => (RuleGroup::Unspecified, rules::pylint::rules::ContinueInFinally),
(Pylint, "E0117") => (RuleGroup::Unspecified, rules::pylint::rules::NonlocalWithoutBinding),
(Pylint, "E0118") => (RuleGroup::Unspecified, rules::pylint::rules::LoadBeforeGlobalDeclaration),
(Pylint, "E0241") => (RuleGroup::Unspecified, rules::pylint::rules::DuplicateBases),
(Pylint, "E0302") => (RuleGroup::Unspecified, rules::pylint::rules::UnexpectedSpecialMethodSignature),
(Pylint, "E0307") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidStrReturnType),
(Pylint, "E0604") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidAllObject),
(Pylint, "E0605") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidAllFormat),
(Pylint, "E1142") => (RuleGroup::Unspecified, rules::pylint::rules::AwaitOutsideAsync),
(Pylint, "E1205") => (RuleGroup::Unspecified, rules::pylint::rules::LoggingTooManyArgs),
(Pylint, "E1206") => (RuleGroup::Unspecified, rules::pylint::rules::LoggingTooFewArgs),
(Pylint, "E1300") => (RuleGroup::Unspecified, rules::pylint::rules::BadStringFormatCharacter),
(Pylint, "E1307") => (RuleGroup::Unspecified, rules::pylint::rules::BadStringFormatType),
(Pylint, "E1310") => (RuleGroup::Unspecified, rules::pylint::rules::BadStrStripCall),
(Pylint, "E1507") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidEnvvarValue),
(Pylint, "E1700") => (RuleGroup::Unspecified, rules::pylint::rules::YieldFromInAsyncFunction),
(Pylint, "E2502") => (RuleGroup::Unspecified, rules::pylint::rules::BidirectionalUnicode),
(Pylint, "E2510") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidCharacterBackspace),
(Pylint, "E2512") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidCharacterSub),
(Pylint, "E2513") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidCharacterEsc),
(Pylint, "E2514") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidCharacterNul),
(Pylint, "E2515") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidCharacterZeroWidthSpace),
(Pylint, "R0124") => (RuleGroup::Unspecified, rules::pylint::rules::ComparisonWithItself),
(Pylint, "R0133") => (RuleGroup::Unspecified, rules::pylint::rules::ComparisonOfConstant),
(Pylint, "R0206") => (RuleGroup::Unspecified, rules::pylint::rules::PropertyWithParameters),
(Pylint, "R0402") => (RuleGroup::Unspecified, rules::pylint::rules::ManualFromImport),
(Pylint, "R0911") => (RuleGroup::Unspecified, rules::pylint::rules::TooManyReturnStatements),
(Pylint, "R0912") => (RuleGroup::Unspecified, rules::pylint::rules::TooManyBranches),
(Pylint, "R0913") => (RuleGroup::Unspecified, rules::pylint::rules::TooManyArguments),
(Pylint, "R0915") => (RuleGroup::Unspecified, rules::pylint::rules::TooManyStatements),
(Pylint, "R1701") => (RuleGroup::Unspecified, rules::pylint::rules::RepeatedIsinstanceCalls),
(Pylint, "R1711") => (RuleGroup::Unspecified, rules::pylint::rules::UselessReturn),
(Pylint, "R1714") => (RuleGroup::Unspecified, rules::pylint::rules::RepeatedEqualityComparison),
(Pylint, "R1722") => (RuleGroup::Unspecified, rules::pylint::rules::SysExitAlias),
(Pylint, "R2004") => (RuleGroup::Unspecified, rules::pylint::rules::MagicValueComparison),
(Pylint, "R5501") => (RuleGroup::Unspecified, rules::pylint::rules::CollapsibleElseIf),
#[allow(deprecated)]
(Pylint, "R6301") => (RuleGroup::Nursery, rules::pylint::rules::NoSelfUse),
(Pylint, "W0120") => (RuleGroup::Unspecified, rules::pylint::rules::UselessElseOnLoop),
(Pylint, "W0127") => (RuleGroup::Unspecified, rules::pylint::rules::SelfAssigningVariable),
(Pylint, "W0129") => (RuleGroup::Unspecified, rules::pylint::rules::AssertOnStringLiteral),
(Pylint, "W0131") => (RuleGroup::Unspecified, rules::pylint::rules::NamedExprWithoutContext),
(Pylint, "W0406") => (RuleGroup::Unspecified, rules::pylint::rules::ImportSelf),
(Pylint, "W0602") => (RuleGroup::Unspecified, rules::pylint::rules::GlobalVariableNotAssigned),
(Pylint, "W0603") => (RuleGroup::Unspecified, rules::pylint::rules::GlobalStatement),
(Pylint, "W0711") => (RuleGroup::Unspecified, rules::pylint::rules::BinaryOpException),
(Pylint, "W1508") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidEnvvarDefault),
(Pylint, "W1509") => (RuleGroup::Unspecified, rules::pylint::rules::SubprocessPopenPreexecFn),
(Pylint, "W1510") => (RuleGroup::Unspecified, rules::pylint::rules::SubprocessRunWithoutCheck),
#[allow(deprecated)]
(Pylint, "W1641") => (RuleGroup::Nursery, rules::pylint::rules::EqWithoutHash),
(Pylint, "R0904") => (RuleGroup::Preview, rules::pylint::rules::TooManyPublicMethods),
(Pylint, "W2901") => (RuleGroup::Unspecified, rules::pylint::rules::RedefinedLoopName),
#[allow(deprecated)]
(Pylint, "W3201") => (RuleGroup::Nursery, rules::pylint::rules::BadDunderMethodName),
(Pylint, "W3301") => (RuleGroup::Unspecified, rules::pylint::rules::NestedMinMax),
// flake8-async
(Flake8Async, "100") => (RuleGroup::Unspecified, rules::flake8_async::rules::BlockingHttpCallInAsyncFunction),
(Flake8Async, "101") => (RuleGroup::Unspecified, rules::flake8_async::rules::OpenSleepOrSubprocessInAsyncFunction),
(Flake8Async, "102") => (RuleGroup::Unspecified, rules::flake8_async::rules::BlockingOsCallInAsyncFunction),
// flake8-builtins
(Flake8Builtins, "001") => (RuleGroup::Unspecified, rules::flake8_builtins::rules::BuiltinVariableShadowing),
(Flake8Builtins, "002") => (RuleGroup::Unspecified, rules::flake8_builtins::rules::BuiltinArgumentShadowing),
(Flake8Builtins, "003") => (RuleGroup::Unspecified, rules::flake8_builtins::rules::BuiltinAttributeShadowing),
// flake8-bugbear
(Flake8Bugbear, "002") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UnaryPrefixIncrementDecrement),
(Flake8Bugbear, "003") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::AssignmentToOsEnviron),
(Flake8Bugbear, "004") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UnreliableCallableCheck),
(Flake8Bugbear, "005") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::StripWithMultiCharacters),
(Flake8Bugbear, "006") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::MutableArgumentDefault),
(Flake8Bugbear, "007") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UnusedLoopControlVariable),
(Flake8Bugbear, "008") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::FunctionCallInDefaultArgument),
(Flake8Bugbear, "009") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::GetAttrWithConstant),
(Flake8Bugbear, "010") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::SetAttrWithConstant),
(Flake8Bugbear, "011") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::AssertFalse),
(Flake8Bugbear, "012") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::JumpStatementInFinally),
(Flake8Bugbear, "013") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::RedundantTupleInExceptionHandler),
(Flake8Bugbear, "014") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::DuplicateHandlerException),
(Flake8Bugbear, "015") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UselessComparison),
(Flake8Bugbear, "016") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::RaiseLiteral),
(Flake8Bugbear, "017") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::AssertRaisesException),
(Flake8Bugbear, "018") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UselessExpression),
(Flake8Bugbear, "019") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::CachedInstanceMethod),
(Flake8Bugbear, "020") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::LoopVariableOverridesIterator),
(Flake8Bugbear, "021") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::FStringDocstring),
(Flake8Bugbear, "022") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UselessContextlibSuppress),
(Flake8Bugbear, "023") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::FunctionUsesLoopVariable),
(Flake8Bugbear, "024") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::AbstractBaseClassWithoutAbstractMethod),
(Flake8Bugbear, "025") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::DuplicateTryBlockException),
(Flake8Bugbear, "026") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::StarArgUnpackingAfterKeywordArg),
(Flake8Bugbear, "027") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::EmptyMethodWithoutAbstractDecorator),
(Flake8Bugbear, "028") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::NoExplicitStacklevel),
(Flake8Bugbear, "029") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::ExceptWithEmptyTuple),
(Flake8Bugbear, "030") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::ExceptWithNonExceptionClasses),
(Flake8Bugbear, "031") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::ReuseOfGroupbyGenerator),
(Flake8Bugbear, "032") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UnintentionalTypeAnnotation),
(Flake8Bugbear, "033") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::DuplicateValue),
(Flake8Bugbear, "034") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::ReSubPositionalArgs),
(Flake8Bugbear, "904") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::RaiseWithoutFromInsideExcept),
(Flake8Bugbear, "905") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::ZipWithoutExplicitStrict),
// flake8-blind-except
(Flake8BlindExcept, "001") => (RuleGroup::Unspecified, rules::flake8_blind_except::rules::BlindExcept),
// flake8-comprehensions
(Flake8Comprehensions, "00") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryGeneratorList),
(Flake8Comprehensions, "01") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryGeneratorSet),
(Flake8Comprehensions, "02") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryGeneratorDict),
(Flake8Comprehensions, "03") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryListComprehensionSet),
(Flake8Comprehensions, "04") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryListComprehensionDict),
(Flake8Comprehensions, "05") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryLiteralSet),
(Flake8Comprehensions, "06") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryLiteralDict),
(Flake8Comprehensions, "08") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryCollectionCall),
(Flake8Comprehensions, "09") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryLiteralWithinTupleCall),
(Flake8Comprehensions, "10") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryLiteralWithinListCall),
(Flake8Comprehensions, "11") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryListCall),
(Flake8Comprehensions, "13") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryCallAroundSorted),
(Flake8Comprehensions, "14") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryDoubleCastOrProcess),
(Flake8Comprehensions, "15") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessarySubscriptReversal),
(Flake8Comprehensions, "16") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryComprehension),
(Flake8Comprehensions, "17") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryMap),
(Flake8Comprehensions, "18") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryLiteralWithinDictCall),
(Flake8Comprehensions, "19") => (RuleGroup::Unspecified, rules::flake8_comprehensions::rules::UnnecessaryComprehensionAnyAll),
// flake8-debugger
(Flake8Debugger, "0") => (RuleGroup::Unspecified, rules::flake8_debugger::rules::Debugger),
// mccabe
(McCabe, "1") => (RuleGroup::Unspecified, rules::mccabe::rules::ComplexStructure),
// flake8-tidy-imports
(Flake8TidyImports, "251") => (RuleGroup::Unspecified, rules::flake8_tidy_imports::rules::BannedApi),
(Flake8TidyImports, "252") => (RuleGroup::Unspecified, rules::flake8_tidy_imports::rules::RelativeImports),
(Flake8TidyImports, "253") => (RuleGroup::Unspecified, rules::flake8_tidy_imports::rules::BannedModuleLevelImports),
// flake8-return
(Flake8Return, "501") => (RuleGroup::Unspecified, rules::flake8_return::rules::UnnecessaryReturnNone),
(Flake8Return, "502") => (RuleGroup::Unspecified, rules::flake8_return::rules::ImplicitReturnValue),
(Flake8Return, "503") => (RuleGroup::Unspecified, rules::flake8_return::rules::ImplicitReturn),
(Flake8Return, "504") => (RuleGroup::Unspecified, rules::flake8_return::rules::UnnecessaryAssign),
(Flake8Return, "505") => (RuleGroup::Unspecified, rules::flake8_return::rules::SuperfluousElseReturn),
(Flake8Return, "506") => (RuleGroup::Unspecified, rules::flake8_return::rules::SuperfluousElseRaise),
(Flake8Return, "507") => (RuleGroup::Unspecified, rules::flake8_return::rules::SuperfluousElseContinue),
(Flake8Return, "508") => (RuleGroup::Unspecified, rules::flake8_return::rules::SuperfluousElseBreak),
// flake8-gettext
(Flake8GetText, "001") => (RuleGroup::Unspecified, rules::flake8_gettext::rules::FStringInGetTextFuncCall),
(Flake8GetText, "002") => (RuleGroup::Unspecified, rules::flake8_gettext::rules::FormatInGetTextFuncCall),
(Flake8GetText, "003") => (RuleGroup::Unspecified, rules::flake8_gettext::rules::PrintfInGetTextFuncCall),
// flake8-implicit-str-concat
(Flake8ImplicitStrConcat, "001") => (RuleGroup::Unspecified, rules::flake8_implicit_str_concat::rules::SingleLineImplicitStringConcatenation),
(Flake8ImplicitStrConcat, "002") => (RuleGroup::Unspecified, rules::flake8_implicit_str_concat::rules::MultiLineImplicitStringConcatenation),
(Flake8ImplicitStrConcat, "003") => (RuleGroup::Unspecified, rules::flake8_implicit_str_concat::rules::ExplicitStringConcatenation),
// flake8-print
(Flake8Print, "1") => (RuleGroup::Unspecified, rules::flake8_print::rules::Print),
(Flake8Print, "3") => (RuleGroup::Unspecified, rules::flake8_print::rules::PPrint),
// flake8-quotes
(Flake8Quotes, "000") => (RuleGroup::Unspecified, rules::flake8_quotes::rules::BadQuotesInlineString),
(Flake8Quotes, "001") => (RuleGroup::Unspecified, rules::flake8_quotes::rules::BadQuotesMultilineString),
(Flake8Quotes, "002") => (RuleGroup::Unspecified, rules::flake8_quotes::rules::BadQuotesDocstring),
(Flake8Quotes, "003") => (RuleGroup::Unspecified, rules::flake8_quotes::rules::AvoidableEscapedQuote),
// flake8-annotations
(Flake8Annotations, "001") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingTypeFunctionArgument),
(Flake8Annotations, "002") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingTypeArgs),
(Flake8Annotations, "003") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingTypeKwargs),
(Flake8Annotations, "101") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingTypeSelf),
(Flake8Annotations, "102") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingTypeCls),
(Flake8Annotations, "201") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingReturnTypeUndocumentedPublicFunction),
(Flake8Annotations, "202") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingReturnTypePrivateFunction),
(Flake8Annotations, "204") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingReturnTypeSpecialMethod),
(Flake8Annotations, "205") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingReturnTypeStaticMethod),
(Flake8Annotations, "206") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::MissingReturnTypeClassMethod),
(Flake8Annotations, "401") => (RuleGroup::Unspecified, rules::flake8_annotations::rules::AnyType),
// flake8-future-annotations
(Flake8FutureAnnotations, "100") => (RuleGroup::Unspecified, rules::flake8_future_annotations::rules::FutureRewritableTypeAnnotation),
(Flake8FutureAnnotations, "102") => (RuleGroup::Unspecified, rules::flake8_future_annotations::rules::FutureRequiredTypeAnnotation),
// flake8-2020
(Flake82020, "101") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SysVersionSlice3),
(Flake82020, "102") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SysVersion2),
(Flake82020, "103") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SysVersionCmpStr3),
(Flake82020, "201") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SysVersionInfo0Eq3),
(Flake82020, "202") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SixPY3),
(Flake82020, "203") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SysVersionInfo1CmpInt),
(Flake82020, "204") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SysVersionInfoMinorCmpInt),
(Flake82020, "301") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SysVersion0),
(Flake82020, "302") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SysVersionCmpStr10),
(Flake82020, "303") => (RuleGroup::Unspecified, rules::flake8_2020::rules::SysVersionSlice1),
// flake8-simplify
(Flake8Simplify, "101") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::DuplicateIsinstanceCall),
(Flake8Simplify, "102") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::CollapsibleIf),
(Flake8Simplify, "103") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::NeedlessBool),
(Flake8Simplify, "105") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::SuppressibleException),
(Flake8Simplify, "107") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::ReturnInTryExceptFinally),
(Flake8Simplify, "108") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::IfElseBlockInsteadOfIfExp),
(Flake8Simplify, "109") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::CompareWithTuple),
(Flake8Simplify, "110") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::ReimplementedBuiltin),
(Flake8Simplify, "112") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::UncapitalizedEnvironmentVariables),
(Flake8Simplify, "114") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::IfWithSameArms),
(Flake8Simplify, "115") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::OpenFileWithContextHandler),
(Flake8Simplify, "116") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::IfElseBlockInsteadOfDictLookup),
(Flake8Simplify, "117") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::MultipleWithStatements),
(Flake8Simplify, "118") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::InDictKeys),
(Flake8Simplify, "201") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::NegateEqualOp),
(Flake8Simplify, "202") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::NegateNotEqualOp),
(Flake8Simplify, "208") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::DoubleNegation),
(Flake8Simplify, "210") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::IfExprWithTrueFalse),
(Flake8Simplify, "211") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::IfExprWithFalseTrue),
(Flake8Simplify, "212") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::IfExprWithTwistedArms),
(Flake8Simplify, "220") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::ExprAndNotExpr),
(Flake8Simplify, "221") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::ExprOrNotExpr),
(Flake8Simplify, "222") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::ExprOrTrue),
(Flake8Simplify, "223") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::ExprAndFalse),
(Flake8Simplify, "300") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::YodaConditions),
(Flake8Simplify, "401") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::IfElseBlockInsteadOfDictGet),
(Flake8Simplify, "910") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::DictGetWithNoneDefault),
// flake8-copyright
#[allow(deprecated)]
(Flake8Copyright, "001") => (RuleGroup::Nursery, rules::flake8_copyright::rules::MissingCopyrightNotice),
// pyupgrade
(Pyupgrade, "001") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UselessMetaclassType),
(Pyupgrade, "003") => (RuleGroup::Unspecified, rules::pyupgrade::rules::TypeOfPrimitive),
(Pyupgrade, "004") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UselessObjectInheritance),
(Pyupgrade, "005") => (RuleGroup::Unspecified, rules::pyupgrade::rules::DeprecatedUnittestAlias),
(Pyupgrade, "006") => (RuleGroup::Unspecified, rules::pyupgrade::rules::NonPEP585Annotation),
(Pyupgrade, "007") => (RuleGroup::Unspecified, rules::pyupgrade::rules::NonPEP604Annotation),
(Pyupgrade, "008") => (RuleGroup::Unspecified, rules::pyupgrade::rules::SuperCallWithParameters),
(Pyupgrade, "009") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UTF8EncodingDeclaration),
(Pyupgrade, "010") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UnnecessaryFutureImport),
(Pyupgrade, "011") => (RuleGroup::Unspecified, rules::pyupgrade::rules::LRUCacheWithoutParameters),
(Pyupgrade, "012") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UnnecessaryEncodeUTF8),
(Pyupgrade, "013") => (RuleGroup::Unspecified, rules::pyupgrade::rules::ConvertTypedDictFunctionalToClass),
(Pyupgrade, "014") => (RuleGroup::Unspecified, rules::pyupgrade::rules::ConvertNamedTupleFunctionalToClass),
(Pyupgrade, "015") => (RuleGroup::Unspecified, rules::pyupgrade::rules::RedundantOpenModes),
(Pyupgrade, "017") => (RuleGroup::Unspecified, rules::pyupgrade::rules::DatetimeTimezoneUTC),
(Pyupgrade, "018") => (RuleGroup::Unspecified, rules::pyupgrade::rules::NativeLiterals),
(Pyupgrade, "019") => (RuleGroup::Unspecified, rules::pyupgrade::rules::TypingTextStrAlias),
(Pyupgrade, "020") => (RuleGroup::Unspecified, rules::pyupgrade::rules::OpenAlias),
(Pyupgrade, "021") => (RuleGroup::Unspecified, rules::pyupgrade::rules::ReplaceUniversalNewlines),
(Pyupgrade, "022") => (RuleGroup::Unspecified, rules::pyupgrade::rules::ReplaceStdoutStderr),
(Pyupgrade, "023") => (RuleGroup::Unspecified, rules::pyupgrade::rules::DeprecatedCElementTree),
(Pyupgrade, "024") => (RuleGroup::Unspecified, rules::pyupgrade::rules::OSErrorAlias),
(Pyupgrade, "025") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UnicodeKindPrefix),
(Pyupgrade, "026") => (RuleGroup::Unspecified, rules::pyupgrade::rules::DeprecatedMockImport),
(Pyupgrade, "027") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UnpackedListComprehension),
(Pyupgrade, "028") => (RuleGroup::Unspecified, rules::pyupgrade::rules::YieldInForLoop),
(Pyupgrade, "029") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UnnecessaryBuiltinImport),
(Pyupgrade, "030") => (RuleGroup::Unspecified, rules::pyupgrade::rules::FormatLiterals),
(Pyupgrade, "031") => (RuleGroup::Unspecified, rules::pyupgrade::rules::PrintfStringFormatting),
(Pyupgrade, "032") => (RuleGroup::Unspecified, rules::pyupgrade::rules::FString),
(Pyupgrade, "033") => (RuleGroup::Unspecified, rules::pyupgrade::rules::LRUCacheWithMaxsizeNone),
(Pyupgrade, "034") => (RuleGroup::Unspecified, rules::pyupgrade::rules::ExtraneousParentheses),
(Pyupgrade, "035") => (RuleGroup::Unspecified, rules::pyupgrade::rules::DeprecatedImport),
(Pyupgrade, "036") => (RuleGroup::Unspecified, rules::pyupgrade::rules::OutdatedVersionBlock),
(Pyupgrade, "037") => (RuleGroup::Unspecified, rules::pyupgrade::rules::QuotedAnnotation),
(Pyupgrade, "038") => (RuleGroup::Unspecified, rules::pyupgrade::rules::NonPEP604Isinstance),
(Pyupgrade, "039") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UnnecessaryClassParentheses),
(Pyupgrade, "040") => (RuleGroup::Unspecified, rules::pyupgrade::rules::NonPEP695TypeAlias),
// pydocstyle
(Pydocstyle, "100") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UndocumentedPublicModule),
(Pydocstyle, "101") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UndocumentedPublicClass),
(Pydocstyle, "102") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UndocumentedPublicMethod),
(Pydocstyle, "103") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UndocumentedPublicFunction),
(Pydocstyle, "104") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UndocumentedPublicPackage),
(Pydocstyle, "105") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UndocumentedMagicMethod),
(Pydocstyle, "106") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UndocumentedPublicNestedClass),
(Pydocstyle, "107") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UndocumentedPublicInit),
(Pydocstyle, "200") => (RuleGroup::Unspecified, rules::pydocstyle::rules::FitsOnOneLine),
(Pydocstyle, "201") => (RuleGroup::Unspecified, rules::pydocstyle::rules::NoBlankLineBeforeFunction),
(Pydocstyle, "202") => (RuleGroup::Unspecified, rules::pydocstyle::rules::NoBlankLineAfterFunction),
(Pydocstyle, "203") => (RuleGroup::Unspecified, rules::pydocstyle::rules::OneBlankLineBeforeClass),
(Pydocstyle, "204") => (RuleGroup::Unspecified, rules::pydocstyle::rules::OneBlankLineAfterClass),
(Pydocstyle, "205") => (RuleGroup::Unspecified, rules::pydocstyle::rules::BlankLineAfterSummary),
(Pydocstyle, "206") => (RuleGroup::Unspecified, rules::pydocstyle::rules::IndentWithSpaces),
(Pydocstyle, "207") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UnderIndentation),
(Pydocstyle, "208") => (RuleGroup::Unspecified, rules::pydocstyle::rules::OverIndentation),
(Pydocstyle, "209") => (RuleGroup::Unspecified, rules::pydocstyle::rules::NewLineAfterLastParagraph),
(Pydocstyle, "210") => (RuleGroup::Unspecified, rules::pydocstyle::rules::SurroundingWhitespace),
(Pydocstyle, "211") => (RuleGroup::Unspecified, rules::pydocstyle::rules::BlankLineBeforeClass),
(Pydocstyle, "212") => (RuleGroup::Unspecified, rules::pydocstyle::rules::MultiLineSummaryFirstLine),
(Pydocstyle, "213") => (RuleGroup::Unspecified, rules::pydocstyle::rules::MultiLineSummarySecondLine),
(Pydocstyle, "214") => (RuleGroup::Unspecified, rules::pydocstyle::rules::SectionNotOverIndented),
(Pydocstyle, "215") => (RuleGroup::Unspecified, rules::pydocstyle::rules::SectionUnderlineNotOverIndented),
(Pydocstyle, "300") => (RuleGroup::Unspecified, rules::pydocstyle::rules::TripleSingleQuotes),
(Pydocstyle, "301") => (RuleGroup::Unspecified, rules::pydocstyle::rules::EscapeSequenceInDocstring),
(Pydocstyle, "400") => (RuleGroup::Unspecified, rules::pydocstyle::rules::EndsInPeriod),
(Pydocstyle, "401") => (RuleGroup::Unspecified, rules::pydocstyle::rules::NonImperativeMood),
(Pydocstyle, "402") => (RuleGroup::Unspecified, rules::pydocstyle::rules::NoSignature),
(Pydocstyle, "403") => (RuleGroup::Unspecified, rules::pydocstyle::rules::FirstLineCapitalized),
(Pydocstyle, "404") => (RuleGroup::Unspecified, rules::pydocstyle::rules::DocstringStartsWithThis),
(Pydocstyle, "405") => (RuleGroup::Unspecified, rules::pydocstyle::rules::CapitalizeSectionName),
(Pydocstyle, "406") => (RuleGroup::Unspecified, rules::pydocstyle::rules::NewLineAfterSectionName),
(Pydocstyle, "407") => (RuleGroup::Unspecified, rules::pydocstyle::rules::DashedUnderlineAfterSection),
(Pydocstyle, "408") => (RuleGroup::Unspecified, rules::pydocstyle::rules::SectionUnderlineAfterName),
(Pydocstyle, "409") => (RuleGroup::Unspecified, rules::pydocstyle::rules::SectionUnderlineMatchesSectionLength),
(Pydocstyle, "410") => (RuleGroup::Unspecified, rules::pydocstyle::rules::NoBlankLineAfterSection),
(Pydocstyle, "411") => (RuleGroup::Unspecified, rules::pydocstyle::rules::NoBlankLineBeforeSection),
(Pydocstyle, "412") => (RuleGroup::Unspecified, rules::pydocstyle::rules::BlankLinesBetweenHeaderAndContent),
(Pydocstyle, "413") => (RuleGroup::Unspecified, rules::pydocstyle::rules::BlankLineAfterLastSection),
(Pydocstyle, "414") => (RuleGroup::Unspecified, rules::pydocstyle::rules::EmptyDocstringSection),
(Pydocstyle, "415") => (RuleGroup::Unspecified, rules::pydocstyle::rules::EndsInPunctuation),
(Pydocstyle, "416") => (RuleGroup::Unspecified, rules::pydocstyle::rules::SectionNameEndsInColon),
(Pydocstyle, "417") => (RuleGroup::Unspecified, rules::pydocstyle::rules::UndocumentedParam),
(Pydocstyle, "418") => (RuleGroup::Unspecified, rules::pydocstyle::rules::OverloadWithDocstring),
(Pydocstyle, "419") => (RuleGroup::Unspecified, rules::pydocstyle::rules::EmptyDocstring),
// pep8-naming
(PEP8Naming, "801") => (RuleGroup::Unspecified, rules::pep8_naming::rules::InvalidClassName),
(PEP8Naming, "802") => (RuleGroup::Unspecified, rules::pep8_naming::rules::InvalidFunctionName),
(PEP8Naming, "803") => (RuleGroup::Unspecified, rules::pep8_naming::rules::InvalidArgumentName),
(PEP8Naming, "804") => (RuleGroup::Unspecified, rules::pep8_naming::rules::InvalidFirstArgumentNameForClassMethod),
(PEP8Naming, "805") => (RuleGroup::Unspecified, rules::pep8_naming::rules::InvalidFirstArgumentNameForMethod),
(PEP8Naming, "806") => (RuleGroup::Unspecified, rules::pep8_naming::rules::NonLowercaseVariableInFunction),
(PEP8Naming, "807") => (RuleGroup::Unspecified, rules::pep8_naming::rules::DunderFunctionName),
(PEP8Naming, "811") => (RuleGroup::Unspecified, rules::pep8_naming::rules::ConstantImportedAsNonConstant),
(PEP8Naming, "812") => (RuleGroup::Unspecified, rules::pep8_naming::rules::LowercaseImportedAsNonLowercase),
(PEP8Naming, "813") => (RuleGroup::Unspecified, rules::pep8_naming::rules::CamelcaseImportedAsLowercase),
(PEP8Naming, "814") => (RuleGroup::Unspecified, rules::pep8_naming::rules::CamelcaseImportedAsConstant),
(PEP8Naming, "815") => (RuleGroup::Unspecified, rules::pep8_naming::rules::MixedCaseVariableInClassScope),
(PEP8Naming, "816") => (RuleGroup::Unspecified, rules::pep8_naming::rules::MixedCaseVariableInGlobalScope),
(PEP8Naming, "817") => (RuleGroup::Unspecified, rules::pep8_naming::rules::CamelcaseImportedAsAcronym),
(PEP8Naming, "818") => (RuleGroup::Unspecified, rules::pep8_naming::rules::ErrorSuffixOnExceptionName),
(PEP8Naming, "999") => (RuleGroup::Unspecified, rules::pep8_naming::rules::InvalidModuleName),
// isort
(Isort, "001") => (RuleGroup::Unspecified, rules::isort::rules::UnsortedImports),
(Isort, "002") => (RuleGroup::Unspecified, rules::isort::rules::MissingRequiredImport),
// eradicate
(Eradicate, "001") => (RuleGroup::Unspecified, rules::eradicate::rules::CommentedOutCode),
// flake8-bandit
(Flake8Bandit, "101") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::Assert),
(Flake8Bandit, "102") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::ExecBuiltin),
(Flake8Bandit, "103") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::BadFilePermissions),
(Flake8Bandit, "104") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::HardcodedBindAllInterfaces),
(Flake8Bandit, "105") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::HardcodedPasswordString),
(Flake8Bandit, "106") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::HardcodedPasswordFuncArg),
(Flake8Bandit, "107") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::HardcodedPasswordDefault),
(Flake8Bandit, "108") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::HardcodedTempFile),
(Flake8Bandit, "110") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::TryExceptPass),
(Flake8Bandit, "112") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::TryExceptContinue),
(Flake8Bandit, "113") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::RequestWithoutTimeout),
(Flake8Bandit, "201") => (RuleGroup::Preview, rules::flake8_bandit::rules::FlaskDebugTrue),
(Flake8Bandit, "301") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousPickleUsage),
(Flake8Bandit, "302") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousMarshalUsage),
(Flake8Bandit, "303") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousInsecureHashUsage),
(Flake8Bandit, "304") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousInsecureCipherUsage),
(Flake8Bandit, "305") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousInsecureCipherModeUsage),
(Flake8Bandit, "306") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousMktempUsage),
(Flake8Bandit, "307") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousEvalUsage),
(Flake8Bandit, "308") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousMarkSafeUsage),
(Flake8Bandit, "310") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousURLOpenUsage),
(Flake8Bandit, "311") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousNonCryptographicRandomUsage),
(Flake8Bandit, "312") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousTelnetUsage),
(Flake8Bandit, "313") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousXMLCElementTreeUsage),
(Flake8Bandit, "314") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousXMLElementTreeUsage),
(Flake8Bandit, "315") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousXMLExpatReaderUsage),
(Flake8Bandit, "316") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousXMLExpatBuilderUsage),
(Flake8Bandit, "317") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousXMLSaxUsage),
(Flake8Bandit, "318") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousXMLMiniDOMUsage),
(Flake8Bandit, "319") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousXMLPullDOMUsage),
(Flake8Bandit, "320") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousXMLETreeUsage),
(Flake8Bandit, "321") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousFTPLibUsage),
(Flake8Bandit, "323") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SuspiciousUnverifiedContextUsage),
(Flake8Bandit, "324") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::HashlibInsecureHashFunction),
(Flake8Bandit, "501") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::RequestWithNoCertValidation),
(Flake8Bandit, "506") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::UnsafeYAMLLoad),
(Flake8Bandit, "507") => (RuleGroup::Preview, rules::flake8_bandit::rules::SSHNoHostKeyVerification),
(Flake8Bandit, "508") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SnmpInsecureVersion),
(Flake8Bandit, "509") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SnmpWeakCryptography),
(Flake8Bandit, "601") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::ParamikoCall),
(Flake8Bandit, "602") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SubprocessPopenWithShellEqualsTrue),
(Flake8Bandit, "603") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::SubprocessWithoutShellEqualsTrue),
(Flake8Bandit, "604") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::CallWithShellEqualsTrue),
(Flake8Bandit, "605") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::StartProcessWithAShell),
(Flake8Bandit, "606") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::StartProcessWithNoShell),
(Flake8Bandit, "607") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::StartProcessWithPartialPath),
(Flake8Bandit, "608") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::HardcodedSQLExpression),
(Flake8Bandit, "609") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::UnixCommandWildcardInjection),
(Flake8Bandit, "612") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::LoggingConfigInsecureListen),
(Flake8Bandit, "701") => (RuleGroup::Unspecified, rules::flake8_bandit::rules::Jinja2AutoescapeFalse),
// flake8-boolean-trap
(Flake8BooleanTrap, "001") => (RuleGroup::Unspecified, rules::flake8_boolean_trap::rules::BooleanTypeHintPositionalArgument),
(Flake8BooleanTrap, "002") => (RuleGroup::Unspecified, rules::flake8_boolean_trap::rules::BooleanDefaultValuePositionalArgument),
(Flake8BooleanTrap, "003") => (RuleGroup::Unspecified, rules::flake8_boolean_trap::rules::BooleanPositionalValueInCall),
// flake8-unused-arguments
(Flake8UnusedArguments, "001") => (RuleGroup::Unspecified, rules::flake8_unused_arguments::rules::UnusedFunctionArgument),
(Flake8UnusedArguments, "002") => (RuleGroup::Unspecified, rules::flake8_unused_arguments::rules::UnusedMethodArgument),
(Flake8UnusedArguments, "003") => (RuleGroup::Unspecified, rules::flake8_unused_arguments::rules::UnusedClassMethodArgument),
(Flake8UnusedArguments, "004") => (RuleGroup::Unspecified, rules::flake8_unused_arguments::rules::UnusedStaticMethodArgument),
(Flake8UnusedArguments, "005") => (RuleGroup::Unspecified, rules::flake8_unused_arguments::rules::UnusedLambdaArgument),
// flake8-import-conventions
(Flake8ImportConventions, "001") => (RuleGroup::Unspecified, rules::flake8_import_conventions::rules::UnconventionalImportAlias),
(Flake8ImportConventions, "002") => (RuleGroup::Unspecified, rules::flake8_import_conventions::rules::BannedImportAlias),
(Flake8ImportConventions, "003") => (RuleGroup::Unspecified, rules::flake8_import_conventions::rules::BannedImportFrom),
// flake8-datetimez
(Flake8Datetimez, "001") => (RuleGroup::Unspecified, rules::flake8_datetimez::rules::CallDatetimeWithoutTzinfo),
(Flake8Datetimez, "002") => (RuleGroup::Unspecified, rules::flake8_datetimez::rules::CallDatetimeToday),
(Flake8Datetimez, "003") => (RuleGroup::Unspecified, rules::flake8_datetimez::rules::CallDatetimeUtcnow),
(Flake8Datetimez, "004") => (RuleGroup::Unspecified, rules::flake8_datetimez::rules::CallDatetimeUtcfromtimestamp),
(Flake8Datetimez, "005") => (RuleGroup::Unspecified, rules::flake8_datetimez::rules::CallDatetimeNowWithoutTzinfo),
(Flake8Datetimez, "006") => (RuleGroup::Unspecified, rules::flake8_datetimez::rules::CallDatetimeFromtimestamp),
(Flake8Datetimez, "007") => (RuleGroup::Unspecified, rules::flake8_datetimez::rules::CallDatetimeStrptimeWithoutZone),
(Flake8Datetimez, "011") => (RuleGroup::Unspecified, rules::flake8_datetimez::rules::CallDateToday),
(Flake8Datetimez, "012") => (RuleGroup::Unspecified, rules::flake8_datetimez::rules::CallDateFromtimestamp),
// pygrep-hooks
(PygrepHooks, "001") => (RuleGroup::Unspecified, rules::pygrep_hooks::rules::Eval),
(PygrepHooks, "002") => (RuleGroup::Unspecified, rules::pygrep_hooks::rules::DeprecatedLogWarn),
(PygrepHooks, "003") => (RuleGroup::Unspecified, rules::pygrep_hooks::rules::BlanketTypeIgnore),
(PygrepHooks, "004") => (RuleGroup::Unspecified, rules::pygrep_hooks::rules::BlanketNOQA),
(PygrepHooks, "005") => (RuleGroup::Unspecified, rules::pygrep_hooks::rules::InvalidMockAccess),
// pandas-vet
(PandasVet, "002") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfInplaceArgument),
(PandasVet, "003") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotIsNull),
(PandasVet, "004") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotNotNull),
(PandasVet, "007") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotIx),
(PandasVet, "008") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotAt),
(PandasVet, "009") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotIat),
(PandasVet, "010") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotPivotOrUnstack),
(PandasVet, "011") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotValues),
(PandasVet, "012") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotReadTable),
(PandasVet, "013") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotStack),
(PandasVet, "015") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfPdMerge),
(PandasVet, "101") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasNuniqueConstantSeriesCheck),
(PandasVet, "901") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasDfVariableName),
// flake8-errmsg
(Flake8ErrMsg, "101") => (RuleGroup::Unspecified, rules::flake8_errmsg::rules::RawStringInException),
(Flake8ErrMsg, "102") => (RuleGroup::Unspecified, rules::flake8_errmsg::rules::FStringInException),
(Flake8ErrMsg, "103") => (RuleGroup::Unspecified, rules::flake8_errmsg::rules::DotFormatInException),
// flake8-pyi
(Flake8Pyi, "001") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnprefixedTypeParam),
(Flake8Pyi, "002") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::ComplexIfStatementInStub),
(Flake8Pyi, "003") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnrecognizedVersionInfoCheck),
(Flake8Pyi, "004") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::PatchVersionComparison),
(Flake8Pyi, "005") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::WrongTupleLengthVersionComparison),
(Flake8Pyi, "006") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::BadVersionInfoComparison),
(Flake8Pyi, "007") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnrecognizedPlatformCheck),
(Flake8Pyi, "008") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnrecognizedPlatformName),
(Flake8Pyi, "009") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::PassStatementStubBody),
(Flake8Pyi, "010") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::NonEmptyStubBody),
(Flake8Pyi, "011") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::TypedArgumentDefaultInStub),
(Flake8Pyi, "012") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::PassInClassBody),
(Flake8Pyi, "013") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::EllipsisInNonEmptyClassBody),
(Flake8Pyi, "014") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::ArgumentDefaultInStub),
(Flake8Pyi, "015") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::AssignmentDefaultInStub),
(Flake8Pyi, "016") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::DuplicateUnionMember),
(Flake8Pyi, "017") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::ComplexAssignmentInStub),
(Flake8Pyi, "018") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnusedPrivateTypeVar),
(Flake8Pyi, "019") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::CustomTypeVarReturnType),
(Flake8Pyi, "020") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::QuotedAnnotationInStub),
(Flake8Pyi, "021") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::DocstringInStub),
(Flake8Pyi, "024") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::CollectionsNamedTuple),
(Flake8Pyi, "025") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnaliasedCollectionsAbcSetImport),
(Flake8Pyi, "026") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::TypeAliasWithoutAnnotation),
(Flake8Pyi, "029") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::StrOrReprDefinedInStub),
(Flake8Pyi, "030") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnnecessaryLiteralUnion),
(Flake8Pyi, "032") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::AnyEqNeAnnotation),
(Flake8Pyi, "033") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::TypeCommentInStub),
(Flake8Pyi, "034") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::NonSelfReturnType),
(Flake8Pyi, "035") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnassignedSpecialVariableInStub),
(Flake8Pyi, "036") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::BadExitAnnotation),
(Flake8Pyi, "041") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::RedundantNumericUnion),
(Flake8Pyi, "042") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::SnakeCaseTypeAlias),
(Flake8Pyi, "043") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::TSuffixedTypeAlias),
(Flake8Pyi, "044") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::FutureAnnotationsInStub),
(Flake8Pyi, "045") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::IterMethodReturnIterable),
(Flake8Pyi, "046") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnusedPrivateProtocol),
(Flake8Pyi, "047") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnusedPrivateTypeAlias),
(Flake8Pyi, "048") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::StubBodyMultipleStatements),
(Flake8Pyi, "049") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnusedPrivateTypedDict),
(Flake8Pyi, "050") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::NoReturnArgumentAnnotationInStub),
(Flake8Pyi, "051") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::RedundantLiteralUnion),
(Flake8Pyi, "052") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnannotatedAssignmentInStub),
(Flake8Pyi, "054") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::NumericLiteralTooLong),
(Flake8Pyi, "053") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::StringOrBytesTooLong),
(Flake8Pyi, "055") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnnecessaryTypeUnion),
(Flake8Pyi, "056") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnsupportedMethodCallOnAll),
// flake8-pytest-style
(Flake8PytestStyle, "001") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestFixtureIncorrectParenthesesStyle),
(Flake8PytestStyle, "002") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestFixturePositionalArgs),
(Flake8PytestStyle, "003") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestExtraneousScopeFunction),
(Flake8PytestStyle, "004") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestMissingFixtureNameUnderscore),
(Flake8PytestStyle, "005") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestIncorrectFixtureNameUnderscore),
(Flake8PytestStyle, "006") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestParametrizeNamesWrongType),
(Flake8PytestStyle, "007") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestParametrizeValuesWrongType),
(Flake8PytestStyle, "008") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestPatchWithLambda),
(Flake8PytestStyle, "009") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestUnittestAssertion),
(Flake8PytestStyle, "010") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestRaisesWithoutException),
(Flake8PytestStyle, "011") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestRaisesTooBroad),
(Flake8PytestStyle, "012") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestRaisesWithMultipleStatements),
(Flake8PytestStyle, "013") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestIncorrectPytestImport),
(Flake8PytestStyle, "014") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestDuplicateParametrizeTestCases),
(Flake8PytestStyle, "015") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestAssertAlwaysFalse),
(Flake8PytestStyle, "016") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestFailWithoutMessage),
(Flake8PytestStyle, "017") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestAssertInExcept),
(Flake8PytestStyle, "018") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestCompositeAssertion),
(Flake8PytestStyle, "019") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestFixtureParamWithoutValue),
(Flake8PytestStyle, "020") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestDeprecatedYieldFixture),
(Flake8PytestStyle, "021") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestFixtureFinalizerCallback),
(Flake8PytestStyle, "022") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestUselessYieldFixture),
(Flake8PytestStyle, "023") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestIncorrectMarkParenthesesStyle),
(Flake8PytestStyle, "024") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestUnnecessaryAsyncioMarkOnFixture),
(Flake8PytestStyle, "025") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestErroneousUseFixturesOnFixture),
(Flake8PytestStyle, "026") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestUseFixturesWithoutParameters),
(Flake8PytestStyle, "027") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestUnittestRaisesAssertion),
// flake8-pie
(Flake8Pie, "790") => (RuleGroup::Unspecified, rules::flake8_pie::rules::UnnecessaryPass),
(Flake8Pie, "794") => (RuleGroup::Unspecified, rules::flake8_pie::rules::DuplicateClassFieldDefinition),
(Flake8Pie, "796") => (RuleGroup::Unspecified, rules::flake8_pie::rules::NonUniqueEnums),
(Flake8Pie, "800") => (RuleGroup::Unspecified, rules::flake8_pie::rules::UnnecessarySpread),
(Flake8Pie, "804") => (RuleGroup::Unspecified, rules::flake8_pie::rules::UnnecessaryDictKwargs),
(Flake8Pie, "807") => (RuleGroup::Unspecified, rules::flake8_pie::rules::ReimplementedListBuiltin),
(Flake8Pie, "808") => (RuleGroup::Unspecified, rules::flake8_pie::rules::UnnecessaryRangeStart),
(Flake8Pie, "810") => (RuleGroup::Unspecified, rules::flake8_pie::rules::MultipleStartsEndsWith),
// flake8-commas
(Flake8Commas, "812") => (RuleGroup::Unspecified, rules::flake8_commas::rules::MissingTrailingComma),
(Flake8Commas, "818") => (RuleGroup::Unspecified, rules::flake8_commas::rules::TrailingCommaOnBareTuple),
(Flake8Commas, "819") => (RuleGroup::Unspecified, rules::flake8_commas::rules::ProhibitedTrailingComma),
// flake8-no-pep420
(Flake8NoPep420, "001") => (RuleGroup::Unspecified, rules::flake8_no_pep420::rules::ImplicitNamespacePackage),
// flake8-executable
(Flake8Executable, "001") => (RuleGroup::Unspecified, rules::flake8_executable::rules::ShebangNotExecutable),
(Flake8Executable, "002") => (RuleGroup::Unspecified, rules::flake8_executable::rules::ShebangMissingExecutableFile),
(Flake8Executable, "003") => (RuleGroup::Unspecified, rules::flake8_executable::rules::ShebangMissingPython),
(Flake8Executable, "004") => (RuleGroup::Unspecified, rules::flake8_executable::rules::ShebangLeadingWhitespace),
(Flake8Executable, "005") => (RuleGroup::Unspecified, rules::flake8_executable::rules::ShebangNotFirstLine),
// flake8-type-checking
(Flake8TypeChecking, "001") => (RuleGroup::Unspecified, rules::flake8_type_checking::rules::TypingOnlyFirstPartyImport),
(Flake8TypeChecking, "002") => (RuleGroup::Unspecified, rules::flake8_type_checking::rules::TypingOnlyThirdPartyImport),
(Flake8TypeChecking, "003") => (RuleGroup::Unspecified, rules::flake8_type_checking::rules::TypingOnlyStandardLibraryImport),
(Flake8TypeChecking, "004") => (RuleGroup::Unspecified, rules::flake8_type_checking::rules::RuntimeImportInTypeCheckingBlock),
(Flake8TypeChecking, "005") => (RuleGroup::Unspecified, rules::flake8_type_checking::rules::EmptyTypeCheckingBlock),
// tryceratops
(Tryceratops, "002") => (RuleGroup::Unspecified, rules::tryceratops::rules::RaiseVanillaClass),
(Tryceratops, "003") => (RuleGroup::Unspecified, rules::tryceratops::rules::RaiseVanillaArgs),
(Tryceratops, "004") => (RuleGroup::Unspecified, rules::tryceratops::rules::TypeCheckWithoutTypeError),
(Tryceratops, "200") => (RuleGroup::Unspecified, rules::tryceratops::rules::ReraiseNoCause),
(Tryceratops, "201") => (RuleGroup::Unspecified, rules::tryceratops::rules::VerboseRaise),
(Tryceratops, "300") => (RuleGroup::Unspecified, rules::tryceratops::rules::TryConsiderElse),
(Tryceratops, "301") => (RuleGroup::Unspecified, rules::tryceratops::rules::RaiseWithinTry),
(Tryceratops, "302") => (RuleGroup::Unspecified, rules::tryceratops::rules::UselessTryExcept),
(Tryceratops, "400") => (RuleGroup::Unspecified, rules::tryceratops::rules::ErrorInsteadOfException),
(Tryceratops, "401") => (RuleGroup::Unspecified, rules::tryceratops::rules::VerboseLogMessage),
// flake8-use-pathlib
(Flake8UsePathlib, "100") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathAbspath),
(Flake8UsePathlib, "101") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsChmod),
(Flake8UsePathlib, "102") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsMkdir),
(Flake8UsePathlib, "103") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsMakedirs),
(Flake8UsePathlib, "104") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsRename),
(Flake8UsePathlib, "105") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsReplace),
(Flake8UsePathlib, "106") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsRmdir),
(Flake8UsePathlib, "107") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsRemove),
(Flake8UsePathlib, "108") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsUnlink),
(Flake8UsePathlib, "109") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsGetcwd),
(Flake8UsePathlib, "110") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathExists),
(Flake8UsePathlib, "111") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathExpanduser),
(Flake8UsePathlib, "112") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathIsdir),
(Flake8UsePathlib, "113") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathIsfile),
(Flake8UsePathlib, "114") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathIslink),
(Flake8UsePathlib, "115") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsReadlink),
(Flake8UsePathlib, "116") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsStat),
(Flake8UsePathlib, "117") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathIsabs),
(Flake8UsePathlib, "118") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathJoin),
(Flake8UsePathlib, "119") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathBasename),
(Flake8UsePathlib, "120") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathDirname),
(Flake8UsePathlib, "121") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathSamefile),
(Flake8UsePathlib, "122") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathSplitext),
(Flake8UsePathlib, "123") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::BuiltinOpen),
(Flake8UsePathlib, "124") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::PyPath),
(Flake8UsePathlib, "201") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::PathConstructorCurrentDirectory),
(Flake8UsePathlib, "202") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetsize),
(Flake8UsePathlib, "202") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetsize),
(Flake8UsePathlib, "203") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetatime),
(Flake8UsePathlib, "204") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetmtime),
(Flake8UsePathlib, "205") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetctime),
(Flake8UsePathlib, "206") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsSepSplit),
(Flake8UsePathlib, "207") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::Glob),
// flake8-logging-format
(Flake8LoggingFormat, "001") => (RuleGroup::Unspecified, rules::flake8_logging_format::violations::LoggingStringFormat),
(Flake8LoggingFormat, "002") => (RuleGroup::Unspecified, rules::flake8_logging_format::violations::LoggingPercentFormat),
(Flake8LoggingFormat, "003") => (RuleGroup::Unspecified, rules::flake8_logging_format::violations::LoggingStringConcat),
(Flake8LoggingFormat, "004") => (RuleGroup::Unspecified, rules::flake8_logging_format::violations::LoggingFString),
(Flake8LoggingFormat, "010") => (RuleGroup::Unspecified, rules::flake8_logging_format::violations::LoggingWarn),
(Flake8LoggingFormat, "101") => (RuleGroup::Unspecified, rules::flake8_logging_format::violations::LoggingExtraAttrClash),
(Flake8LoggingFormat, "201") => (RuleGroup::Unspecified, rules::flake8_logging_format::violations::LoggingExcInfo),
(Flake8LoggingFormat, "202") => (RuleGroup::Unspecified, rules::flake8_logging_format::violations::LoggingRedundantExcInfo),
// flake8-raise
(Flake8Raise, "102") => (RuleGroup::Unspecified, rules::flake8_raise::rules::UnnecessaryParenOnRaiseException),
// flake8-self
(Flake8Self, "001") => (RuleGroup::Unspecified, rules::flake8_self::rules::PrivateMemberAccess),
// numpy
(Numpy, "001") => (RuleGroup::Unspecified, rules::numpy::rules::NumpyDeprecatedTypeAlias),
(Numpy, "002") => (RuleGroup::Unspecified, rules::numpy::rules::NumpyLegacyRandom),
(Numpy, "003") => (RuleGroup::Unspecified, rules::numpy::rules::NumpyDeprecatedFunction),
// ruff
(Ruff, "001") => (RuleGroup::Unspecified, rules::ruff::rules::AmbiguousUnicodeCharacterString),
(Ruff, "002") => (RuleGroup::Unspecified, rules::ruff::rules::AmbiguousUnicodeCharacterDocstring),
(Ruff, "003") => (RuleGroup::Unspecified, rules::ruff::rules::AmbiguousUnicodeCharacterComment),
(Ruff, "005") => (RuleGroup::Unspecified, rules::ruff::rules::CollectionLiteralConcatenation),
(Ruff, "006") => (RuleGroup::Unspecified, rules::ruff::rules::AsyncioDanglingTask),
(Ruff, "007") => (RuleGroup::Unspecified, rules::ruff::rules::PairwiseOverZipped),
(Ruff, "008") => (RuleGroup::Unspecified, rules::ruff::rules::MutableDataclassDefault),
(Ruff, "009") => (RuleGroup::Unspecified, rules::ruff::rules::FunctionCallInDataclassDefaultArgument),
(Ruff, "010") => (RuleGroup::Unspecified, rules::ruff::rules::ExplicitFStringTypeConversion),
(Ruff, "011") => (RuleGroup::Unspecified, rules::ruff::rules::StaticKeyDictComprehension),
(Ruff, "012") => (RuleGroup::Unspecified, rules::ruff::rules::MutableClassDefault),
(Ruff, "013") => (RuleGroup::Unspecified, rules::ruff::rules::ImplicitOptional),
#[cfg(feature = "unreachable-code")] // When removing this feature gate, also update rules_selector.rs
#[allow(deprecated)]
(Ruff, "014") => (RuleGroup::Nursery, rules::ruff::rules::UnreachableCode),
(Ruff, "015") => (RuleGroup::Unspecified, rules::ruff::rules::UnnecessaryIterableAllocationForFirstElement),
(Ruff, "016") => (RuleGroup::Unspecified, rules::ruff::rules::InvalidIndexType),
#[allow(deprecated)]
(Ruff, "017") => (RuleGroup::Nursery, rules::ruff::rules::QuadraticListSummation),
(Ruff, "100") => (RuleGroup::Unspecified, rules::ruff::rules::UnusedNOQA),
(Ruff, "200") => (RuleGroup::Unspecified, rules::ruff::rules::InvalidPyprojectToml),
// flake8-django
(Flake8Django, "001") => (RuleGroup::Unspecified, rules::flake8_django::rules::DjangoNullableModelStringField),
(Flake8Django, "003") => (RuleGroup::Unspecified, rules::flake8_django::rules::DjangoLocalsInRenderFunction),
(Flake8Django, "006") => (RuleGroup::Unspecified, rules::flake8_django::rules::DjangoExcludeWithModelForm),
(Flake8Django, "007") => (RuleGroup::Unspecified, rules::flake8_django::rules::DjangoAllWithModelForm),
(Flake8Django, "008") => (RuleGroup::Unspecified, rules::flake8_django::rules::DjangoModelWithoutDunderStr),
(Flake8Django, "012") => (RuleGroup::Unspecified, rules::flake8_django::rules::DjangoUnorderedBodyContentInModel),
(Flake8Django, "013") => (RuleGroup::Unspecified, rules::flake8_django::rules::DjangoNonLeadingReceiverDecorator),
// flynt
// Reserved: (Flynt, "001") => (RuleGroup::Unspecified, Rule::StringConcatenationToFString),
(Flynt, "002") => (RuleGroup::Unspecified, rules::flynt::rules::StaticJoinToFString),
// flake8-todos
(Flake8Todos, "001") => (RuleGroup::Unspecified, rules::flake8_todos::rules::InvalidTodoTag),
(Flake8Todos, "002") => (RuleGroup::Unspecified, rules::flake8_todos::rules::MissingTodoAuthor),
(Flake8Todos, "003") => (RuleGroup::Unspecified, rules::flake8_todos::rules::MissingTodoLink),
(Flake8Todos, "004") => (RuleGroup::Unspecified, rules::flake8_todos::rules::MissingTodoColon),
(Flake8Todos, "005") => (RuleGroup::Unspecified, rules::flake8_todos::rules::MissingTodoDescription),
(Flake8Todos, "006") => (RuleGroup::Unspecified, rules::flake8_todos::rules::InvalidTodoCapitalization),
(Flake8Todos, "007") => (RuleGroup::Unspecified, rules::flake8_todos::rules::MissingSpaceAfterTodoColon),
// airflow
(Airflow, "001") => (RuleGroup::Unspecified, rules::airflow::rules::AirflowVariableNameTaskIdMismatch),
// perflint
(Perflint, "101") => (RuleGroup::Unspecified, rules::perflint::rules::UnnecessaryListCast),
(Perflint, "102") => (RuleGroup::Unspecified, rules::perflint::rules::IncorrectDictIterator),
(Perflint, "203") => (RuleGroup::Unspecified, rules::perflint::rules::TryExceptInLoop),
(Perflint, "401") => (RuleGroup::Unspecified, rules::perflint::rules::ManualListComprehension),
(Perflint, "402") => (RuleGroup::Unspecified, rules::perflint::rules::ManualListCopy),
(Perflint, "403") => (RuleGroup::Preview, rules::perflint::rules::ManualDictComprehension),
// flake8-fixme
(Flake8Fixme, "001") => (RuleGroup::Unspecified, rules::flake8_fixme::rules::LineContainsFixme),
(Flake8Fixme, "002") => (RuleGroup::Unspecified, rules::flake8_fixme::rules::LineContainsTodo),
(Flake8Fixme, "003") => (RuleGroup::Unspecified, rules::flake8_fixme::rules::LineContainsXxx),
(Flake8Fixme, "004") => (RuleGroup::Unspecified, rules::flake8_fixme::rules::LineContainsHack),
// flake8-slots
(Flake8Slots, "000") => (RuleGroup::Unspecified, rules::flake8_slots::rules::NoSlotsInStrSubclass),
(Flake8Slots, "001") => (RuleGroup::Unspecified, rules::flake8_slots::rules::NoSlotsInTupleSubclass),
(Flake8Slots, "002") => (RuleGroup::Unspecified, rules::flake8_slots::rules::NoSlotsInNamedtupleSubclass),
// refurb
#[allow(deprecated)]
(Refurb, "113") => (RuleGroup::Nursery, rules::refurb::rules::RepeatedAppend),
#[allow(deprecated)]
(Refurb, "131") => (RuleGroup::Nursery, rules::refurb::rules::DeleteFullSlice),
#[allow(deprecated)]
(Refurb, "132") => (RuleGroup::Nursery, rules::refurb::rules::CheckAndRemoveFromSet),
(Refurb, "140") => (RuleGroup::Preview, rules::refurb::rules::ReimplementedStarmap),
(Refurb, "145") => (RuleGroup::Preview, rules::refurb::rules::SliceCopy),
(Refurb, "148") => (RuleGroup::Preview, rules::refurb::rules::UnnecessaryEnumerate),
// flake8-logging
(Flake8Logging, "001") => (RuleGroup::Preview, rules::flake8_logging::rules::DirectLoggerInstantiation),
(Flake8Logging, "002") => (RuleGroup::Preview, rules::flake8_logging::rules::InvalidGetLoggerArgument),
(Flake8Logging, "007") => (RuleGroup::Preview, rules::flake8_logging::rules::ExceptionWithoutExcInfo),
(Flake8Logging, "009") => (RuleGroup::Preview, rules::flake8_logging::rules::UndocumentedWarn),
_ => return None,
})
}
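// Illustrative only: a minimal sketch of how a table like this is consumed.
// The `Linter` and `Rule` names below are assumed for the example; the real
// dispatch is generated from the match arms above, with unknown codes falling
// through to `None`:
//
//     fn lookup(linter: Linter, code: &str) -> Option<Rule> {
//         match (linter, code) {
//             (Linter::Ruff, "100") => Some(Rule::UnusedNOQA),
//             // ... one arm per (prefix, code) pair ...
//             _ => None,
//         }
//     }
//
//     assert_eq!(lookup(Linter::Ruff, "101"), None);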


@ -0,0 +1 @@
pub(crate) mod shebang;


@ -0,0 +1,70 @@
use std::ops::Deref;
use ruff_python_trivia::Cursor;
/// A shebang directive (e.g., `#!/usr/bin/env python3`).
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct ShebangDirective<'a>(&'a str);
impl<'a> ShebangDirective<'a> {
/// Parse a shebang directive from a line, or return `None` if the line does not contain a
/// shebang directive.
pub(crate) fn try_extract(line: &'a str) -> Option<Self> {
let mut cursor = Cursor::new(line);
// Trim the `#!` prefix.
if !cursor.eat_char('#') {
return None;
}
if !cursor.eat_char('!') {
return None;
}
Some(Self(cursor.chars().as_str()))
}
}
impl Deref for ShebangDirective<'_> {
type Target = str;
fn deref(&self) -> &Self::Target {
self.0
}
}
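// A quick illustration of the contract (mirroring the snapshot tests below):
//
//     assert_eq!(
//         ShebangDirective::try_extract("#!/usr/bin/env python"),
//         Some(ShebangDirective("/usr/bin/env python"))
//     );
//     // Anything before the `#!`, even a single leading space, disqualifies the line:
//     assert_eq!(ShebangDirective::try_extract(" #!/usr/bin/env python"), None);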
#[cfg(test)]
mod tests {
use insta::assert_debug_snapshot;
use super::ShebangDirective;
#[test]
fn shebang_non_match() {
let source = "not a match";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_end_of_line() {
let source = "print('test') #!/usr/bin/python";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_match() {
let source = "#!/usr/bin/env python";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_match_trailing_comment() {
let source = "#!/usr/bin/env python # trailing comment";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_leading_space() {
let source = " #!/usr/bin/env python";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
}


@ -0,0 +1,5 @@
---
source: crates/ruff_linter/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
None


@ -0,0 +1,5 @@
---
source: crates/ruff_linter/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
None


@ -0,0 +1,9 @@
---
source: crates/ruff_linter/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
Some(
ShebangDirective(
"/usr/bin/env python",
),
)


@ -0,0 +1,9 @@
---
source: crates/ruff_linter/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
Some(
ShebangDirective(
"/usr/bin/env python # trailing comment",
),
)


@ -0,0 +1,5 @@
---
source: crates/ruff_linter/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
None


@ -0,0 +1,92 @@
use libcst_native::{
Expression, Name, NameOrAttribute, ParenthesizableWhitespace, SimpleWhitespace, UnaryOperation,
};
fn compose_call_path_inner<'a>(expr: &'a Expression, parts: &mut Vec<&'a str>) {
match expr {
Expression::Call(expr) => {
compose_call_path_inner(&expr.func, parts);
}
Expression::Attribute(expr) => {
compose_call_path_inner(&expr.value, parts);
parts.push(expr.attr.value);
}
Expression::Name(expr) => {
parts.push(expr.value);
}
_ => {}
}
}
pub(crate) fn compose_call_path(expr: &Expression) -> Option<String> {
let mut segments = vec![];
compose_call_path_inner(expr, &mut segments);
if segments.is_empty() {
None
} else {
Some(segments.join("."))
}
}
pub(crate) fn compose_module_path(module: &NameOrAttribute) -> String {
match module {
NameOrAttribute::N(name) => name.value.to_string(),
NameOrAttribute::A(attr) => {
let name = attr.attr.value;
let prefix = compose_call_path(&attr.value);
prefix.map_or_else(|| name.to_string(), |prefix| format!("{prefix}.{name}"))
}
}
}
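// For example (a sketch; `parse_expression` is the same libcst_native entry
// point used by `crate::cst::matchers`):
//
//     let expr = libcst_native::parse_expression("foo.bar.baz()").unwrap();
//     assert_eq!(compose_call_path(&expr), Some("foo.bar.baz".to_string()));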
/// Return a [`ParenthesizableWhitespace`] containing a single space.
pub(crate) fn space() -> ParenthesizableWhitespace<'static> {
ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(" "))
}
/// Ensure that a [`ParenthesizableWhitespace`] contains at least one space.
pub(crate) fn or_space(whitespace: ParenthesizableWhitespace) -> ParenthesizableWhitespace {
if whitespace == ParenthesizableWhitespace::default() {
space()
} else {
whitespace
}
}
/// Negate a condition, i.e., `a` => `not a` and `not a` => `a`.
pub(crate) fn negate<'a>(expression: &Expression<'a>) -> Expression<'a> {
if let Expression::UnaryOperation(ref expression) = expression {
if matches!(expression.operator, libcst_native::UnaryOp::Not { .. }) {
return *expression.expression.clone();
}
}
if let Expression::Name(ref expression) = expression {
match expression.value {
"True" => {
return Expression::Name(Box::new(Name {
value: "False",
lpar: vec![],
rpar: vec![],
}));
}
"False" => {
return Expression::Name(Box::new(Name {
value: "True",
lpar: vec![],
rpar: vec![],
}));
}
_ => {}
}
}
Expression::UnaryOperation(Box::new(UnaryOperation {
operator: libcst_native::UnaryOp::Not {
whitespace_after: space(),
},
expression: Box::new(expression.clone()),
lpar: vec![],
rpar: vec![],
}))
}
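// Sketch of the three behaviors (using `match_expression` from the sibling
// `matchers` module to build inputs):
//
//     negate(&match_expression("x")?)      // => `not x`
//     negate(&match_expression("not x")?)  // => `x` (double negation removed)
//     negate(&match_expression("True")?)   // => `False`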


@ -0,0 +1,258 @@
use crate::autofix::codemods::CodegenStylist;
use anyhow::{bail, Result};
use libcst_native::{
Arg, Attribute, Call, Comparison, CompoundStatement, Dict, Expression, FunctionDef,
GeneratorExp, If, Import, ImportAlias, ImportFrom, ImportNames, IndentedBlock, Lambda,
ListComp, Module, Name, SmallStatement, Statement, Suite, Tuple, With,
};
use ruff_python_codegen::Stylist;
pub(crate) fn match_module(module_text: &str) -> Result<Module> {
match libcst_native::parse_module(module_text, None) {
Ok(module) => Ok(module),
Err(_) => bail!("Failed to extract CST from source"),
}
}
pub(crate) fn match_statement(statement_text: &str) -> Result<Statement> {
match libcst_native::parse_statement(statement_text) {
Ok(statement) => Ok(statement),
Err(_) => bail!("Failed to extract statement from source"),
}
}
pub(crate) fn match_import<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut Import<'b>> {
if let Statement::Simple(expr) = statement {
if let Some(SmallStatement::Import(expr)) = expr.body.first_mut() {
Ok(expr)
} else {
bail!("Expected SmallStatement::Import")
}
} else {
bail!("Expected Statement::Simple")
}
}
pub(crate) fn match_import_from<'a, 'b>(
statement: &'a mut Statement<'b>,
) -> Result<&'a mut ImportFrom<'b>> {
if let Statement::Simple(expr) = statement {
if let Some(SmallStatement::ImportFrom(expr)) = expr.body.first_mut() {
Ok(expr)
} else {
bail!("Expected SmallStatement::ImportFrom")
}
} else {
bail!("Expected Statement::Simple")
}
}
pub(crate) fn match_aliases<'a, 'b>(
import_from: &'a mut ImportFrom<'b>,
) -> Result<&'a mut Vec<ImportAlias<'b>>> {
if let ImportNames::Aliases(aliases) = &mut import_from.names {
Ok(aliases)
} else {
bail!("Expected ImportNames::Aliases")
}
}
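// A hypothetical drill-down, chaining the matchers above:
//
//     let mut stmt = match_statement("from foo import bar, baz")?;
//     let import = match_import_from(&mut stmt)?;
//     let aliases = match_aliases(import)?;
//     assert_eq!(aliases.len(), 2);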
pub(crate) fn match_call<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Call<'b>> {
if let Expression::Call(call) = expression {
Ok(call)
} else {
bail!("Expected Expression::Call")
}
}
pub(crate) fn match_call_mut<'a, 'b>(
expression: &'a mut Expression<'b>,
) -> Result<&'a mut Call<'b>> {
if let Expression::Call(call) = expression {
Ok(call)
} else {
bail!("Expected Expression::Call")
}
}
pub(crate) fn match_comparison<'a, 'b>(
expression: &'a mut Expression<'b>,
) -> Result<&'a mut Comparison<'b>> {
if let Expression::Comparison(comparison) = expression {
Ok(comparison)
} else {
bail!("Expected Expression::Comparison")
}
}
pub(crate) fn match_dict<'a, 'b>(expression: &'a mut Expression<'b>) -> Result<&'a mut Dict<'b>> {
if let Expression::Dict(dict) = expression {
Ok(dict)
} else {
bail!("Expected Expression::Dict")
}
}
pub(crate) fn match_attribute<'a, 'b>(
expression: &'a mut Expression<'b>,
) -> Result<&'a mut Attribute<'b>> {
if let Expression::Attribute(attribute) = expression {
Ok(attribute)
} else {
bail!("Expected Expression::Attribute")
}
}
pub(crate) fn match_name<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Name<'b>> {
if let Expression::Name(name) = expression {
Ok(name)
} else {
bail!("Expected Expression::Name")
}
}
pub(crate) fn match_arg<'a, 'b>(call: &'a Call<'b>) -> Result<&'a Arg<'b>> {
if let Some(arg) = call.args.first() {
Ok(arg)
} else {
bail!("Expected Arg")
}
}
pub(crate) fn match_generator_exp<'a, 'b>(
expression: &'a Expression<'b>,
) -> Result<&'a GeneratorExp<'b>> {
if let Expression::GeneratorExp(generator_exp) = expression {
Ok(generator_exp)
} else {
bail!("Expected Expression::GeneratorExp")
}
}
pub(crate) fn match_tuple<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Tuple<'b>> {
if let Expression::Tuple(tuple) = expression {
Ok(tuple)
} else {
bail!("Expected Expression::Tuple")
}
}
pub(crate) fn match_list_comp<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a ListComp<'b>> {
if let Expression::ListComp(list_comp) = expression {
Ok(list_comp)
} else {
bail!("Expected Expression::ListComp")
}
}
pub(crate) fn match_lambda<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Lambda<'b>> {
if let Expression::Lambda(lambda) = expression {
Ok(lambda)
} else {
bail!("Expected Expression::Lambda")
}
}
pub(crate) fn match_function_def<'a, 'b>(
statement: &'a mut Statement<'b>,
) -> Result<&'a mut FunctionDef<'b>> {
if let Statement::Compound(compound) = statement {
if let CompoundStatement::FunctionDef(function_def) = compound {
Ok(function_def)
} else {
bail!("Expected CompoundStatement::FunctionDef")
}
} else {
bail!("Expected Statement::Compound")
}
}
pub(crate) fn match_indented_block<'a, 'b>(
suite: &'a mut Suite<'b>,
) -> Result<&'a mut IndentedBlock<'b>> {
if let Suite::IndentedBlock(indented_block) = suite {
Ok(indented_block)
} else {
bail!("Expected Suite::IndentedBlock")
}
}
pub(crate) fn match_with<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut With<'b>> {
if let Statement::Compound(compound) = statement {
if let CompoundStatement::With(with) = compound {
Ok(with)
} else {
bail!("Expected CompoundStatement::With")
}
} else {
bail!("Expected Statement::Compound")
}
}
pub(crate) fn match_if<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut If<'b>> {
if let Statement::Compound(compound) = statement {
if let CompoundStatement::If(if_) = compound {
Ok(if_)
} else {
bail!("Expected CompoundStatement::If")
}
} else {
bail!("Expected Statement::Compound")
}
}
/// Given the source code for an expression, return the parsed [`Expression`].
///
/// If the expression is not guaranteed to be valid as a standalone expression (e.g., if it may
/// span multiple lines and/or require parentheses), use [`transform_expression`] instead.
pub(crate) fn match_expression(expression_text: &str) -> Result<Expression> {
match libcst_native::parse_expression(expression_text) {
Ok(expression) => Ok(expression),
Err(_) => bail!("Failed to extract expression from source"),
}
}
/// Run a transformation function over an expression.
///
/// Passing an expression to [`match_expression`] directly can lead to parse errors if the
/// expression is not a valid standalone expression (e.g., it was parenthesized in the original
/// source). This method instead wraps the expression in "fake" parentheses, runs the
/// transformation, then removes the "fake" parentheses.
pub(crate) fn transform_expression(
source_code: &str,
stylist: &Stylist,
func: impl FnOnce(Expression) -> Result<Expression>,
) -> Result<String> {
// Wrap the expression in parentheses.
let source_code = format!("({source_code})");
let expression = match_expression(&source_code)?;
// Run the function on the expression.
let expression = func(expression)?;
// Codegen the expression.
let mut source_code = expression.codegen_stylist(stylist);
// Drop the outer parentheses.
source_code.drain(0..1);
source_code.drain(source_code.len() - 1..source_code.len());
Ok(source_code)
}
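// For instance (a sketch; `stylist` assumed to be in scope), this negates an
// expression that spans multiple lines and so would not parse standalone:
//
//     let negated = transform_expression("x\n== y", stylist, |expr| {
//         Ok(crate::cst::helpers::negate(&expr))
//     })?;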
/// Like [`transform_expression`], but operates on the source code of the expression, rather than
/// the parsed [`Expression`]. This _shouldn't_ exist, but does to accommodate lifetime issues.
pub(crate) fn transform_expression_text(
source_code: &str,
func: impl FnOnce(String) -> Result<String>,
) -> Result<String> {
// Wrap the expression in parentheses.
let source_code = format!("({source_code})");
// Run the function on the expression.
let mut transformed = func(source_code)?;
// Drop the outer parentheses.
transformed.drain(0..1);
transformed.drain(transformed.len() - 1..transformed.len());
Ok(transformed)
}


@ -0,0 +1,2 @@
pub(crate) mod helpers;
pub(crate) mod matchers;


@ -0,0 +1,622 @@
//! Extract `# noqa`, `# isort: skip`, and `# TODO` directives from tokenized source.
use std::str::FromStr;
use bitflags::bitflags;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::Tok;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use ruff_python_index::Indexer;
use ruff_source_file::Locator;
use crate::noqa::NoqaMapping;
use crate::settings::Settings;
bitflags! {
#[derive(Debug, Copy, Clone)]
pub struct Flags: u8 {
const NOQA = 0b0000_0001;
const ISORT = 0b0000_0010;
}
}
impl Flags {
pub fn from_settings(settings: &Settings) -> Self {
if settings
.rules
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_imports())
{
Self::NOQA | Self::ISORT
} else {
Self::NOQA
}
}
}
#[derive(Default, Debug)]
pub struct IsortDirectives {
/// Ranges for which sorting is disabled
pub exclusions: Vec<TextRange>,
/// Text positions at which splits should be inserted
pub splits: Vec<TextSize>,
pub skip_file: bool,
}
impl IsortDirectives {
pub fn is_excluded(&self, offset: TextSize) -> bool {
for range in &self.exclusions {
if range.contains(offset) {
return true;
}
if range.start() > offset {
break;
}
}
false
}
}
pub struct Directives {
pub noqa_line_for: NoqaMapping,
pub isort: IsortDirectives,
}
pub fn extract_directives(
lxr: &[LexResult],
flags: Flags,
locator: &Locator,
indexer: &Indexer,
) -> Directives {
Directives {
noqa_line_for: if flags.intersects(Flags::NOQA) {
extract_noqa_line_for(lxr, locator, indexer)
} else {
NoqaMapping::default()
},
isort: if flags.intersects(Flags::ISORT) {
extract_isort_directives(lxr, locator)
} else {
IsortDirectives::default()
},
}
}
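// A hypothetical end-to-end call (using the same lexer imports as the tests
// below):
//
//     let lxr: Vec<LexResult> = lexer::lex(source, Mode::Module).collect();
//     let directives =
//         extract_directives(&lxr, Flags::from_settings(&settings), &locator, &indexer);
//     if directives.isort.skip_file {
//         // Skip import sorting for the entire file.
//     }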
/// Extract a mapping from logical line to noqa line.
fn extract_noqa_line_for(lxr: &[LexResult], locator: &Locator, indexer: &Indexer) -> NoqaMapping {
let mut string_mappings = Vec::new();
for (tok, range) in lxr.iter().flatten() {
match tok {
Tok::EndOfFile => {
break;
}
// For multi-line strings, we expect `noqa` directives on the last line of the
// string.
Tok::String {
triple_quoted: true,
..
} => {
if locator.contains_line_break(*range) {
string_mappings.push(TextRange::new(
locator.line_start(range.start()),
range.end(),
));
}
}
_ => {}
}
}
let mut continuation_mappings = Vec::new();
// For continuations, we expect `noqa` directives on the last line of the
// continuation.
let mut last: Option<TextRange> = None;
for continuation_line in indexer.continuation_line_starts() {
let line_end = locator.full_line_end(*continuation_line);
if let Some(last_range) = last.take() {
if last_range.end() == *continuation_line {
last = Some(TextRange::new(last_range.start(), line_end));
continue;
}
// new continuation
continuation_mappings.push(last_range);
}
last = Some(TextRange::new(*continuation_line, line_end));
}
if let Some(last_range) = last.take() {
continuation_mappings.push(last_range);
}
// Merge the mappings in sorted order
let mut mappings =
NoqaMapping::with_capacity(continuation_mappings.len() + string_mappings.len());
let mut continuation_mappings = continuation_mappings.into_iter().peekable();
let mut string_mappings = string_mappings.into_iter().peekable();
while let (Some(continuation), Some(string)) =
(continuation_mappings.peek(), string_mappings.peek())
{
if continuation.start() <= string.start() {
mappings.push_mapping(continuation_mappings.next().unwrap());
} else {
mappings.push_mapping(string_mappings.next().unwrap());
}
}
for mapping in continuation_mappings {
mappings.push_mapping(mapping);
}
for mapping in string_mappings {
mappings.push_mapping(mapping);
}
mappings
}
/// Extract a set of ranges over which to disable isort.
fn extract_isort_directives(lxr: &[LexResult], locator: &Locator) -> IsortDirectives {
let mut exclusions: Vec<TextRange> = Vec::default();
let mut splits: Vec<TextSize> = Vec::default();
let mut off: Option<TextSize> = None;
for &(ref tok, range) in lxr.iter().flatten() {
let Tok::Comment(comment_text) = tok else {
continue;
};
// `isort` allows for `# isort: skip` and `# isort: skip_file` to include or
// omit a space after the colon. The remaining action comments are
// required to include the space, and must appear on their own lines.
let comment_text = comment_text.trim_end();
if matches!(comment_text, "# isort: split" | "# ruff: isort: split") {
splits.push(range.start());
} else if matches!(
comment_text,
"# isort: skip_file"
| "# isort:skip_file"
| "# ruff: isort: skip_file"
| "# ruff: isort:skip_file"
) {
return IsortDirectives {
skip_file: true,
..IsortDirectives::default()
};
} else if off.is_some() {
if comment_text == "# isort: on" || comment_text == "# ruff: isort: on" {
if let Some(exclusion_start) = off {
exclusions.push(TextRange::new(exclusion_start, range.start()));
}
off = None;
}
} else {
if comment_text.contains("isort: skip") || comment_text.contains("isort:skip") {
exclusions.push(locator.line_range(range.start()));
} else if comment_text == "# isort: off" || comment_text == "# ruff: isort: off" {
off = Some(range.start());
}
}
}
if let Some(start) = off {
// Close out an unterminated `isort: off` by extending the exclusion to the end of the file.
exclusions.push(TextRange::new(start, locator.contents().text_len()));
}
IsortDirectives {
exclusions,
splits,
..IsortDirectives::default()
}
}
/// A comment that contains a [`TodoDirective`]
pub(crate) struct TodoComment<'a> {
/// The comment's text
pub(crate) content: &'a str,
/// The directive found within the comment.
pub(crate) directive: TodoDirective<'a>,
/// The comment's actual [`TextRange`].
pub(crate) range: TextRange,
/// The comment range's position in [`Indexer`].comment_ranges()
pub(crate) range_index: usize,
}
impl<'a> TodoComment<'a> {
/// Attempt to transform a normal comment into a [`TodoComment`].
pub(crate) fn from_comment(
content: &'a str,
range: TextRange,
range_index: usize,
) -> Option<Self> {
TodoDirective::from_comment(content, range).map(|directive| Self {
content,
directive,
range,
range_index,
})
}
}
#[derive(Debug, PartialEq)]
pub(crate) struct TodoDirective<'a> {
/// The actual directive
pub(crate) content: &'a str,
/// The directive's [`TextRange`] in the file.
pub(crate) range: TextRange,
/// The directive's kind: HACK, XXX, FIXME, or TODO.
pub(crate) kind: TodoDirectiveKind,
}
impl<'a> TodoDirective<'a> {
/// Extract a [`TodoDirective`] from a comment.
pub(crate) fn from_comment(comment: &'a str, comment_range: TextRange) -> Option<Self> {
// The directive's offset from the start of the comment.
let mut relative_offset = TextSize::new(0);
let mut subset_opt = Some(comment);
// Loop over `#`-delimited sections of the comment to check for directives. This will
// correctly handle cases like `# foo # TODO`.
while let Some(subset) = subset_opt {
let trimmed = subset.trim_start_matches('#').trim_start();
let offset = subset.text_len() - trimmed.text_len();
relative_offset += offset;
// If we detect a TodoDirectiveKind variant substring in the comment, construct and
// return the appropriate TodoDirective
if let Ok(directive_kind) = trimmed.parse::<TodoDirectiveKind>() {
let len = directive_kind.len();
return Some(Self {
content: &comment[TextRange::at(relative_offset, len)],
range: TextRange::at(comment_range.start() + relative_offset, len),
kind: directive_kind,
});
}
// Shrink the subset to check for the next phrase starting with "#".
subset_opt = if let Some(new_offset) = trimmed.find('#') {
relative_offset += TextSize::try_from(new_offset).unwrap();
subset.get(relative_offset.to_usize()..)
} else {
None
};
}
None
}
}
#[derive(Debug, PartialEq)]
pub(crate) enum TodoDirectiveKind {
Todo,
Fixme,
Xxx,
Hack,
}
impl FromStr for TodoDirectiveKind {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
// Check prefixes of length 3 (`XXX`), 4 (`TODO`, `HACK`), and 5 (`FIXME`).
for length in [3, 4, 5] {
let Some(substr) = s.get(..length) else {
break;
};
match substr.to_lowercase().as_str() {
"fixme" => {
return Ok(TodoDirectiveKind::Fixme);
}
"hack" => {
return Ok(TodoDirectiveKind::Hack);
}
"todo" => {
return Ok(TodoDirectiveKind::Todo);
}
"xxx" => {
return Ok(TodoDirectiveKind::Xxx);
}
_ => continue,
}
}
Err(())
}
}
impl TodoDirectiveKind {
fn len(&self) -> TextSize {
match self {
TodoDirectiveKind::Xxx => TextSize::new(3),
TodoDirectiveKind::Hack | TodoDirectiveKind::Todo => TextSize::new(4),
TodoDirectiveKind::Fixme => TextSize::new(5),
}
}
}
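// For example (illustrative): parsing is case-insensitive on the directive prefix,
// so the following would hold:
//
//     assert_eq!("FiXmE: later".parse::<TodoDirectiveKind>(), Ok(TodoDirectiveKind::Fixme));
//     assert!("readme".parse::<TodoDirectiveKind>().is_err());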
#[cfg(test)]
mod tests {
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{lexer, Mode};
use ruff_text_size::{TextLen, TextRange, TextSize};
use ruff_python_index::Indexer;
use ruff_source_file::Locator;
use crate::directives::{
extract_isort_directives, extract_noqa_line_for, TodoDirective, TodoDirectiveKind,
};
use crate::noqa::NoqaMapping;
fn noqa_mappings(contents: &str) -> NoqaMapping {
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let indexer = Indexer::from_tokens(&lxr, &locator);
extract_noqa_line_for(&lxr, &locator, &indexer)
}
#[test]
fn noqa_extraction() {
let contents = "x = 1
y = 2 \
+ 1
z = x + 1";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let contents = "
x = 1
y = 2
z = x + 1";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let contents = "x = 1
y = 2
z = x + 1
";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let contents = "x = 1
y = 2
z = x + 1
";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let contents = "x = '''abc
def
ghi
'''
y = 2
z = x + 1";
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(22))])
);
let contents = "x = 1
y = '''abc
def
ghi
'''
z = 2";
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(6), TextSize::from(28))])
);
let contents = "x = 1
y = '''abc
def
ghi
'''";
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(6), TextSize::from(28))])
);
let contents = r"x = \
1";
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(6))])
);
let contents = r"from foo import \
bar as baz, \
qux as quux";
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(36))])
);
let contents = r"
# Foo
from foo import \
bar as baz, \
qux as quux # Baz
x = \
1
y = \
2";
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([
TextRange::new(TextSize::from(7), TextSize::from(43)),
TextRange::new(TextSize::from(65), TextSize::from(71)),
TextRange::new(TextSize::from(77), TextSize::from(83)),
])
);
}
#[test]
fn isort_exclusions() {
let contents = "x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
let contents = "# isort: off
x = 1
y = 2
# isort: on
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(25))])
);
let contents = "# isort: off
x = 1
# isort: off
y = 2
# isort: on
z = x + 1
# isort: on";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(38))])
);
let contents = "# isort: off
x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::from_iter([TextRange::at(TextSize::from(0), contents.text_len())])
);
let contents = "# isort: skip_file
x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
let contents = "# isort: off
x = 1
# isort: on
y = 2
# isort: skip_file
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
}
#[test]
fn isort_splits() {
let contents = "x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
Vec::new()
);
let contents = "x = 1
y = 2
# isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
vec![TextSize::from(12)]
);
let contents = "x = 1
y = 2 # isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
vec![TextSize::from(13)]
);
}
#[test]
fn todo_directives() {
let test_comment = "# TODO: todo tag";
let test_comment_range = TextRange::at(TextSize::new(0), test_comment.text_len());
let expected = TodoDirective {
content: "TODO",
range: TextRange::new(TextSize::new(2), TextSize::new(6)),
kind: TodoDirectiveKind::Todo,
};
assert_eq!(
expected,
TodoDirective::from_comment(test_comment, test_comment_range).unwrap()
);
let test_comment = "#TODO: todo tag";
let test_comment_range = TextRange::at(TextSize::new(0), test_comment.text_len());
let expected = TodoDirective {
content: "TODO",
range: TextRange::new(TextSize::new(1), TextSize::new(5)),
kind: TodoDirectiveKind::Todo,
};
assert_eq!(
expected,
TodoDirective::from_comment(test_comment, test_comment_range).unwrap()
);
let test_comment = "# fixme: fixme tag";
let test_comment_range = TextRange::at(TextSize::new(0), test_comment.text_len());
let expected = TodoDirective {
content: "fixme",
range: TextRange::new(TextSize::new(2), TextSize::new(7)),
kind: TodoDirectiveKind::Fixme,
};
assert_eq!(
expected,
TodoDirective::from_comment(test_comment, test_comment_range).unwrap()
);
let test_comment = "# noqa # TODO: todo";
let test_comment_range = TextRange::at(TextSize::new(0), test_comment.text_len());
let expected = TodoDirective {
content: "TODO",
range: TextRange::new(TextSize::new(9), TextSize::new(13)),
kind: TodoDirectiveKind::Todo,
};
assert_eq!(
expected,
TodoDirective::from_comment(test_comment, test_comment_range).unwrap()
);
let test_comment = "# no directive";
let test_comment_range = TextRange::at(TextSize::new(0), test_comment.text_len());
assert_eq!(
None,
TodoDirective::from_comment(test_comment, test_comment_range)
);
}
}

View file

@ -0,0 +1,104 @@
//! Doc line extraction. In this context, a doc line is a line consisting of a
//! standalone comment or a constant string statement.
use std::iter::FusedIterator;
use ruff_python_ast::{self as ast, Constant, Expr, Stmt, Suite};
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::Tok;
use ruff_text_size::{Ranged, TextSize};
use ruff_python_ast::statement_visitor::{walk_stmt, StatementVisitor};
use ruff_source_file::{Locator, UniversalNewlineIterator};
/// Extract doc lines (standalone comments) from a token sequence.
pub(crate) fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines {
DocLines::new(lxr)
}
pub(crate) struct DocLines<'a> {
inner: std::iter::Flatten<core::slice::Iter<'a, LexResult>>,
prev: TextSize,
}
impl<'a> DocLines<'a> {
fn new(lxr: &'a [LexResult]) -> Self {
Self {
inner: lxr.iter().flatten(),
prev: TextSize::default(),
}
}
}
impl Iterator for DocLines<'_> {
type Item = TextSize;
fn next(&mut self) -> Option<Self::Item> {
let mut at_start_of_line = true;
loop {
let (tok, range) = self.inner.next()?;
match tok {
Tok::Comment(..) => {
if at_start_of_line {
break Some(range.start());
}
}
Tok::Newline | Tok::NonLogicalNewline => {
at_start_of_line = true;
}
Tok::Indent | Tok::Dedent => {
// ignore
}
_ => {
at_start_of_line = false;
}
}
self.prev = range.end();
}
}
}
impl FusedIterator for DocLines<'_> {}
struct StringLinesVisitor<'a> {
string_lines: Vec<TextSize>,
locator: &'a Locator<'a>,
}
impl StatementVisitor<'_> for StringLinesVisitor<'_> {
fn visit_stmt(&mut self, stmt: &Stmt) {
if let Stmt::Expr(ast::StmtExpr { value, range: _ }) = stmt {
if let Expr::Constant(ast::ExprConstant {
value: Constant::Str(..),
..
}) = value.as_ref()
{
for line in UniversalNewlineIterator::with_offset(
self.locator.slice(value.as_ref()),
value.start(),
) {
self.string_lines.push(line.start());
}
}
}
walk_stmt(self, stmt);
}
}
impl<'a> StringLinesVisitor<'a> {
fn new(locator: &'a Locator<'a>) -> Self {
Self {
string_lines: Vec::new(),
locator,
}
}
}
/// Extract doc lines (standalone strings) start positions from an AST.
pub(crate) fn doc_lines_from_ast(python_ast: &Suite, locator: &Locator) -> Vec<TextSize> {
let mut visitor = StringLinesVisitor::new(locator);
visitor.visit_body(python_ast);
visitor.string_lines
}
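// Illustrative usage (a sketch, assuming `source` holds Python code and the parser
// entry points below are in scope):
//
//     let python_ast = ruff_python_parser::parse_suite(source, "<filename>")?;
//     let locator = Locator::new(source);
//     let string_line_starts = doc_lines_from_ast(&python_ast, &locator);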

View file

@ -0,0 +1,75 @@
//! Extract docstrings from an AST.
use ruff_python_ast::{self as ast, Constant, Expr, Stmt};
use ruff_python_semantic::{Definition, DefinitionId, Definitions, Member, MemberKind};
/// Extract a docstring from a function or class body.
pub(crate) fn docstring_from(suite: &[Stmt]) -> Option<&Expr> {
let stmt = suite.first()?;
// Require the docstring to be a standalone expression.
let Stmt::Expr(ast::StmtExpr { value, range: _ }) = stmt else {
return None;
};
// Only match strings.
if !matches!(
value.as_ref(),
Expr::Constant(ast::ExprConstant {
value: Constant::Str(_),
..
})
) {
return None;
}
Some(value)
}
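// For example (illustrative): in `def f(): "doc"; x = 1`, the body's first statement
// is a standalone string expression, so `docstring_from` returns it; if the body
// instead began with `x = 1`, the function would return `None`.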
/// Extract a docstring from a `Definition`.
pub(crate) fn extract_docstring<'a>(definition: &'a Definition<'a>) -> Option<&'a Expr> {
match definition {
Definition::Module(module) => docstring_from(module.python_ast),
Definition::Member(member) => docstring_from(member.body()),
}
}
#[derive(Copy, Clone)]
pub(crate) enum ExtractionTarget<'a> {
Class(&'a ast::StmtClassDef),
Function(&'a ast::StmtFunctionDef),
}
/// Extract a `Definition` from the AST node defined by a `Stmt`.
pub(crate) fn extract_definition<'a>(
target: ExtractionTarget<'a>,
parent: DefinitionId,
definitions: &Definitions<'a>,
) -> Member<'a> {
match target {
ExtractionTarget::Function(function) => match &definitions[parent] {
Definition::Module(..) => Member {
parent,
kind: MemberKind::Function(function),
},
Definition::Member(Member {
kind: MemberKind::Class(_) | MemberKind::NestedClass(_),
..
}) => Member {
parent,
kind: MemberKind::Method(function),
},
Definition::Member(_) => Member {
parent,
kind: MemberKind::NestedFunction(function),
},
},
ExtractionTarget::Class(class) => match &definitions[parent] {
Definition::Module(_) => Member {
parent,
kind: MemberKind::Class(class),
},
Definition::Member(_) => Member {
parent,
kind: MemberKind::NestedClass(class),
},
},
}
}

View file

@ -0,0 +1,38 @@
//! Abstractions for Google-style docstrings.
use crate::docstrings::sections::SectionKind;
pub(crate) static GOOGLE_SECTIONS: &[SectionKind] = &[
SectionKind::Attributes,
SectionKind::Examples,
SectionKind::Methods,
SectionKind::Notes,
SectionKind::Raises,
SectionKind::References,
SectionKind::Returns,
SectionKind::SeeAlso,
SectionKind::Yields,
// Google-only
SectionKind::Args,
SectionKind::Arguments,
SectionKind::Attention,
SectionKind::Caution,
SectionKind::Danger,
SectionKind::Error,
SectionKind::Example,
SectionKind::Hint,
SectionKind::Important,
SectionKind::KeywordArgs,
SectionKind::KeywordArguments,
SectionKind::Note,
SectionKind::Notes,
SectionKind::OtherArgs,
SectionKind::OtherArguments,
SectionKind::Return,
SectionKind::Tip,
SectionKind::Todo,
SectionKind::Warning,
SectionKind::Warnings,
SectionKind::Warns,
SectionKind::Yield,
];

View file

@ -0,0 +1,78 @@
use std::fmt::{Debug, Formatter};
use std::ops::Deref;
use ruff_python_ast::Expr;
use ruff_python_semantic::Definition;
use ruff_text_size::{Ranged, TextRange};
pub(crate) mod extraction;
pub(crate) mod google;
pub(crate) mod numpy;
pub(crate) mod sections;
pub(crate) mod styles;
#[derive(Debug)]
pub(crate) struct Docstring<'a> {
pub(crate) definition: &'a Definition<'a>,
pub(crate) expr: &'a Expr,
/// The content of the docstring, including the leading and trailing quotes.
pub(crate) contents: &'a str,
/// The range of the docstring body (without the quotes). The range is relative to [`Self::contents`].
pub(crate) body_range: TextRange,
pub(crate) indentation: &'a str,
}
impl<'a> Docstring<'a> {
pub(crate) fn body(&self) -> DocstringBody {
DocstringBody { docstring: self }
}
pub(crate) fn leading_quote(&self) -> &'a str {
&self.contents[TextRange::up_to(self.body_range.start())]
}
pub(crate) fn triple_quoted(&self) -> bool {
let leading_quote = self.leading_quote();
leading_quote.ends_with("\"\"\"") || leading_quote.ends_with("'''")
}
}
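// Illustrative: for a docstring whose `contents` are `'''Summary.'''`, `body_range`
// covers `Summary.`, `leading_quote()` returns `'''`, and `triple_quoted()` is `true`.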
impl Ranged for Docstring<'_> {
fn range(&self) -> TextRange {
self.expr.range()
}
}
#[derive(Copy, Clone)]
pub(crate) struct DocstringBody<'a> {
docstring: &'a Docstring<'a>,
}
impl<'a> DocstringBody<'a> {
pub(crate) fn as_str(self) -> &'a str {
&self.docstring.contents[self.docstring.body_range]
}
}
impl Ranged for DocstringBody<'_> {
fn range(&self) -> TextRange {
self.docstring.body_range + self.docstring.start()
}
}
impl Deref for DocstringBody<'_> {
type Target = str;
fn deref(&self) -> &Self::Target {
self.as_str()
}
}
impl Debug for DocstringBody<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DocstringBody")
.field("text", &self.as_str())
.field("range", &self.range())
.finish()
}
}

View file

@ -0,0 +1,21 @@
//! Abstractions for NumPy-style docstrings.
use crate::docstrings::sections::SectionKind;
pub(crate) static NUMPY_SECTIONS: &[SectionKind] = &[
SectionKind::Attributes,
SectionKind::Examples,
SectionKind::Methods,
SectionKind::Notes,
SectionKind::Raises,
SectionKind::References,
SectionKind::Returns,
SectionKind::SeeAlso,
SectionKind::Yields,
// NumPy-only
SectionKind::ExtendedSummary,
SectionKind::OtherParams,
SectionKind::OtherParameters,
SectionKind::Parameters,
SectionKind::ShortSummary,
];

View file

@ -0,0 +1,443 @@
use std::fmt::{Debug, Formatter};
use std::iter::FusedIterator;
use ruff_python_ast::docstrings::{leading_space, leading_words};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use strum_macros::EnumIter;
use ruff_source_file::{Line, UniversalNewlineIterator, UniversalNewlines};
use crate::docstrings::styles::SectionStyle;
use crate::docstrings::{Docstring, DocstringBody};
#[derive(EnumIter, PartialEq, Eq, Debug, Clone, Copy)]
pub(crate) enum SectionKind {
Args,
Arguments,
Attention,
Attributes,
Caution,
Danger,
Error,
Example,
Examples,
ExtendedSummary,
Hint,
Important,
KeywordArgs,
KeywordArguments,
Methods,
Note,
Notes,
OtherArgs,
OtherArguments,
OtherParams,
OtherParameters,
Parameters,
Raises,
References,
Return,
Returns,
SeeAlso,
ShortSummary,
Tip,
Todo,
Warning,
Warnings,
Warns,
Yield,
Yields,
}
impl SectionKind {
pub(crate) fn from_str(s: &str) -> Option<Self> {
match s.to_ascii_lowercase().as_str() {
"args" => Some(Self::Args),
"arguments" => Some(Self::Arguments),
"attention" => Some(Self::Attention),
"attributes" => Some(Self::Attributes),
"caution" => Some(Self::Caution),
"danger" => Some(Self::Danger),
"error" => Some(Self::Error),
"example" => Some(Self::Example),
"examples" => Some(Self::Examples),
"extended summary" => Some(Self::ExtendedSummary),
"hint" => Some(Self::Hint),
"important" => Some(Self::Important),
"keyword args" => Some(Self::KeywordArgs),
"keyword arguments" => Some(Self::KeywordArguments),
"methods" => Some(Self::Methods),
"note" => Some(Self::Note),
"notes" => Some(Self::Notes),
"other args" => Some(Self::OtherArgs),
"other arguments" => Some(Self::OtherArguments),
"other params" => Some(Self::OtherParams),
"other parameters" => Some(Self::OtherParameters),
"parameters" => Some(Self::Parameters),
"raises" => Some(Self::Raises),
"references" => Some(Self::References),
"return" => Some(Self::Return),
"returns" => Some(Self::Returns),
"see also" => Some(Self::SeeAlso),
"short summary" => Some(Self::ShortSummary),
"tip" => Some(Self::Tip),
"todo" => Some(Self::Todo),
"warning" => Some(Self::Warning),
"warnings" => Some(Self::Warnings),
"warns" => Some(Self::Warns),
"yield" => Some(Self::Yield),
"yields" => Some(Self::Yields),
_ => None,
}
}
pub(crate) fn as_str(self) -> &'static str {
match self {
Self::Args => "Args",
Self::Arguments => "Arguments",
Self::Attention => "Attention",
Self::Attributes => "Attributes",
Self::Caution => "Caution",
Self::Danger => "Danger",
Self::Error => "Error",
Self::Example => "Example",
Self::Examples => "Examples",
Self::ExtendedSummary => "Extended Summary",
Self::Hint => "Hint",
Self::Important => "Important",
Self::KeywordArgs => "Keyword Args",
Self::KeywordArguments => "Keyword Arguments",
Self::Methods => "Methods",
Self::Note => "Note",
Self::Notes => "Notes",
Self::OtherArgs => "Other Args",
Self::OtherArguments => "Other Arguments",
Self::OtherParams => "Other Params",
Self::OtherParameters => "Other Parameters",
Self::Parameters => "Parameters",
Self::Raises => "Raises",
Self::References => "References",
Self::Return => "Return",
Self::Returns => "Returns",
Self::SeeAlso => "See Also",
Self::ShortSummary => "Short Summary",
Self::Tip => "Tip",
Self::Todo => "Todo",
Self::Warning => "Warning",
Self::Warnings => "Warnings",
Self::Warns => "Warns",
Self::Yield => "Yield",
Self::Yields => "Yields",
}
}
}
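// For example (illustrative): `SectionKind::from_str("keyword args")` yields
// `Some(SectionKind::KeywordArgs)`, whose canonical `as_str()` form is "Keyword Args".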
pub(crate) struct SectionContexts<'a> {
contexts: Vec<SectionContextData>,
docstring: &'a Docstring<'a>,
}
impl<'a> SectionContexts<'a> {
/// Extract all `SectionContext` values from a docstring.
pub(crate) fn from_docstring(docstring: &'a Docstring<'a>, style: SectionStyle) -> Self {
let contents = docstring.body();
let mut contexts = Vec::new();
let mut last: Option<SectionContextData> = None;
let mut lines = contents.universal_newlines().peekable();
// Skip the first line, which is the summary.
let mut previous_line = lines.next();
while let Some(line) = lines.next() {
if let Some(section_kind) = suspected_as_section(&line, style) {
let indent = leading_space(&line);
let section_name = leading_words(&line);
let section_name_range = TextRange::at(indent.text_len(), section_name.text_len());
if is_docstring_section(
&line,
section_name_range,
previous_line.as_ref(),
lines.peek(),
) {
if let Some(mut last) = last.take() {
last.range = TextRange::new(last.start(), line.start());
contexts.push(last);
}
last = Some(SectionContextData {
kind: section_kind,
name_range: section_name_range + line.start(),
range: TextRange::empty(line.start()),
summary_full_end: line.full_end(),
});
}
}
previous_line = Some(line);
}
if let Some(mut last) = last.take() {
last.range = TextRange::new(last.start(), contents.text_len());
contexts.push(last);
}
Self {
contexts,
docstring,
}
}
pub(crate) fn len(&self) -> usize {
self.contexts.len()
}
pub(crate) fn iter(&self) -> SectionContextsIter {
SectionContextsIter {
docstring_body: self.docstring.body(),
inner: self.contexts.iter(),
}
}
}
impl<'a> IntoIterator for &'a SectionContexts<'a> {
type IntoIter = SectionContextsIter<'a>;
type Item = SectionContext<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl Debug for SectionContexts<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
pub(crate) struct SectionContextsIter<'a> {
docstring_body: DocstringBody<'a>,
inner: std::slice::Iter<'a, SectionContextData>,
}
impl<'a> Iterator for SectionContextsIter<'a> {
type Item = SectionContext<'a>;
fn next(&mut self) -> Option<Self::Item> {
let next = self.inner.next()?;
Some(SectionContext {
data: next,
docstring_body: self.docstring_body,
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
impl<'a> DoubleEndedIterator for SectionContextsIter<'a> {
fn next_back(&mut self) -> Option<Self::Item> {
let back = self.inner.next_back()?;
Some(SectionContext {
data: back,
docstring_body: self.docstring_body,
})
}
}
impl FusedIterator for SectionContextsIter<'_> {}
impl ExactSizeIterator for SectionContextsIter<'_> {}
#[derive(Debug)]
struct SectionContextData {
kind: SectionKind,
/// Range of the section name, relative to the [`Docstring::body`]
name_range: TextRange,
/// Range from the start to the end of the section, relative to the [`Docstring::body`]
range: TextRange,
/// End of the summary, relative to the [`Docstring::body`]
summary_full_end: TextSize,
}
impl Ranged for SectionContextData {
fn range(&self) -> TextRange {
self.range
}
}
pub(crate) struct SectionContext<'a> {
data: &'a SectionContextData,
docstring_body: DocstringBody<'a>,
}
impl<'a> SectionContext<'a> {
/// The `kind` of the section, e.g. [`SectionKind::Args`] or [`SectionKind::Returns`].
pub(crate) const fn kind(&self) -> SectionKind {
self.data.kind
}
/// The name of the section as it appears in the docstring, e.g. "Args" or "Returns".
pub(crate) fn section_name(&self) -> &'a str {
&self.docstring_body.as_str()[self.data.name_range]
}
/// Returns the rest of the summary line after the section name.
pub(crate) fn summary_after_section_name(&self) -> &'a str {
&self.summary_line()[usize::from(self.data.name_range.end() - self.data.range.start())..]
}
fn offset(&self) -> TextSize {
self.docstring_body.start()
}
/// The absolute range of the section name
pub(crate) fn section_name_range(&self) -> TextRange {
self.data.name_range + self.offset()
}
/// The absolute range of the summary line, excluding any trailing newline character.
pub(crate) fn summary_range(&self) -> TextRange {
TextRange::at(self.range().start(), self.summary_line().text_len())
}
/// Range of the summary line relative to [`Docstring::body`], including the trailing newline character.
fn summary_full_range_relative(&self) -> TextRange {
TextRange::new(self.range_relative().start(), self.data.summary_full_end)
}
/// Returns the range of this section relative to [`Docstring::body`]
const fn range_relative(&self) -> TextRange {
self.data.range
}
/// The absolute range of the full-section.
pub(crate) fn range(&self) -> TextRange {
self.range_relative() + self.offset()
}
/// Summary line without the trailing newline characters
pub(crate) fn summary_line(&self) -> &'a str {
let full_summary = &self.docstring_body.as_str()[self.summary_full_range_relative()];
let mut bytes = full_summary.bytes().rev();
let newline_width = match bytes.next() {
Some(b'\n') => {
if bytes.next() == Some(b'\r') {
2
} else {
1
}
}
Some(b'\r') => 1,
_ => 0,
};
&full_summary[..full_summary.len() - newline_width]
}
/// Returns the text of the last line of the previous section or an empty string if it is the first section.
pub(crate) fn previous_line(&self) -> Option<&'a str> {
let previous =
&self.docstring_body.as_str()[TextRange::up_to(self.range_relative().start())];
previous.universal_newlines().last().map(|l| l.as_str())
}
/// Returns the lines belonging to this section after the summary line.
pub(crate) fn following_lines(&self) -> UniversalNewlineIterator<'a> {
let lines = self.following_lines_str();
UniversalNewlineIterator::with_offset(lines, self.offset() + self.data.summary_full_end)
}
fn following_lines_str(&self) -> &'a str {
&self.docstring_body.as_str()[self.following_range_relative()]
}
/// Returns the range to the following lines relative to [`Docstring::body`].
const fn following_range_relative(&self) -> TextRange {
TextRange::new(self.data.summary_full_end, self.range_relative().end())
}
/// Returns the absolute range of the following lines.
pub(crate) fn following_range(&self) -> TextRange {
self.following_range_relative() + self.offset()
}
}
impl Ranged for SectionContext<'_> {
fn range(&self) -> TextRange {
self.range()
}
}
impl Debug for SectionContext<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SectionContext")
.field("kind", &self.kind())
.field("section_name", &self.section_name())
.field("summary_line", &self.summary_line())
.field("following_lines", &&self.following_lines_str())
.finish()
}
}
fn suspected_as_section(line: &str, style: SectionStyle) -> Option<SectionKind> {
if let Some(kind) = SectionKind::from_str(leading_words(line)) {
if style.sections().contains(&kind) {
return Some(kind);
}
}
None
}
/// Check if the suspected context is really a section header.
fn is_docstring_section(
line: &Line,
section_name_range: TextRange,
previous_line: Option<&Line>,
next_line: Option<&Line>,
) -> bool {
// Determine whether the current line looks like a section header, e.g., "Args:".
let section_name_suffix = line[usize::from(section_name_range.end())..].trim();
let this_looks_like_a_section_name =
section_name_suffix == ":" || section_name_suffix.is_empty();
if !this_looks_like_a_section_name {
return false;
}
// Determine whether the next line is an underline, e.g., "-----".
let next_line_is_underline = next_line.is_some_and(|next_line| {
let next_line = next_line.trim();
!next_line.is_empty() && next_line.chars().all(|char| matches!(char, '-' | '='))
});
if next_line_is_underline {
return true;
}
// Determine whether the previous line looks like the end of a paragraph.
previous_line.map_or(true, |previous_line| {
let previous_line = previous_line.trim();
let previous_line_ends_with_punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
.into_iter()
.any(|char| previous_line.ends_with(char));
previous_line_ends_with_punctuation || previous_line.is_empty()
})
}
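// Illustrative examples of the heuristics above: "Returns" followed by an underline
// line of `-` characters is a section header; so is "Args:" directly after a blank
// line; but a bare "Args" whose previous line ends mid-sentence (no trailing
// punctuation) is treated as ordinary prose.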

View file

@ -0,0 +1,18 @@
use crate::docstrings::google::GOOGLE_SECTIONS;
use crate::docstrings::numpy::NUMPY_SECTIONS;
use crate::docstrings::sections::SectionKind;
#[derive(Copy, Clone)]
pub(crate) enum SectionStyle {
Numpy,
Google,
}
impl SectionStyle {
pub(crate) fn sections(&self) -> &[SectionKind] {
match self {
SectionStyle::Numpy => NUMPY_SECTIONS,
SectionStyle::Google => GOOGLE_SECTIONS,
}
}
}

View file

@ -0,0 +1,84 @@
use std::path::{Path, PathBuf};
use globset::GlobMatcher;
use log::debug;
use path_absolutize::Absolutize;
use crate::registry::RuleSet;
/// Create a set with codes matching the pattern/code pairs.
pub(crate) fn ignores_from_path(
path: &Path,
pattern_code_pairs: &[(GlobMatcher, GlobMatcher, RuleSet)],
) -> RuleSet {
let file_name = path.file_name().expect("Unable to parse filename");
pattern_code_pairs
.iter()
.filter_map(|(absolute, basename, rules)| {
if basename.is_match(file_name) {
debug!(
"Adding per-file ignores for {:?} due to basename match on {:?}: {:?}",
path,
basename.glob().regex(),
rules
);
Some(rules)
} else if absolute.is_match(path) {
debug!(
"Adding per-file ignores for {:?} due to absolute match on {:?}: {:?}",
path,
absolute.glob().regex(),
rules
);
Some(rules)
} else {
None
}
})
.flatten()
.collect()
}
/// Convert any path to an absolute path (based on the current working
/// directory).
pub fn normalize_path<P: AsRef<Path>>(path: P) -> PathBuf {
let path = path.as_ref();
if let Ok(path) = path.absolutize() {
return path.to_path_buf();
}
path.to_path_buf()
}
/// Convert any path to an absolute path (based on the specified project root).
pub fn normalize_path_to<P: AsRef<Path>, R: AsRef<Path>>(path: P, project_root: R) -> PathBuf {
let path = path.as_ref();
if let Ok(path) = path.absolutize_from(project_root.as_ref()) {
return path.to_path_buf();
}
path.to_path_buf()
}
/// Convert an absolute path to be relative to the current working directory.
pub fn relativize_path<P: AsRef<Path>>(path: P) -> String {
let path = path.as_ref();
#[cfg(target_arch = "wasm32")]
let cwd = Path::new(".");
#[cfg(not(target_arch = "wasm32"))]
let cwd = path_absolutize::path_dedot::CWD.as_path();
if let Ok(path) = path.strip_prefix(cwd) {
return format!("{}", path.display());
}
format!("{}", path.display())
}
/// Convert an absolute path to be relative to the specified project root.
pub fn relativize_path_to<P: AsRef<Path>, R: AsRef<Path>>(path: P, project_root: R) -> String {
format!(
"{}",
pathdiff::diff_paths(&path, project_root)
.expect("Could not diff paths")
.display()
)
}
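// Illustrative behavior (a sketch, assuming a Unix-style working directory of
// `/project`):
//
//     normalize_path_to("src/app.py", "/project")  // => "/project/src/app.py"
//     relativize_path("/project/src/app.py")       // => "src/app.py"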

View file

@ -0,0 +1,454 @@
//! Insert statements into Python code.
use std::ops::Add;
use ruff_python_ast::{PySourceType, Stmt};
use ruff_python_parser::{lexer, AsMode, Tok};
use ruff_text_size::{Ranged, TextSize};
use ruff_diagnostics::Edit;
use ruff_python_ast::helpers::is_docstring_stmt;
use ruff_python_codegen::Stylist;
use ruff_python_trivia::{textwrap::indent, PythonWhitespace};
use ruff_source_file::{Locator, UniversalNewlineIterator};
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum Placement<'a> {
/// The content will be inserted inline with the existing code (i.e., within semicolon-delimited
/// statements).
Inline,
/// The content will be inserted on its own line.
OwnLine,
/// The content will be inserted as an indented block.
Indented(&'a str),
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) struct Insertion<'a> {
/// The content to add before the insertion.
prefix: &'a str,
/// The location at which to insert.
location: TextSize,
/// The content to add after the insertion.
suffix: &'a str,
/// The line placement of insertion.
placement: Placement<'a>,
}
impl<'a> Insertion<'a> {
/// Create an [`Insertion`] to insert (e.g.) an import statement at the start of a given
/// file, along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// """Hello, world!"""
///
/// import os
/// ```
///
/// The insertion returned will begin at the start of the `import os` statement, and will
/// include a trailing newline.
pub(super) fn start_of_file(
body: &[Stmt],
locator: &Locator,
stylist: &Stylist,
) -> Insertion<'static> {
// Skip over any docstrings.
let mut location = if let Some(location) = match_docstring_end(body) {
// If the first token after the docstring is a semicolon, insert after the semicolon as
// an inline statement.
if let Some(offset) = match_leading_semicolon(locator.after(location)) {
return Insertion::inline(" ", location.add(offset).add(TextSize::of(';')), ";");
}
// Otherwise, advance to the next row.
locator.full_line_end(location)
} else {
TextSize::default()
};
// Skip over commented lines, with whitespace separation.
for line in UniversalNewlineIterator::with_offset(locator.after(location), location) {
let trimmed_line = line.trim_whitespace_start();
if trimmed_line.is_empty() {
continue;
}
if trimmed_line.starts_with('#') {
location = line.full_end();
} else {
break;
}
}
Insertion::own_line("", location, stylist.line_ending().as_str())
}
/// Create an [`Insertion`] to insert (e.g.) an import after the end of the given
/// [`Stmt`], along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// """Hello, world!"""
///
/// import os
/// import math
///
///
/// def foo():
/// pass
/// ```
///
/// The insertion returned will begin after the newline after the last import statement, which
/// in this case is the line after `import math`, and will include a trailing newline.
///
/// The statement itself is assumed to be at the top-level of the module.
pub(super) fn end_of_statement(
stmt: &Stmt,
locator: &Locator,
stylist: &Stylist,
) -> Insertion<'static> {
let location = stmt.end();
if let Some(offset) = match_leading_semicolon(locator.after(location)) {
// If the first token after the statement is a semicolon, insert after the semicolon as
// an inline statement.
Insertion::inline(" ", location.add(offset).add(TextSize::of(';')), ";")
} else {
// Otherwise, insert on the next line.
Insertion::own_line(
"",
locator.full_line_end(location),
stylist.line_ending().as_str(),
)
}
}
/// Create an [`Insertion`] to insert (e.g.) an import statement at the start of a given
/// block, along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// if TYPE_CHECKING:
/// import os
/// ```
///
/// The insertion returned will begin at the start of the `import os` statement, and will
/// include a trailing newline.
///
/// The block itself is assumed to be at the top-level of the module.
pub(super) fn start_of_block(
mut location: TextSize,
locator: &Locator<'a>,
stylist: &Stylist,
source_type: PySourceType,
) -> Insertion<'a> {
enum Awaiting {
Colon(u32),
Newline,
Indent,
}
let mut state = Awaiting::Colon(0);
for (tok, range) in
lexer::lex_starts_at(locator.after(location), source_type.as_mode(), location).flatten()
{
match state {
// Iterate until we find the colon indicating the start of the block body.
Awaiting::Colon(depth) => match tok {
Tok::Colon if depth == 0 => {
state = Awaiting::Newline;
}
Tok::Lpar | Tok::Lbrace | Tok::Lsqb => {
state = Awaiting::Colon(depth.saturating_add(1));
}
Tok::Rpar | Tok::Rbrace | Tok::Rsqb => {
state = Awaiting::Colon(depth.saturating_sub(1));
}
_ => {}
},
// Once we've seen the colon, we're looking for a newline; otherwise, there's no
// block body (e.g. `if True: pass`).
Awaiting::Newline => match tok {
Tok::Comment(..) => {}
Tok::Newline => {
state = Awaiting::Indent;
}
_ => {
location = range.start();
break;
}
},
// Once we've seen the newline, we're looking for the indentation of the block body.
Awaiting::Indent => match tok {
Tok::Comment(..) => {}
Tok::NonLogicalNewline => {}
Tok::Indent => {
// This is like:
// ```py
// if True:
// pass
// ```
// Where `range` is the indentation before the `pass` token.
return Insertion::indented(
"",
range.start(),
stylist.line_ending().as_str(),
locator.slice(range),
);
}
_ => {
location = range.start();
break;
}
},
}
}
// This is like: `if True: pass`, where `location` is the start of the `pass` token.
Insertion::inline("", location, "; ")
}
/// Convert this [`Insertion`] into an [`Edit`] that inserts the given content.
pub(super) fn into_edit(self, content: &str) -> Edit {
let Insertion {
prefix,
location,
suffix,
placement,
} = self;
let content = format!("{prefix}{content}{suffix}");
Edit::insertion(
match placement {
Placement::Indented(indentation) if !indentation.is_empty() => {
indent(&content, indentation).to_string()
}
_ => content,
},
location,
)
}
/// Returns `true` if this [`Insertion`] is inline.
pub(super) fn is_inline(&self) -> bool {
matches!(self.placement, Placement::Inline)
}
/// Create an [`Insertion`] that inserts content inline (i.e., within semicolon-delimited
/// statements).
fn inline(prefix: &'a str, location: TextSize, suffix: &'a str) -> Self {
Self {
prefix,
location,
suffix,
placement: Placement::Inline,
}
}
/// Create an [`Insertion`] that starts on its own line.
fn own_line(prefix: &'a str, location: TextSize, suffix: &'a str) -> Self {
Self {
prefix,
location,
suffix,
placement: Placement::OwnLine,
}
}
/// Create an [`Insertion`] that starts on its own line, with the given indentation.
fn indented(
prefix: &'a str,
location: TextSize,
suffix: &'a str,
indentation: &'a str,
) -> Self {
Self {
prefix,
location,
suffix,
placement: Placement::Indented(indentation),
}
}
}
/// Find the end of the last docstring.
fn match_docstring_end(body: &[Stmt]) -> Option<TextSize> {
let mut iter = body.iter();
let Some(mut stmt) = iter.next() else {
return None;
};
if !is_docstring_stmt(stmt) {
return None;
}
for next in iter {
if !is_docstring_stmt(next) {
break;
}
stmt = next;
}
Some(stmt.end())
}
/// If a line starts with a semicolon, return its offset.
fn match_leading_semicolon(s: &str) -> Option<TextSize> {
for (offset, c) in s.char_indices() {
match c {
' ' | '\t' => continue,
';' => return Some(TextSize::try_from(offset).unwrap()),
_ => break,
}
}
None
}
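// For example (illustrative): `match_leading_semicolon("  ; import os")` returns
// `Some(TextSize::from(2))`, while `match_leading_semicolon("import os")` returns `None`.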
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_python_ast::PySourceType;
use ruff_python_codegen::Stylist;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{parse_suite, Mode};
use ruff_source_file::{LineEnding, Locator};
use ruff_text_size::TextSize;
use super::Insertion;
#[test]
fn start_of_file() -> Result<()> {
fn insert(contents: &str) -> Result<Insertion> {
let program = parse_suite(contents, "<filename>")?;
let tokens: Vec<LexResult> = ruff_python_parser::tokenize(contents, Mode::Module);
let locator = Locator::new(contents);
let stylist = Stylist::from_tokens(&tokens, &locator);
Ok(Insertion::start_of_file(&program, &locator, &stylist))
}
let contents = "";
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(0), LineEnding::default().as_str())
);
let contents = r#"
"""Hello, world!""""#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(19), LineEnding::default().as_str())
);
let contents = r#"
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(20), "\n")
);
let contents = r#"
"""Hello, world!"""
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(40), "\n")
);
let contents = r#"
x = 1
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(0), "\n")
);
let contents = r#"
#!/usr/bin/env python3
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(23), "\n")
);
let contents = r#"
#!/usr/bin/env python3
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(43), "\n")
);
let contents = r#"
"""Hello, world!"""
#!/usr/bin/env python3
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(43), "\n")
);
let contents = r#"
"""%s""" % "Hello, world!"
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::own_line("", TextSize::from(0), "\n")
);
let contents = r#"
"""Hello, world!"""; x = 1
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::inline(" ", TextSize::from(20), ";")
);
let contents = r#"
"""Hello, world!"""; x = 1; y = \
2
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::inline(" ", TextSize::from(20), ";")
);
Ok(())
}
#[test]
fn start_of_block() {
fn insert(contents: &str, offset: TextSize) -> Insertion {
let tokens: Vec<LexResult> = ruff_python_parser::tokenize(contents, Mode::Module);
let locator = Locator::new(contents);
let stylist = Stylist::from_tokens(&tokens, &locator);
Insertion::start_of_block(offset, &locator, &stylist, PySourceType::default())
}
let contents = "if True: pass";
assert_eq!(
insert(contents, TextSize::from(0)),
Insertion::inline("", TextSize::from(9), "; ")
);
let contents = r#"
if True:
pass
"#
.trim_start();
assert_eq!(
insert(contents, TextSize::from(0)),
Insertion::indented("", TextSize::from(9), "\n", " ")
);
}
}

View file

@ -0,0 +1,500 @@
//! Code modification struct to add and modify import statements.
//!
//! Enables rules to make module members available (that may not yet be imported) during fix
//! execution.
use std::error::Error;
use anyhow::Result;
use libcst_native::{ImportAlias, Name, NameOrAttribute};
use ruff_python_ast::{self as ast, PySourceType, Stmt, Suite};
use ruff_text_size::{Ranged, TextSize};
use ruff_diagnostics::Edit;
use ruff_python_ast::imports::{AnyImport, Import, ImportFrom};
use ruff_python_codegen::Stylist;
use ruff_python_semantic::SemanticModel;
use ruff_python_trivia::textwrap::indent;
use ruff_source_file::Locator;
use crate::autofix;
use crate::autofix::codemods::CodegenStylist;
use crate::cst::matchers::{match_aliases, match_import_from, match_statement};
use crate::importer::insertion::Insertion;
mod insertion;
pub(crate) struct Importer<'a> {
/// The Python AST to which we are adding imports.
python_ast: &'a Suite,
/// The [`Locator`] for the Python AST.
locator: &'a Locator<'a>,
/// The [`Stylist`] for the Python AST.
stylist: &'a Stylist<'a>,
/// The list of visited, top-level runtime imports in the Python AST.
runtime_imports: Vec<&'a Stmt>,
/// The list of visited, top-level `if TYPE_CHECKING:` blocks in the Python AST.
type_checking_blocks: Vec<&'a Stmt>,
}
impl<'a> Importer<'a> {
pub(crate) fn new(
python_ast: &'a Suite,
locator: &'a Locator<'a>,
stylist: &'a Stylist<'a>,
) -> Self {
Self {
python_ast,
locator,
stylist,
runtime_imports: Vec::default(),
type_checking_blocks: Vec::default(),
}
}
/// Visit a top-level import statement.
pub(crate) fn visit_import(&mut self, import: &'a Stmt) {
self.runtime_imports.push(import);
}
/// Visit a top-level type-checking block.
pub(crate) fn visit_type_checking_block(&mut self, type_checking_block: &'a Stmt) {
self.type_checking_blocks.push(type_checking_block);
}
/// Add an import statement to import the given module.
///
/// If there are no existing imports, the new import will be added at the top
/// of the file. Otherwise, it will be added after the most recent top-level
/// import statement.
pub(crate) fn add_import(&self, import: &AnyImport, at: TextSize) -> Edit {
let required_import = import.to_string();
if let Some(stmt) = self.preceding_import(at) {
// Insert after the last top-level import.
Insertion::end_of_statement(stmt, self.locator, self.stylist)
.into_edit(&required_import)
} else {
// Insert at the start of the file.
Insertion::start_of_file(self.python_ast, self.locator, self.stylist)
.into_edit(&required_import)
}
}
/// Move an existing import to the top-level, thereby making it available at runtime.
///
/// If there are no existing imports, the new import will be added at the top
/// of the file. Otherwise, it will be added after the most recent top-level
/// import statement.
pub(crate) fn runtime_import_edit(
&self,
import: &ImportedMembers,
at: TextSize,
) -> Result<RuntimeImportEdit> {
// Generate the modified import statement.
let content = autofix::codemods::retain_imports(
&import.names,
import.statement,
self.locator,
self.stylist,
)?;
// Add the import to the top-level.
let insertion = if let Some(stmt) = self.preceding_import(at) {
// Insert after the last top-level import.
Insertion::end_of_statement(stmt, self.locator, self.stylist)
} else {
// Insert at the start of the file.
Insertion::start_of_file(self.python_ast, self.locator, self.stylist)
};
let add_import_edit = insertion.into_edit(&content);
Ok(RuntimeImportEdit { add_import_edit })
}
/// Move an existing import into a `TYPE_CHECKING` block.
///
/// If there are no existing `TYPE_CHECKING` blocks, a new one will be added at the top
/// of the file. Otherwise, it will be added after the most recent top-level
/// `TYPE_CHECKING` block.
pub(crate) fn typing_import_edit(
&self,
import: &ImportedMembers,
at: TextSize,
semantic: &SemanticModel,
source_type: PySourceType,
) -> Result<TypingImportEdit> {
// Generate the modified import statement.
let content = autofix::codemods::retain_imports(
&import.names,
import.statement,
self.locator,
self.stylist,
)?;
// Import the `TYPE_CHECKING` symbol from the typing module.
let (type_checking_edit, type_checking) = self.get_or_import_symbol(
&ImportRequest::import_from("typing", "TYPE_CHECKING"),
at,
semantic,
)?;
// Add the import to a `TYPE_CHECKING` block.
let add_import_edit = if let Some(block) = self.preceding_type_checking_block(at) {
// Add the import to the `TYPE_CHECKING` block.
self.add_to_type_checking_block(&content, block.start(), source_type)
} else {
// Add the import to a new `TYPE_CHECKING` block.
self.add_type_checking_block(
&format!(
"{}if {type_checking}:{}{}",
self.stylist.line_ending().as_str(),
self.stylist.line_ending().as_str(),
indent(&content, self.stylist.indentation())
),
at,
)?
};
Ok(TypingImportEdit {
type_checking_edit,
add_import_edit,
})
}
/// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make
/// the symbol available in the current scope along with the bound name of the symbol.
///
/// Attempts to reuse existing imports when possible.
pub(crate) fn get_or_import_symbol(
&self,
symbol: &ImportRequest,
at: TextSize,
semantic: &SemanticModel,
) -> Result<(Edit, String), ResolutionError> {
self.get_symbol(symbol, at, semantic)?
.map_or_else(|| self.import_symbol(symbol, at, semantic), Ok)
}
/// Return an [`Edit`] to reference an existing symbol, if it's present in the given [`SemanticModel`].
fn get_symbol(
&self,
symbol: &ImportRequest,
at: TextSize,
semantic: &SemanticModel,
) -> Result<Option<(Edit, String)>, ResolutionError> {
// If the symbol is already available in the current scope, use it.
let Some(imported_name) =
semantic.resolve_qualified_import_name(symbol.module, symbol.member)
else {
return Ok(None);
};
// If the symbol source (i.e., the import statement) comes after the current location,
// abort. For example, we could be generating an edit within a function, and the import
// could be defined in the module scope, but after the function definition. In this case,
// it's unclear whether we can use the symbol (the function could be called between the
// import and the current location, and thus the symbol would not be available). It's also
// unclear whether we should add an import statement at the start of the file, since it could
// be shadowed between the import and the current location.
if imported_name.start() > at {
return Err(ResolutionError::ImportAfterUsage);
}
// If the symbol source (i.e., the import statement) is in a typing-only context, but we're
// in a runtime context, abort.
if imported_name.context().is_typing() && semantic.execution_context().is_runtime() {
return Err(ResolutionError::IncompatibleContext);
}
// We also add a no-op edit to force conflicts with any other fixes that might try to
// remove the import. Consider:
//
// ```py
// import sys
//
// quit()
// ```
//
// Assume you omit this no-op edit. If you run Ruff with `unused-imports` and
// `sys-exit-alias` over this snippet, it will generate two fixes: (1) remove the unused
// `sys` import; and (2) replace `quit()` with `sys.exit()`, under the assumption that `sys`
// is already imported and available.
//
// By adding this no-op edit, we force the `unused-imports` fix to conflict with the
// `sys-exit-alias` fix, and thus will avoid applying both fixes in the same pass.
let import_edit = Edit::range_replacement(
self.locator.slice(imported_name.range()).to_string(),
imported_name.range(),
);
Ok(Some((import_edit, imported_name.into_name())))
}
/// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make
/// the symbol available in the current scope along with the bound name of the symbol.
///
/// For example, assuming `module` is `"functools"` and `member` is `"lru_cache"`, this function
/// could return an [`Edit`] to add `import functools` to the start of the file, alongside with
/// the name on which the `lru_cache` symbol would be made available (`"functools.lru_cache"`).
fn import_symbol(
&self,
symbol: &ImportRequest,
at: TextSize,
semantic: &SemanticModel,
) -> Result<(Edit, String), ResolutionError> {
if let Some(stmt) = self.find_import_from(symbol.module, at) {
// Case 1: `from functools import lru_cache` is in scope, and we're trying to reference
// `functools.cache`; thus, we add `cache` to the import, and return `"cache"` as the
// bound name.
if semantic.is_available(symbol.member) {
let Ok(import_edit) = self.add_member(stmt, symbol.member) else {
return Err(ResolutionError::InvalidEdit);
};
Ok((import_edit, symbol.member.to_string()))
} else {
Err(ResolutionError::ConflictingName(symbol.member.to_string()))
}
} else {
match symbol.style {
ImportStyle::Import => {
// Case 2a: No `functools` import is in scope; thus, we add `import functools`,
// and return `"functools.cache"` as the bound name.
if semantic.is_available(symbol.module) {
let import_edit =
self.add_import(&AnyImport::Import(Import::module(symbol.module)), at);
Ok((
import_edit,
format!(
"{module}.{member}",
module = symbol.module,
member = symbol.member
),
))
} else {
Err(ResolutionError::ConflictingName(symbol.module.to_string()))
}
}
ImportStyle::ImportFrom => {
// Case 2b: No `functools` import is in scope; thus, we add
// `from functools import cache`, and return `"cache"` as the bound name.
if semantic.is_available(symbol.member) {
let import_edit = self.add_import(
&AnyImport::ImportFrom(ImportFrom::member(
symbol.module,
symbol.member,
)),
at,
);
Ok((import_edit, symbol.member.to_string()))
} else {
Err(ResolutionError::ConflictingName(symbol.member.to_string()))
}
}
}
}
}
/// Return the top-level [`Stmt`] that imports the given module using `Stmt::ImportFrom`
/// preceding the given position, if any.
fn find_import_from(&self, module: &str, at: TextSize) -> Option<&Stmt> {
let mut import_from = None;
for stmt in &self.runtime_imports {
if stmt.start() >= at {
break;
}
if let Stmt::ImportFrom(ast::StmtImportFrom {
module: name,
names,
level,
range: _,
}) = stmt
{
if level.map_or(true, |level| level.to_u32() == 0)
&& name.as_ref().is_some_and(|name| name == module)
&& names.iter().all(|alias| alias.name.as_str() != "*")
{
import_from = Some(*stmt);
}
}
}
import_from
}
/// Add the given member to an existing `Stmt::ImportFrom` statement.
fn add_member(&self, stmt: &Stmt, member: &str) -> Result<Edit> {
let mut statement = match_statement(self.locator.slice(stmt))?;
let import_from = match_import_from(&mut statement)?;
let aliases = match_aliases(import_from)?;
aliases.push(ImportAlias {
name: NameOrAttribute::N(Box::new(Name {
value: member,
lpar: vec![],
rpar: vec![],
})),
asname: None,
comma: aliases.last().and_then(|alias| alias.comma.clone()),
});
Ok(Edit::range_replacement(
statement.codegen_stylist(self.stylist),
stmt.range(),
))
}
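// Illustrative: given the statement `from foo import bar` and `member = "baz"`,
// `add_member` returns an edit replacing the statement with `from foo import bar, baz`.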
/// Add a `TYPE_CHECKING` block to the given module.
fn add_type_checking_block(&self, content: &str, at: TextSize) -> Result<Edit> {
let insertion = if let Some(stmt) = self.preceding_import(at) {
// Insert after the last top-level import.
Insertion::end_of_statement(stmt, self.locator, self.stylist)
} else {
// Insert at the start of the file.
Insertion::start_of_file(self.python_ast, self.locator, self.stylist)
};
if insertion.is_inline() {
Err(anyhow::anyhow!(
"Cannot insert `TYPE_CHECKING` block inline"
))
} else {
Ok(insertion.into_edit(content))
}
}
/// Add an import statement to an existing `TYPE_CHECKING` block.
fn add_to_type_checking_block(
&self,
content: &str,
at: TextSize,
source_type: PySourceType,
) -> Edit {
Insertion::start_of_block(at, self.locator, self.stylist, source_type).into_edit(content)
}
/// Return the import statement that precedes the given position, if any.
fn preceding_import(&self, at: TextSize) -> Option<&'a Stmt> {
self.runtime_imports
.partition_point(|stmt| stmt.start() < at)
.checked_sub(1)
.map(|idx| self.runtime_imports[idx])
}
/// Return the `TYPE_CHECKING` block that precedes the given position, if any.
fn preceding_type_checking_block(&self, at: TextSize) -> Option<&'a Stmt> {
let block = self.type_checking_blocks.first()?;
if block.start() <= at {
Some(block)
} else {
None
}
}
}
/// An edit that moves an import to the top level of a module, making it available at runtime.
#[derive(Debug)]
pub(crate) struct RuntimeImportEdit {
/// The edit to add the import to the top-level of the module.
add_import_edit: Edit,
}
impl RuntimeImportEdit {
pub(crate) fn into_edits(self) -> Vec<Edit> {
vec![self.add_import_edit]
}
}
/// An edit that moves an import into a typing-only (`TYPE_CHECKING`) context.
#[derive(Debug)]
pub(crate) struct TypingImportEdit {
/// The edit to add the `TYPE_CHECKING` symbol to the module.
type_checking_edit: Edit,
/// The edit to add the import to a `TYPE_CHECKING` block.
add_import_edit: Edit,
}
impl TypingImportEdit {
pub(crate) fn into_edits(self) -> Vec<Edit> {
vec![self.type_checking_edit, self.add_import_edit]
}
}
#[derive(Debug)]
enum ImportStyle {
/// Import the symbol using the `import` statement (e.g. `import foo; foo.bar`).
Import,
/// Import the symbol using the `from` statement (e.g. `from foo import bar; bar`).
ImportFrom,
}
#[derive(Debug)]
pub(crate) struct ImportRequest<'a> {
/// The module from which the symbol can be imported (e.g., `foo`, in `from foo import bar`).
module: &'a str,
/// The member to import (e.g., `bar`, in `from foo import bar`).
member: &'a str,
/// The preferred style to use when importing the symbol (e.g., `import foo` or
/// `from foo import bar`), if it's not already in scope.
style: ImportStyle,
}
impl<'a> ImportRequest<'a> {
/// Create a new `ImportRequest` from a module and member. If not present in the scope,
/// the symbol should be imported using the "import" statement.
pub(crate) fn import(module: &'a str, member: &'a str) -> Self {
Self {
module,
member,
style: ImportStyle::Import,
}
}
/// Create a new `ImportRequest` from a module and member. If not present in the scope,
/// the symbol should be imported using the "import from" statement.
pub(crate) fn import_from(module: &'a str, member: &'a str) -> Self {
Self {
module,
member,
style: ImportStyle::ImportFrom,
}
}
}
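// Illustrative usage (a sketch; an `Importer`, a `SemanticModel` named `semantic`,
// and a position `call.start()` are assumed to be in scope):
//
//     let (edit, binding) = importer.get_or_import_symbol(
//         &ImportRequest::import_from("functools", "lru_cache"),
//         call.start(),
//         semantic,
//     )?;
//     // `binding` is the name under which `lru_cache` is now reachable.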
/// An existing list of module or member imports, located within an import statement.
pub(crate) struct ImportedMembers<'a> {
/// The import statement.
pub(crate) statement: &'a Stmt,
/// The "names" of the imported members.
pub(crate) names: Vec<&'a str>,
}
/// The result of an [`Importer::get_or_import_symbol`] call.
#[derive(Debug)]
pub(crate) enum ResolutionError {
/// The symbol is imported, but the import came after the current location.
ImportAfterUsage,
/// The symbol is imported, but in an incompatible context (e.g., in typing-only context, while
/// we're in a runtime context).
IncompatibleContext,
/// The symbol can't be imported, because another symbol is bound to the same name.
ConflictingName(String),
/// The symbol can't be imported due to an error in editing an existing import statement.
InvalidEdit,
}
impl std::fmt::Display for ResolutionError {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ResolutionError::ImportAfterUsage => {
fmt.write_str("Unable to use existing symbol due to late binding")
}
ResolutionError::IncompatibleContext => {
fmt.write_str("Unable to use existing symbol due to incompatible context")
}
ResolutionError::ConflictingName(binding) => std::write!(
fmt,
"Unable to insert `{binding}` into scope due to name conflict"
),
ResolutionError::InvalidEdit => {
fmt.write_str("Unable to modify existing import statement")
}
}
}
}
impl Error for ResolutionError {}

View file

@ -0,0 +1,122 @@
//! Extract docstrings via tokenization.
//!
//! See: <https://github.com/zheller/flake8-quotes/blob/ef0d9a90249a080e460b70ab62bf4b65e5aa5816/flake8_quotes/docstring_detection.py#L29>
//!
//! TODO(charlie): Consolidate with the existing AST-based docstring extraction.
use ruff_python_parser::Tok;
#[derive(Default, Copy, Clone)]
enum State {
// Start of the module: first string gets marked as a docstring.
#[default]
ExpectModuleDocstring,
// After seeing a class definition, we're waiting for the block colon (and do bracket
// counting).
ExpectClassColon,
// After seeing the block colon in a class definition, we expect a docstring.
ExpectClassDocstring,
// Same as ExpectClassColon, but for function definitions.
ExpectFunctionColon,
// Same as ExpectClassDocstring, but for function definitions.
ExpectFunctionDocstring,
// Skip tokens until we observe a `class` or `def`.
Other,
}
#[derive(Default)]
pub(crate) struct StateMachine {
state: State,
bracket_count: usize,
}
impl StateMachine {
pub(crate) fn consume(&mut self, tok: &Tok) -> bool {
match tok {
Tok::NonLogicalNewline
| Tok::Newline
| Tok::Indent
| Tok::Dedent
| Tok::Comment(..) => false,
Tok::String { .. } => {
if matches!(
self.state,
State::ExpectModuleDocstring
| State::ExpectClassDocstring
| State::ExpectFunctionDocstring
) {
self.state = State::Other;
true
} else {
false
}
}
Tok::Class => {
self.state = State::ExpectClassColon;
self.bracket_count = 0;
false
}
Tok::Def => {
self.state = State::ExpectFunctionColon;
self.bracket_count = 0;
false
}
Tok::Colon => {
if self.bracket_count == 0 {
if matches!(self.state, State::ExpectClassColon) {
self.state = State::ExpectClassDocstring;
} else if matches!(self.state, State::ExpectFunctionColon) {
self.state = State::ExpectFunctionDocstring;
}
}
false
}
Tok::Lpar | Tok::Lbrace | Tok::Lsqb => {
self.bracket_count = self.bracket_count.saturating_add(1);
if matches!(
self.state,
State::ExpectModuleDocstring
| State::ExpectClassDocstring
| State::ExpectFunctionDocstring
) {
self.state = State::Other;
}
false
}
Tok::Rpar | Tok::Rbrace | Tok::Rsqb => {
self.bracket_count = self.bracket_count.saturating_sub(1);
if matches!(
self.state,
State::ExpectModuleDocstring
| State::ExpectClassDocstring
| State::ExpectFunctionDocstring
) {
self.state = State::Other;
}
false
}
_ => {
if matches!(
self.state,
State::ExpectModuleDocstring
| State::ExpectClassDocstring
| State::ExpectFunctionDocstring
) {
self.state = State::Other;
}
false
}
}
}
}
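As an editorial sketch (not part of this diff) of how the state machine is meant to be driven: lex the source, feed every token to `consume`, and collect the ranges it flags. This assumes `ruff_python_parser::lexer::lex` and `Mode`, which this crate already depends on; since `StateMachine` is crate-private, such a helper would live alongside it.
use ruff_python_parser::{lexer::lex, Mode};
use ruff_text_size::TextRange;
/// Collect the ranges of all string tokens that look like docstrings.
fn docstring_ranges(source: &str) -> Vec<TextRange> {
    let mut machine = StateMachine::default();
    let mut ranges = Vec::new();
    // `flatten` skips lexical errors; `consume` returns `true` for docstring tokens.
    for (tok, range) in lex(source, Mode::Module).flatten() {
        if machine.consume(&tok) {
            ranges.push(range);
        }
    }
    ranges
}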

View file

@ -0,0 +1 @@
pub(crate) mod docstring_detection;

View file

@ -0,0 +1,45 @@
//! This is the library for the [Ruff] Python linter.
//!
//! **The API is currently completely unstable**
//! and subject to change drastically.
//!
//! [Ruff]: https://github.com/astral-sh/ruff
#[cfg(feature = "clap")]
pub use rule_selector::clap_completion::RuleSelectorParser;
pub use rule_selector::RuleSelector;
pub use rules::pycodestyle::rules::{IOError, SyntaxError};
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
mod autofix;
mod checkers;
pub mod codes;
mod comments;
mod cst;
pub mod directives;
mod doc_lines;
mod docstrings;
pub mod fs;
mod importer;
mod lex;
pub mod line_width;
pub mod linter;
pub mod logging;
pub mod message;
mod noqa;
pub mod packaging;
pub mod pyproject_toml;
pub mod registry;
mod renamer;
mod rule_redirects;
pub mod rule_selector;
pub mod rules;
pub mod settings;
pub mod source_kind;
pub mod upstream_categories;
#[cfg(any(test, fuzzing))]
pub mod test;
pub const RUFF_PKG_VERSION: &str = env!("CARGO_PKG_VERSION");

View file

@ -0,0 +1,247 @@
use ruff_cache::{CacheKey, CacheKeyHasher};
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::hash::Hasher;
use std::num::{NonZeroU16, NonZeroU8, ParseIntError};
use std::str::FromStr;
use unicode_width::UnicodeWidthChar;
use ruff_macros::CacheKey;
/// The maximum length of a line of text before it's considered too long.
///
/// The allowed range of values is 1..=320.
#[derive(Clone, Copy, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct LineLength(NonZeroU16);
impl LineLength {
/// Maximum allowed value for a valid [`LineLength`]
pub const MAX: u16 = 320;
/// Return the numeric value for this [`LineLength`]
pub fn value(&self) -> u16 {
self.0.get()
}
}
impl Default for LineLength {
fn default() -> Self {
Self(NonZeroU16::new(88).unwrap())
}
}
impl CacheKey for LineLength {
fn cache_key(&self, state: &mut CacheKeyHasher) {
state.write_u16(self.0.get());
}
}
/// Error type returned when parsing a [`LineLength`] from a string fails
pub enum ParseLineWidthError {
/// The string could not be parsed as a valid [u16]
ParseError(ParseIntError),
/// The [u16] value of the string is not a valid [LineLength]
TryFromIntError(LineLengthFromIntError),
}
impl std::fmt::Debug for ParseLineWidthError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for ParseLineWidthError {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ParseLineWidthError::ParseError(err) => std::fmt::Display::fmt(err, fmt),
ParseLineWidthError::TryFromIntError(err) => std::fmt::Display::fmt(err, fmt),
}
}
}
impl Error for ParseLineWidthError {}
impl FromStr for LineLength {
type Err = ParseLineWidthError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let value = u16::from_str(s).map_err(ParseLineWidthError::ParseError)?;
let value = Self::try_from(value).map_err(ParseLineWidthError::TryFromIntError)?;
Ok(value)
}
}
/// Error type returned when converting a u16 to a [`LineLength`] fails
#[derive(Clone, Copy, Debug)]
pub struct LineLengthFromIntError(pub u16);
impl TryFrom<u16> for LineLength {
type Error = LineLengthFromIntError;
fn try_from(value: u16) -> Result<Self, Self::Error> {
match NonZeroU16::try_from(value) {
Ok(value) if value.get() <= Self::MAX => Ok(LineLength(value)),
Ok(_) | Err(_) => Err(LineLengthFromIntError(value)),
}
}
}
impl std::fmt::Display for LineLengthFromIntError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(
f,
"The line width must be a value between 1 and {}.",
LineLength::MAX
)
}
}
impl From<LineLength> for u16 {
fn from(value: LineLength) -> Self {
value.0.get()
}
}
impl From<LineLength> for NonZeroU16 {
fn from(value: LineLength) -> Self {
value.0
}
}
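A quick editorial illustration of the validation rules above, relying only on the `Default`, `FromStr`, and `TryFrom` impls defined in this file:
use std::str::FromStr;
assert_eq!(LineLength::default().value(), 88);
assert_eq!(LineLength::from_str("120").unwrap().value(), 120);
// Zero and anything above `LineLength::MAX` are rejected.
assert!(LineLength::try_from(0).is_err());
assert!(LineLength::try_from(321).is_err());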
/// A measure of the width of a line of text.
///
/// This is used to determine if a line is too long.
/// It should be compared to a [`LineLength`].
#[derive(Clone, Copy, Debug)]
pub struct LineWidthBuilder {
/// The width of the line.
width: usize,
/// The column of the line.
/// This is used to calculate the width of tabs.
column: usize,
/// The tab size to use when calculating the width of tabs.
tab_size: TabSize,
}
impl Default for LineWidthBuilder {
fn default() -> Self {
Self::new(TabSize::default())
}
}
impl PartialEq for LineWidthBuilder {
fn eq(&self, other: &Self) -> bool {
self.width == other.width
}
}
impl Eq for LineWidthBuilder {}
impl PartialOrd for LineWidthBuilder {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for LineWidthBuilder {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.width.cmp(&other.width)
}
}
impl LineWidthBuilder {
pub fn get(&self) -> usize {
self.width
}
/// Creates a new `LineWidth` with the given tab size.
pub fn new(tab_size: TabSize) -> Self {
LineWidthBuilder {
width: 0,
column: 0,
tab_size,
}
}
fn update(mut self, chars: impl Iterator<Item = char>) -> Self {
let tab_size: usize = self.tab_size.as_usize();
for c in chars {
match c {
'\t' => {
let tab_offset = tab_size - (self.column % tab_size);
self.width += tab_offset;
self.column += tab_offset;
}
'\n' | '\r' => {
self.width = 0;
self.column = 0;
}
_ => {
self.width += c.width().unwrap_or(0);
self.column += 1;
}
}
}
self
}
/// Adds the given text to the line width.
#[must_use]
pub fn add_str(self, text: &str) -> Self {
self.update(text.chars())
}
/// Adds the given character to the line width.
#[must_use]
pub fn add_char(self, c: char) -> Self {
self.update(std::iter::once(c))
}
/// Adds the given width to the line width.
/// Also adds the given width to the column.
/// It is generally better to use [`LineWidthBuilder::add_str`] or [`LineWidthBuilder::add_char`].
/// The width and column should be the same for the corresponding text.
/// Currently, this is only used to add spaces.
#[must_use]
pub fn add_width(mut self, width: usize) -> Self {
self.width += width;
self.column += width;
self
}
}
impl PartialEq<LineLength> for LineWidthBuilder {
fn eq(&self, other: &LineLength) -> bool {
self.width == (other.value() as usize)
}
}
impl PartialOrd<LineLength> for LineWidthBuilder {
fn partial_cmp(&self, other: &LineLength) -> Option<std::cmp::Ordering> {
self.width.partial_cmp(&(other.value() as usize))
}
}
/// The size of a tab.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, CacheKey)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct TabSize(NonZeroU8);
impl TabSize {
pub(crate) fn as_usize(self) -> usize {
self.0.get() as usize
}
}
impl Default for TabSize {
fn default() -> Self {
Self(NonZeroU8::new(4).unwrap())
}
}
impl From<NonZeroU8> for TabSize {
fn from(tab_size: NonZeroU8) -> Self {
Self(tab_size)
}
}
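A small editorial sketch of how tab expansion interacts with the column tracking above, using the default tab size of 4 and the `PartialOrd<LineLength>` impl for the final comparison:
let width = LineWidthBuilder::new(TabSize::default())
    .add_str("if\t") // "if" -> width 2; the tab pads to the next stop -> width 4
    .add_char('x'); // -> width 5
assert_eq!(width.get(), 5);
assert!(width < LineLength::default());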

View file

@ -0,0 +1,736 @@
use std::borrow::Cow;
use std::ops::Deref;
use std::path::Path;
use anyhow::{anyhow, Result};
use colored::Colorize;
use itertools::Itertools;
use log::error;
use rustc_hash::FxHashMap;
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::imports::ImportMap;
use ruff_python_ast::PySourceType;
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{AsMode, ParseError};
use ruff_source_file::{Locator, SourceFileBuilder};
use ruff_text_size::Ranged;
use crate::autofix::{fix_file, FixResult};
use crate::checkers::ast::check_ast;
use crate::checkers::filesystem::check_file_path;
use crate::checkers::imports::check_imports;
use crate::checkers::noqa::check_noqa;
use crate::checkers::physical_lines::check_physical_lines;
use crate::checkers::tokens::check_tokens;
use crate::directives::Directives;
use crate::doc_lines::{doc_lines_from_ast, doc_lines_from_tokens};
use crate::logging::DisplayParseError;
use crate::message::Message;
use crate::noqa::add_noqa;
use crate::registry::{AsRule, Rule};
use crate::rules::pycodestyle;
use crate::settings::{flags, Settings};
use crate::source_kind::SourceKind;
use crate::{directives, fs};
/// A [`Result`]-like type that returns both data and an error. Used to return
/// diagnostics even in the face of parse errors, since many diagnostics can be
/// generated without a full AST.
pub struct LinterResult<T> {
pub data: T,
pub error: Option<ParseError>,
}
impl<T> LinterResult<T> {
const fn new(data: T, error: Option<ParseError>) -> Self {
Self { data, error }
}
fn map<U, F: FnOnce(T) -> U>(self, f: F) -> LinterResult<U> {
LinterResult::new(f(self.data), self.error)
}
}
pub type FixTable = FxHashMap<Rule, usize>;
pub struct FixerResult<'a> {
/// The result returned by the linter, after applying any fixes.
pub result: LinterResult<(Vec<Message>, Option<ImportMap>)>,
/// The resulting source code, after applying any fixes.
pub transformed: Cow<'a, SourceKind>,
/// The number of fixes applied for each [`Rule`].
pub fixed: FixTable,
}
/// Generate `Diagnostic`s from the source code contents at the
/// given `Path`.
#[allow(clippy::too_many_arguments)]
pub fn check_path(
path: &Path,
package: Option<&Path>,
tokens: Vec<LexResult>,
locator: &Locator,
stylist: &Stylist,
indexer: &Indexer,
directives: &Directives,
settings: &Settings,
noqa: flags::Noqa,
source_kind: &SourceKind,
source_type: PySourceType,
) -> LinterResult<(Vec<Diagnostic>, Option<ImportMap>)> {
// Aggregate all diagnostics.
let mut diagnostics = vec![];
let mut imports = None;
let mut error = None;
// Collect doc lines. This requires a rare mix of tokens (for comments) and AST
// (for docstrings), which demands special-casing at this level.
let use_doc_lines = settings.rules.enabled(Rule::DocLineTooLong);
let mut doc_lines = vec![];
if use_doc_lines {
doc_lines.extend(doc_lines_from_tokens(&tokens));
}
// Run the token-based rules.
if settings
.rules
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_tokens())
{
diagnostics.extend(check_tokens(
&tokens,
path,
locator,
indexer,
settings,
source_type.is_stub(),
));
}
// Run the filesystem-based rules.
if settings
.rules
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_filesystem())
{
diagnostics.extend(check_file_path(path, package, settings));
}
// Run the logical line-based rules.
if settings
.rules
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_logical_lines())
{
diagnostics.extend(crate::checkers::logical_lines::check_logical_lines(
&tokens, locator, stylist, settings,
));
}
// Run the AST-based rules.
let use_ast = settings
.rules
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_ast());
let use_imports = !directives.isort.skip_file
&& settings
.rules
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_imports());
if use_ast || use_imports || use_doc_lines {
match ruff_python_parser::parse_program_tokens(
tokens,
&path.to_string_lossy(),
source_type.is_ipynb(),
) {
Ok(python_ast) => {
if use_ast {
diagnostics.extend(check_ast(
&python_ast,
locator,
stylist,
indexer,
&directives.noqa_line_for,
settings,
noqa,
path,
package,
source_type,
));
}
if use_imports {
let (import_diagnostics, module_imports) = check_imports(
&python_ast,
locator,
indexer,
&directives.isort,
settings,
stylist,
path,
package,
source_kind,
source_type,
);
imports = module_imports;
diagnostics.extend(import_diagnostics);
}
if use_doc_lines {
doc_lines.extend(doc_lines_from_ast(&python_ast, locator));
}
}
Err(parse_error) => {
// Always add a diagnostic for the syntax error, regardless of whether
// `Rule::SyntaxError` is enabled. We avoid propagating the syntax error
// if it's disabled via any of the usual mechanisms (e.g., `noqa`,
// `per-file-ignores`), and the easiest way to detect that suppression is
// to see if the diagnostic persists to the end of the function.
pycodestyle::rules::syntax_error(&mut diagnostics, &parse_error, locator);
error = Some(parse_error);
}
}
}
    // Sort and deduplicate any doc lines.
if use_doc_lines {
doc_lines.sort_unstable();
doc_lines.dedup();
}
// Run the lines-based rules.
if settings
.rules
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_physical_lines())
{
diagnostics.extend(check_physical_lines(
locator, stylist, indexer, &doc_lines, settings,
));
}
// Ignore diagnostics based on per-file-ignores.
if !diagnostics.is_empty() && !settings.per_file_ignores.is_empty() {
let ignores = fs::ignores_from_path(path, &settings.per_file_ignores);
if !ignores.is_empty() {
diagnostics.retain(|diagnostic| !ignores.contains(diagnostic.kind.rule()));
}
};
// Enforce `noqa` directives.
if (noqa.into() && !diagnostics.is_empty())
|| settings
.rules
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_noqa())
{
let ignored = check_noqa(
&mut diagnostics,
path,
locator,
indexer.comment_ranges(),
&directives.noqa_line_for,
error.is_none(),
settings,
);
if noqa.into() {
for index in ignored.iter().rev() {
diagnostics.swap_remove(*index);
}
}
}
// If there was a syntax error, check if it should be discarded.
if error.is_some() {
// If the syntax error was removed by _any_ of the above disablement methods (e.g., a
// `noqa` directive, or a `per-file-ignore`), discard it.
if !diagnostics
.iter()
.any(|diagnostic| diagnostic.kind.rule() == Rule::SyntaxError)
{
error = None;
}
// If the syntax error _diagnostic_ is disabled, discard the _diagnostic_.
if !settings.rules.enabled(Rule::SyntaxError) {
diagnostics.retain(|diagnostic| diagnostic.kind.rule() != Rule::SyntaxError);
}
}
LinterResult::new((diagnostics, imports), error)
}
const MAX_ITERATIONS: usize = 100;
/// Add any missing `# noqa` pragmas to the source code at the given `Path`.
pub fn add_noqa_to_path(
path: &Path,
package: Option<&Path>,
source_kind: &SourceKind,
source_type: PySourceType,
settings: &Settings,
) -> Result<usize> {
let contents = source_kind.source_code();
// Tokenize once.
let tokens: Vec<LexResult> = ruff_python_parser::tokenize(contents, source_type.as_mode());
// Map row and column locations to byte slices (lazily).
let locator = Locator::new(contents);
// Detect the current code style (lazily).
let stylist = Stylist::from_tokens(&tokens, &locator);
    // Extract indices from the code.
let indexer = Indexer::from_tokens(&tokens, &locator);
// Extract the `# noqa` and `# isort: skip` directives from the source.
let directives = directives::extract_directives(
&tokens,
directives::Flags::from_settings(settings),
&locator,
&indexer,
);
// Generate diagnostics, ignoring any existing `noqa` directives.
let LinterResult {
data: diagnostics,
error,
} = check_path(
path,
package,
tokens,
&locator,
&stylist,
&indexer,
&directives,
settings,
flags::Noqa::Disabled,
source_kind,
source_type,
);
// Log any parse errors.
if let Some(err) = error {
error!(
"{}",
DisplayParseError::new(err, locator.to_source_code(), source_kind)
);
}
// Add any missing `# noqa` pragmas.
// TODO(dhruvmanila): Add support for Jupyter Notebooks
add_noqa(
path,
&diagnostics.0,
&locator,
indexer.comment_ranges(),
&directives.noqa_line_for,
stylist.line_ending(),
)
}
/// Generate a [`Message`] for each [`Diagnostic`] triggered by the given source
/// code.
pub fn lint_only(
path: &Path,
package: Option<&Path>,
settings: &Settings,
noqa: flags::Noqa,
source_kind: &SourceKind,
source_type: PySourceType,
) -> LinterResult<(Vec<Message>, Option<ImportMap>)> {
// Tokenize once.
let tokens: Vec<LexResult> =
ruff_python_parser::tokenize(source_kind.source_code(), source_type.as_mode());
// Map row and column locations to byte slices (lazily).
let locator = Locator::new(source_kind.source_code());
// Detect the current code style (lazily).
let stylist = Stylist::from_tokens(&tokens, &locator);
    // Extract indices from the code.
let indexer = Indexer::from_tokens(&tokens, &locator);
// Extract the `# noqa` and `# isort: skip` directives from the source.
let directives = directives::extract_directives(
&tokens,
directives::Flags::from_settings(settings),
&locator,
&indexer,
);
// Generate diagnostics.
let result = check_path(
path,
package,
tokens,
&locator,
&stylist,
&indexer,
&directives,
settings,
noqa,
source_kind,
source_type,
);
result.map(|(diagnostics, imports)| {
(
diagnostics_to_messages(diagnostics, path, &locator, &directives),
imports,
)
})
}
/// Convert from diagnostics to messages.
fn diagnostics_to_messages(
diagnostics: Vec<Diagnostic>,
path: &Path,
locator: &Locator,
directives: &Directives,
) -> Vec<Message> {
let file = once_cell::unsync::Lazy::new(|| {
let mut builder =
SourceFileBuilder::new(path.to_string_lossy().as_ref(), locator.contents());
if let Some(line_index) = locator.line_index() {
builder.set_line_index(line_index.clone());
}
builder.finish()
});
diagnostics
.into_iter()
.map(|diagnostic| {
let noqa_offset = directives.noqa_line_for.resolve(diagnostic.start());
Message::from_diagnostic(diagnostic, file.deref().clone(), noqa_offset)
})
.collect()
}
/// Generate `Diagnostic`s from source code content, iteratively autofixing
/// until stable.
pub fn lint_fix<'a>(
path: &Path,
package: Option<&Path>,
noqa: flags::Noqa,
settings: &Settings,
source_kind: &'a SourceKind,
source_type: PySourceType,
) -> Result<FixerResult<'a>> {
let mut transformed = Cow::Borrowed(source_kind);
// Track the number of fixed errors across iterations.
let mut fixed = FxHashMap::default();
    // As an escape hatch, bail after `MAX_ITERATIONS` iterations.
let mut iterations = 0;
// Track whether the _initial_ source code was parseable.
let mut parseable = false;
// Continuously autofix until the source code stabilizes.
loop {
// Tokenize once.
let tokens: Vec<LexResult> =
ruff_python_parser::tokenize(transformed.source_code(), source_type.as_mode());
// Map row and column locations to byte slices (lazily).
let locator = Locator::new(transformed.source_code());
// Detect the current code style (lazily).
let stylist = Stylist::from_tokens(&tokens, &locator);
        // Extract indices from the code.
let indexer = Indexer::from_tokens(&tokens, &locator);
// Extract the `# noqa` and `# isort: skip` directives from the source.
let directives = directives::extract_directives(
&tokens,
directives::Flags::from_settings(settings),
&locator,
&indexer,
);
// Generate diagnostics.
let result = check_path(
path,
package,
tokens,
&locator,
&stylist,
&indexer,
&directives,
settings,
noqa,
source_kind,
source_type,
);
if iterations == 0 {
parseable = result.error.is_none();
} else {
// If the source code was parseable on the first pass, but is no
// longer parseable on a subsequent pass, then we've introduced a
// syntax error. Return the original code.
if parseable && result.error.is_some() {
report_autofix_syntax_error(
path,
transformed.source_code(),
&result.error.unwrap(),
fixed.keys().copied(),
);
return Err(anyhow!("Autofix introduced a syntax error"));
}
}
// Apply autofix.
if let Some(FixResult {
code: fixed_contents,
fixes: applied,
source_map,
}) = fix_file(&result.data.0, &locator)
{
if iterations < MAX_ITERATIONS {
// Count the number of fixed errors.
for (rule, count) in applied {
*fixed.entry(rule).or_default() += count;
}
transformed = Cow::Owned(transformed.updated(fixed_contents, &source_map));
// Increment the iteration count.
iterations += 1;
// Re-run the linter pass (by avoiding the break).
continue;
}
report_failed_to_converge_error(path, transformed.source_code(), &result.data.0);
}
return Ok(FixerResult {
result: result.map(|(diagnostics, imports)| {
(
diagnostics_to_messages(diagnostics, path, &locator, &directives),
imports,
)
}),
transformed,
fixed,
});
}
}
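A usage sketch (editorial; the path and rule choice are illustrative, and a `SourceKind::Python` variant for plain sources is assumed): running the fix loop over an in-memory source. `Settings::for_rule` and `SourceKind` are exercised the same way in the tests below.
let source = SourceKind::Python("import os\nimport os\n".to_string());
let settings = Settings::for_rule(Rule::UnusedImport);
let FixerResult { transformed, fixed, .. } = lint_fix(
    Path::new("example.py"), // hypothetical path, used only for reporting
    None,
    flags::Noqa::Enabled,
    &settings,
    &source,
    PySourceType::Python,
)
.expect("autofix should converge without introducing a syntax error");
// `fixed` counts applied fixes per rule; `transformed` holds the rewritten source.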
fn collect_rule_codes(rules: impl IntoIterator<Item = Rule>) -> String {
rules
.into_iter()
.map(|rule| rule.noqa_code().to_string())
.sorted_unstable()
.dedup()
.join(", ")
}
#[allow(clippy::print_stderr)]
fn report_failed_to_converge_error(path: &Path, transformed: &str, diagnostics: &[Diagnostic]) {
let codes = collect_rule_codes(diagnostics.iter().map(|diagnostic| diagnostic.kind.rule()));
if cfg!(debug_assertions) {
eprintln!(
"{}{} Failed to converge after {} iterations in `{}` with rule codes {}:---\n{}\n---",
"debug error".red().bold(),
":".bold(),
MAX_ITERATIONS,
fs::relativize_path(path),
codes,
transformed,
);
} else {
eprintln!(
r#"
{}{} Failed to converge after {} iterations.
This indicates a bug in Ruff. If you could open an issue at:
https://github.com/astral-sh/ruff/issues/new?title=%5BInfinite%20loop%5D
...quoting the contents of `{}`, the rule codes {}, along with the `pyproject.toml` settings and executed command, we'd be very appreciative!
"#,
"error".red().bold(),
":".bold(),
MAX_ITERATIONS,
fs::relativize_path(path),
codes
);
}
}
#[allow(clippy::print_stderr)]
fn report_autofix_syntax_error(
path: &Path,
transformed: &str,
error: &ParseError,
rules: impl IntoIterator<Item = Rule>,
) {
let codes = collect_rule_codes(rules);
if cfg!(debug_assertions) {
eprintln!(
"{}{} Autofix introduced a syntax error in `{}` with rule codes {}: {}\n---\n{}\n---",
"error".red().bold(),
":".bold(),
fs::relativize_path(path),
codes,
error,
transformed,
);
} else {
eprintln!(
r#"
{}{} Autofix introduced a syntax error. Reverting all changes.
This indicates a bug in Ruff. If you could open an issue at:
https://github.com/astral-sh/ruff/issues/new?title=%5BAutofix%20error%5D
...quoting the contents of `{}`, the rule codes {}, along with the `pyproject.toml` settings and executed command, we'd be very appreciative!
"#,
"error".red().bold(),
":".bold(),
fs::relativize_path(path),
codes,
);
}
}
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use ruff_notebook::{Notebook, NotebookError};
use crate::registry::Rule;
use crate::source_kind::SourceKind;
use crate::test::{test_contents, test_notebook_path, TestedNotebook};
use crate::{assert_messages, settings};
/// Construct a path to a Jupyter notebook in the `resources/test/fixtures/jupyter` directory.
fn notebook_path(path: impl AsRef<Path>) -> std::path::PathBuf {
Path::new("../ruff_notebook/resources/test/fixtures/jupyter").join(path)
}
#[test]
fn test_import_sorting() -> Result<(), NotebookError> {
let actual = notebook_path("isort.ipynb");
let expected = notebook_path("isort_expected.ipynb");
let TestedNotebook {
messages,
source_notebook,
..
} = test_notebook_path(
&actual,
expected,
&settings::Settings::for_rule(Rule::UnsortedImports),
)?;
assert_messages!(messages, actual, source_notebook);
Ok(())
}
#[test]
fn test_ipy_escape_command() -> Result<(), NotebookError> {
let actual = notebook_path("ipy_escape_command.ipynb");
let expected = notebook_path("ipy_escape_command_expected.ipynb");
let TestedNotebook {
messages,
source_notebook,
..
} = test_notebook_path(
&actual,
expected,
&settings::Settings::for_rule(Rule::UnusedImport),
)?;
assert_messages!(messages, actual, source_notebook);
Ok(())
}
#[test]
fn test_unused_variable() -> Result<(), NotebookError> {
let actual = notebook_path("unused_variable.ipynb");
let expected = notebook_path("unused_variable_expected.ipynb");
let TestedNotebook {
messages,
source_notebook,
..
} = test_notebook_path(
&actual,
expected,
&settings::Settings::for_rule(Rule::UnusedVariable),
)?;
assert_messages!(messages, actual, source_notebook);
Ok(())
}
#[test]
fn test_json_consistency() -> Result<()> {
let actual_path = notebook_path("before_fix.ipynb");
let expected_path = notebook_path("after_fix.ipynb");
let TestedNotebook {
linted_notebook: fixed_notebook,
..
} = test_notebook_path(
actual_path,
&expected_path,
&settings::Settings::for_rule(Rule::UnusedImport),
)?;
let mut writer = Vec::new();
fixed_notebook.write(&mut writer)?;
let actual = String::from_utf8(writer)?;
let expected = std::fs::read_to_string(expected_path)?;
assert_eq!(actual, expected);
Ok(())
}
#[test_case(Path::new("before_fix.ipynb"), true; "trailing_newline")]
#[test_case(Path::new("no_trailing_newline.ipynb"), false; "no_trailing_newline")]
fn test_trailing_newline(path: &Path, trailing_newline: bool) -> Result<()> {
let notebook = Notebook::from_path(&notebook_path(path))?;
assert_eq!(notebook.trailing_newline(), trailing_newline);
let mut writer = Vec::new();
notebook.write(&mut writer)?;
let string = String::from_utf8(writer)?;
assert_eq!(string.ends_with('\n'), trailing_newline);
Ok(())
}
// Version <4.5, don't emit cell ids
#[test_case(Path::new("no_cell_id.ipynb"), false; "no_cell_id")]
// Version 4.5, cell ids are missing and need to be added
#[test_case(Path::new("add_missing_cell_id.ipynb"), true; "add_missing_cell_id")]
fn test_cell_id(path: &Path, has_id: bool) -> Result<()> {
let source_notebook = Notebook::from_path(&notebook_path(path))?;
let source_kind = SourceKind::IpyNotebook(source_notebook);
let (_, transformed) = test_contents(
&source_kind,
path,
&settings::Settings::for_rule(Rule::UnusedImport),
);
let linted_notebook = transformed.into_owned().expect_ipy_notebook();
let mut writer = Vec::new();
linted_notebook.write(&mut writer)?;
let actual = String::from_utf8(writer)?;
if has_id {
assert!(actual.contains(r#""id": ""#));
} else {
assert!(!actual.contains(r#""id":"#));
}
Ok(())
}
}

View file

@ -0,0 +1,293 @@
use std::fmt::{Display, Formatter, Write};
use std::path::Path;
use std::sync::Mutex;
use anyhow::Result;
use colored::Colorize;
use fern;
use log::Level;
use once_cell::sync::Lazy;
use ruff_python_parser::{ParseError, ParseErrorType};
use ruff_source_file::{OneIndexed, SourceCode, SourceLocation};
use crate::fs;
use crate::source_kind::SourceKind;
use ruff_notebook::Notebook;
pub static WARNINGS: Lazy<Mutex<Vec<&'static str>>> = Lazy::new(Mutex::default);
/// Warn a user once, with uniqueness determined by the given ID.
#[macro_export]
macro_rules! warn_user_once_by_id {
($id:expr, $($arg:tt)*) => {
use colored::Colorize;
use log::warn;
if let Ok(mut states) = $crate::logging::WARNINGS.lock() {
if !states.contains(&$id) {
let message = format!("{}", format_args!($($arg)*));
warn!("{}", message.bold());
states.push($id);
}
}
};
}
/// Warn a user once, with uniqueness determined by the calling location itself.
#[macro_export]
macro_rules! warn_user_once {
($($arg:tt)*) => {
use colored::Colorize;
use log::warn;
static WARNED: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);
if !WARNED.swap(true, std::sync::atomic::Ordering::SeqCst) {
let message = format!("{}", format_args!($($arg)*));
warn!("{}", message.bold());
}
};
}
#[macro_export]
macro_rules! warn_user {
($($arg:tt)*) => {{
use colored::Colorize;
use log::warn;
let message = format!("{}", format_args!($($arg)*));
warn!("{}", message.bold());
}};
}
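For illustration (editorial; the messages and ID are hypothetical), typical call sites for the deduplicating variants:
// Emitted at most once per process, keyed by the calling location:
warn_user_once!("`--format` is deprecated; use `--output-format` instead");
// Emitted at most once per unique ID:
warn_user_once_by_id!("format-deprecation", "`--format` is deprecated");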
#[macro_export]
macro_rules! notify_user {
($($arg:tt)*) => {
println!(
"[{}] {}",
chrono::Local::now()
.format("%H:%M:%S %p")
.to_string()
.dimmed(),
format_args!($($arg)*)
)
}
}
#[derive(Debug, Default, PartialOrd, Ord, PartialEq, Eq, Copy, Clone)]
pub enum LogLevel {
/// No output ([`log::LevelFilter::Off`]).
Silent,
/// Only show lint violations, with no decorative output
/// ([`log::LevelFilter::Off`]).
Quiet,
/// All user-facing output ([`log::LevelFilter::Info`]).
#[default]
Default,
/// All user-facing output ([`log::LevelFilter::Debug`]).
Verbose,
}
impl LogLevel {
#[allow(clippy::trivially_copy_pass_by_ref)]
const fn level_filter(&self) -> log::LevelFilter {
match self {
LogLevel::Default => log::LevelFilter::Info,
LogLevel::Verbose => log::LevelFilter::Debug,
LogLevel::Quiet => log::LevelFilter::Off,
LogLevel::Silent => log::LevelFilter::Off,
}
}
}
pub fn set_up_logging(level: &LogLevel) -> Result<()> {
fern::Dispatch::new()
.format(|out, message, record| match record.level() {
Level::Error => {
out.finish(format_args!(
"{}{} {}",
"error".red().bold(),
":".bold(),
message
));
}
Level::Warn => {
out.finish(format_args!(
"{}{} {}",
"warning".yellow().bold(),
":".bold(),
message
));
}
Level::Info | Level::Debug | Level::Trace => {
out.finish(format_args!(
"{}[{}][{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
record.target(),
record.level(),
message
));
}
})
.level(level.level_filter())
.level_for("globset", log::LevelFilter::Warn)
.chain(std::io::stderr())
.apply()?;
Ok(())
}
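A minimal wiring sketch (editorial): initialize logging once at startup, then use the standard `log` macros.
fn main() -> anyhow::Result<()> {
    set_up_logging(&LogLevel::Verbose)?;
    log::warn!("shown at `Default` and above");
    log::debug!("only shown at `Verbose`");
    Ok(())
}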
pub struct DisplayParseError<'a> {
error: ParseError,
source_code: SourceCode<'a, 'a>,
source_kind: &'a SourceKind,
}
impl<'a> DisplayParseError<'a> {
pub fn new(
error: ParseError,
source_code: SourceCode<'a, 'a>,
source_kind: &'a SourceKind,
) -> Self {
Self {
error,
source_code,
source_kind,
}
}
}
impl Display for DisplayParseError<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{header} {path}{colon}",
header = "Failed to parse".bold(),
path = fs::relativize_path(Path::new(&self.error.source_path)).bold(),
colon = ":".cyan(),
)?;
let source_location = self.source_code.source_location(self.error.offset);
// If we're working on a Jupyter notebook, translate the positions
// with respect to the cell and row in the cell. This is the same
// format as the `TextEmitter`.
let error_location =
if let Some(jupyter_index) = self.source_kind.as_ipy_notebook().map(Notebook::index) {
write!(
f,
"cell {cell}{colon}",
cell = jupyter_index
.cell(source_location.row.get())
.unwrap_or_default(),
colon = ":".cyan(),
)?;
SourceLocation {
row: OneIndexed::new(
jupyter_index
.cell_row(source_location.row.get())
.unwrap_or(1) as usize,
)
.unwrap(),
column: source_location.column,
}
} else {
source_location
};
write!(
f,
"{row}{colon}{column}{colon} {inner}",
row = error_location.row,
column = error_location.column,
colon = ":".cyan(),
inner = &DisplayParseErrorType(&self.error.error)
)
}
}
pub(crate) struct DisplayParseErrorType<'a>(&'a ParseErrorType);
impl<'a> DisplayParseErrorType<'a> {
pub(crate) fn new(error: &'a ParseErrorType) -> Self {
Self(error)
}
}
impl Display for DisplayParseErrorType<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self.0 {
ParseErrorType::Eof => write!(f, "Expected token but reached end of file."),
ParseErrorType::ExtraToken(ref tok) => write!(
f,
"Got extraneous token: {tok}",
tok = TruncateAtNewline(&tok)
),
ParseErrorType::InvalidToken => write!(f, "Got invalid token"),
ParseErrorType::UnrecognizedToken(ref tok, ref expected) => {
if let Some(expected) = expected.as_ref() {
write!(
f,
"Expected '{expected}', but got {tok}",
tok = TruncateAtNewline(&tok)
)
} else {
write!(f, "Unexpected token {tok}", tok = TruncateAtNewline(&tok))
}
}
ParseErrorType::Lexical(ref error) => write!(f, "{error}"),
}
}
}
/// Truncates the display text before the first newline character to avoid line breaks.
struct TruncateAtNewline<'a>(&'a dyn Display);
impl Display for TruncateAtNewline<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
struct TruncateAdapter<'a> {
inner: &'a mut dyn Write,
after_new_line: bool,
}
impl Write for TruncateAdapter<'_> {
fn write_str(&mut self, s: &str) -> std::fmt::Result {
if self.after_new_line {
Ok(())
} else {
if let Some(end) = s.find(['\n', '\r']) {
self.inner.write_str(&s[..end])?;
self.inner.write_str("\u{23ce}...")?;
self.after_new_line = true;
Ok(())
} else {
self.inner.write_str(s)
}
}
}
}
write!(
TruncateAdapter {
inner: f,
after_new_line: false,
},
"{}",
self.0
)
}
}
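A behavior sketch (editorial) of the truncation above: everything after the first line break is replaced with a return-symbol marker.
assert_eq!(
    TruncateAtNewline(&"unexpected token\nnext line").to_string(),
    "unexpected token\u{23ce}..."
);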
#[cfg(test)]
mod tests {
use crate::logging::LogLevel;
#[test]
fn ordering() {
assert!(LogLevel::Default > LogLevel::Silent);
assert!(LogLevel::Default >= LogLevel::Default);
assert!(LogLevel::Quiet > LogLevel::Silent);
assert!(LogLevel::Verbose > LogLevel::Default);
assert!(LogLevel::Verbose > LogLevel::Silent);
}
}

View file

@ -0,0 +1,59 @@
use std::io::Write;
use ruff_source_file::SourceLocation;
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
/// Generate error logging commands for Azure Pipelines format.
/// See [documentation](https://learn.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash#logissue-log-an-error-or-warning)
#[derive(Default)]
pub struct AzureEmitter;
impl Emitter for AzureEmitter {
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
context: &EmitterContext,
) -> anyhow::Result<()> {
for message in messages {
let location = if context.is_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
SourceLocation::default()
} else {
message.compute_start_location()
};
writeln!(
writer,
"##vso[task.logissue type=error\
;sourcepath={filename};linenumber={line};columnnumber={col};code={code};]{body}",
filename = message.filename(),
line = location.row,
col = location.column,
code = message.kind.rule().noqa_code(),
body = message.kind.body,
)?;
}
Ok(())
}
}
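For the unused-import fixture used in the shared emitter tests, the emitted logging command looks roughly like this (editorial illustration; the code and message come from the fixture):
##vso[task.logissue type=error;sourcepath=fib.py;linenumber=1;columnnumber=8;code=F401;]`os` imported but unused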
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::message::tests::{capture_emitter_output, create_messages};
use crate::message::AzureEmitter;
#[test]
fn output() {
let mut emitter = AzureEmitter;
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
}

View file

@ -0,0 +1,200 @@
use std::fmt::{Display, Formatter};
use std::num::NonZeroUsize;
use colored::{Color, ColoredString, Colorize, Styles};
use ruff_text_size::{Ranged, TextRange, TextSize};
use similar::{ChangeTag, TextDiff};
use ruff_diagnostics::{Applicability, Fix};
use ruff_source_file::{OneIndexed, SourceFile};
use crate::message::Message;
/// Renders a diff that shows the code fixes.
///
/// The implementation isn't fully fleshed out and is only used by tests. Before using it in
/// production:
/// * Improve layout
/// * Replace tabs with spaces for a consistent experience across terminals
/// * Replace zero-width whitespaces
/// * Print a simpler diff if only a single line has changed
/// * Compute the diff from the [`Edit`] because diff calculation is expensive.
pub(super) struct Diff<'a> {
fix: &'a Fix,
source_code: &'a SourceFile,
}
impl<'a> Diff<'a> {
pub(crate) fn from_message(message: &'a Message) -> Option<Diff> {
message.fix.as_ref().map(|fix| Diff {
source_code: &message.file,
fix,
})
}
}
impl Display for Diff<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let mut output = String::with_capacity(self.source_code.source_text().len());
let mut last_end = TextSize::default();
for edit in self.fix.edits() {
output.push_str(
self.source_code
.slice(TextRange::new(last_end, edit.start())),
);
output.push_str(edit.content().unwrap_or_default());
last_end = edit.end();
}
output.push_str(&self.source_code.source_text()[usize::from(last_end)..]);
let diff = TextDiff::from_lines(self.source_code.source_text(), &output);
let message = match self.fix.applicability() {
Applicability::Automatic => "Fix",
Applicability::Suggested => "Suggested fix",
Applicability::Manual => "Possible fix",
Applicability::Unspecified => "Suggested fix", /* For backwards compatibility, unspecified fixes are 'suggested' */
};
writeln!(f, " {}", message.blue())?;
let (largest_old, largest_new) = diff
.ops()
.last()
.map(|op| (op.old_range().start, op.new_range().start))
.unwrap_or_default();
        let digit_width =
            calculate_print_width(OneIndexed::from_zero_indexed(largest_new.max(largest_old)));
for (idx, group) in diff.grouped_ops(3).iter().enumerate() {
if idx > 0 {
writeln!(f, "{:-^1$}", "-", 80)?;
}
for op in group {
for change in diff.iter_inline_changes(op) {
let sign = match change.tag() {
ChangeTag::Delete => "-",
ChangeTag::Insert => "+",
ChangeTag::Equal => " ",
};
let line_style = LineStyle::from(change.tag());
let old_index = change.old_index().map(OneIndexed::from_zero_indexed);
let new_index = change.new_index().map(OneIndexed::from_zero_indexed);
write!(
f,
"{} {} |{}",
Line {
index: old_index,
                            width: digit_width
},
Line {
index: new_index,
                            width: digit_width
},
line_style.apply_to(sign).bold()
)?;
for (emphasized, value) in change.iter_strings_lossy() {
if emphasized {
write!(f, "{}", line_style.apply_to(&value).underline().on_black())?;
} else {
write!(f, "{}", line_style.apply_to(&value))?;
}
}
if change.missing_newline() {
writeln!(f)?;
}
}
}
}
Ok(())
}
}
struct LineStyle {
fgcolor: Option<Color>,
style: Option<Styles>,
}
impl LineStyle {
fn apply_to(&self, input: &str) -> ColoredString {
let mut colored = ColoredString::from(input);
if let Some(color) = self.fgcolor {
colored = colored.color(color);
}
if let Some(style) = self.style {
match style {
Styles::Clear => colored.clear(),
Styles::Bold => colored.bold(),
Styles::Dimmed => colored.dimmed(),
Styles::Underline => colored.underline(),
Styles::Reversed => colored.reversed(),
Styles::Italic => colored.italic(),
Styles::Blink => colored.blink(),
Styles::Hidden => colored.hidden(),
Styles::Strikethrough => colored.strikethrough(),
}
} else {
colored
}
}
}
impl From<ChangeTag> for LineStyle {
fn from(value: ChangeTag) -> Self {
match value {
ChangeTag::Equal => LineStyle {
fgcolor: None,
style: Some(Styles::Dimmed),
},
ChangeTag::Delete => LineStyle {
fgcolor: Some(Color::Red),
style: None,
},
ChangeTag::Insert => LineStyle {
fgcolor: Some(Color::Green),
style: None,
},
}
}
}
struct Line {
index: Option<OneIndexed>,
width: NonZeroUsize,
}
impl Display for Line {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
match self.index {
None => {
for _ in 0..self.width.get() {
f.write_str(" ")?;
}
Ok(())
}
Some(idx) => write!(f, "{:<width$}", idx, width = self.width.get()),
}
}
}
/// Calculate the length of the string representation of `value`
pub(super) fn calculate_print_width(mut value: OneIndexed) -> NonZeroUsize {
const TEN: OneIndexed = OneIndexed::from_zero_indexed(9);
let mut width = OneIndexed::ONE;
while value >= TEN {
value = OneIndexed::new(value.get() / 10).unwrap_or(OneIndexed::MIN);
width = width.checked_add(1).unwrap();
}
width
}
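A quick editorial check of the digit-width computation above:
assert_eq!(calculate_print_width(OneIndexed::from_zero_indexed(8)).get(), 1); // 9
assert_eq!(calculate_print_width(OneIndexed::from_zero_indexed(9)).get(), 2); // 10
assert_eq!(calculate_print_width(OneIndexed::from_zero_indexed(99)).get(), 3); // 100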

View file

@ -0,0 +1,74 @@
use std::io::Write;
use ruff_source_file::SourceLocation;
use crate::fs::relativize_path;
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
/// Generate error workflow command in GitHub Actions format.
/// See: [GitHub documentation](https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message)
#[derive(Default)]
pub struct GithubEmitter;
impl Emitter for GithubEmitter {
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
context: &EmitterContext,
) -> anyhow::Result<()> {
for message in messages {
let source_location = message.compute_start_location();
let location = if context.is_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
SourceLocation::default()
} else {
source_location.clone()
};
let end_location = message.compute_end_location();
write!(
writer,
"::error title=Ruff \
({code}),file={file},line={row},col={column},endLine={end_row},endColumn={end_column}::",
code = message.kind.rule().noqa_code(),
file = message.filename(),
row = source_location.row,
column = source_location.column,
end_row = end_location.row,
end_column = end_location.column,
)?;
writeln!(
writer,
"{path}:{row}:{column}: {code} {body}",
path = relativize_path(message.filename()),
row = location.row,
column = location.column,
code = message.kind.rule().noqa_code(),
body = message.kind.body,
)?;
}
Ok(())
}
}
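Against the same unused-import fixture as the other emitter tests, the workflow command comes out roughly as (editorial illustration):
::error title=Ruff (F401),file=fib.py,line=1,col=8,endLine=1,endColumn=10::fib.py:1:8: F401 `os` imported but unused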
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::message::tests::{capture_emitter_output, create_messages};
use crate::message::GithubEmitter;
#[test]
fn output() {
let mut emitter = GithubEmitter;
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
}

View file

@ -0,0 +1,166 @@
use std::collections::hash_map::DefaultHasher;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::io::Write;
use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer};
use serde_json::json;
use crate::fs::{relativize_path, relativize_path_to};
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
/// Generate JSON with violations in GitLab CI format
// https://docs.gitlab.com/ee/ci/testing/code_quality.html#implement-a-custom-tool
pub struct GitlabEmitter {
project_dir: Option<String>,
}
impl Default for GitlabEmitter {
fn default() -> Self {
Self {
project_dir: std::env::var("CI_PROJECT_DIR").ok(),
}
}
}
impl Emitter for GitlabEmitter {
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
context: &EmitterContext,
) -> anyhow::Result<()> {
serde_json::to_writer_pretty(
writer,
&SerializedMessages {
messages,
context,
project_dir: self.project_dir.as_deref(),
},
)?;
Ok(())
}
}
struct SerializedMessages<'a> {
messages: &'a [Message],
context: &'a EmitterContext<'a>,
project_dir: Option<&'a str>,
}
impl Serialize for SerializedMessages<'_> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut s = serializer.serialize_seq(Some(self.messages.len()))?;
let mut fingerprints = HashSet::<u64>::with_capacity(self.messages.len());
for message in self.messages {
let start_location = message.compute_start_location();
let end_location = message.compute_end_location();
let lines = if self.context.is_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
json!({
"begin": 1,
"end": 1
})
} else {
json!({
"begin": start_location.row,
"end": end_location.row
})
};
let path = self.project_dir.as_ref().map_or_else(
|| relativize_path(message.filename()),
|project_dir| relativize_path_to(message.filename(), project_dir),
);
let mut message_fingerprint = fingerprint(message, 0);
            // If this fingerprint is already in use, re-hash it (salted with the
            // previously generated value) until it's unique.
while fingerprints.contains(&message_fingerprint) {
message_fingerprint = fingerprint(message, message_fingerprint);
}
fingerprints.insert(message_fingerprint);
let value = json!({
"description": format!("({}) {}", message.kind.rule().noqa_code(), message.kind.body),
"severity": "major",
"fingerprint": format!("{:x}", message_fingerprint),
"location": {
"path": path,
"lines": lines
}
});
s.serialize_element(&value)?;
}
s.end()
}
}
/// Generate a unique fingerprint to identify a violation.
fn fingerprint(message: &Message, salt: u64) -> u64 {
let Message {
kind,
range: _,
fix: _fix,
file,
noqa_offset: _,
} = message;
let mut hasher = DefaultHasher::new();
salt.hash(&mut hasher);
kind.name.hash(&mut hasher);
file.name().hash(&mut hasher);
hasher.finish()
}
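The salting loop above guarantees distinct fingerprints even for otherwise-identical violations; sketched editorially:
let first = fingerprint(&message, 0);
// A duplicate re-hashes with the previous fingerprint as salt, which yields a
// different value with overwhelming probability.
let second = fingerprint(&message, first);
assert_ne!(first, second);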
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::message::tests::{capture_emitter_output, create_messages};
use crate::message::GitlabEmitter;
#[test]
fn output() {
let mut emitter = GitlabEmitter::default();
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(redact_fingerprint(&content));
}
// Redact the fingerprint because the default hasher isn't stable across platforms.
fn redact_fingerprint(content: &str) -> String {
static FINGERPRINT_HAY_KEY: &str = r#""fingerprint": ""#;
let mut output = String::with_capacity(content.len());
let mut last = 0;
for (start, _) in content.match_indices(FINGERPRINT_HAY_KEY) {
let fingerprint_hash_start = start + FINGERPRINT_HAY_KEY.len();
output.push_str(&content[last..fingerprint_hash_start]);
output.push_str("<redacted>");
last = fingerprint_hash_start
+ content[fingerprint_hash_start..]
.find('"')
.expect("Expected terminating quote");
}
output.push_str(&content[last..]);
output
}
}

View file

@ -0,0 +1,225 @@
use std::fmt::{Display, Formatter};
use std::io::Write;
use std::num::NonZeroUsize;
use colored::Colorize;
use ruff_notebook::NotebookIndex;
use ruff_source_file::OneIndexed;
use crate::fs::relativize_path;
use crate::message::diff::calculate_print_width;
use crate::message::text::{MessageCodeFrame, RuleCodeAndBody};
use crate::message::{
group_messages_by_filename, Emitter, EmitterContext, Message, MessageWithLocation,
};
#[derive(Default)]
pub struct GroupedEmitter {
show_fix_status: bool,
show_source: bool,
}
impl GroupedEmitter {
#[must_use]
pub fn with_show_fix_status(mut self, show_fix_status: bool) -> Self {
self.show_fix_status = show_fix_status;
self
}
#[must_use]
pub fn with_show_source(mut self, show_source: bool) -> Self {
self.show_source = show_source;
self
}
}
impl Emitter for GroupedEmitter {
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
context: &EmitterContext,
) -> anyhow::Result<()> {
for (filename, messages) in group_messages_by_filename(messages) {
// Compute the maximum number of digits in the row and column, for messages in
// this file.
let mut max_row_length = OneIndexed::MIN;
let mut max_column_length = OneIndexed::MIN;
for message in &messages {
max_row_length = max_row_length.max(message.start_location.row);
max_column_length = max_column_length.max(message.start_location.column);
}
let row_length = calculate_print_width(max_row_length);
let column_length = calculate_print_width(max_column_length);
// Print the filename.
writeln!(writer, "{}:", relativize_path(filename).underline())?;
// Print each message.
for message in messages {
write!(
writer,
"{}",
DisplayGroupedMessage {
notebook_index: context.notebook_index(message.filename()),
message,
show_fix_status: self.show_fix_status,
show_source: self.show_source,
row_length,
column_length,
}
)?;
}
// Print a blank line between files, unless we're showing the source, in which case
// we'll have already printed a blank line between messages.
if !self.show_source {
writeln!(writer)?;
}
}
Ok(())
}
}
struct DisplayGroupedMessage<'a> {
message: MessageWithLocation<'a>,
show_fix_status: bool,
show_source: bool,
row_length: NonZeroUsize,
column_length: NonZeroUsize,
notebook_index: Option<&'a NotebookIndex>,
}
impl Display for DisplayGroupedMessage<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let MessageWithLocation {
message,
start_location,
} = &self.message;
write!(
f,
" {row_padding}",
row_padding =
" ".repeat(self.row_length.get() - calculate_print_width(start_location.row).get())
)?;
        // If we're working on a Jupyter notebook, translate the row to a cell and a row
        // within that cell.
let (row, col) = if let Some(jupyter_index) = self.notebook_index {
write!(
f,
"cell {cell}{sep}",
cell = jupyter_index
.cell(start_location.row.get())
.unwrap_or_default(),
sep = ":".cyan()
)?;
(
jupyter_index
.cell_row(start_location.row.get())
.unwrap_or(1) as usize,
start_location.column.get(),
)
} else {
(start_location.row.get(), start_location.column.get())
};
writeln!(
f,
"{row}{sep}{col}{col_padding} {code_and_body}",
sep = ":".cyan(),
col_padding = " ".repeat(
self.column_length.get() - calculate_print_width(start_location.column).get()
),
code_and_body = RuleCodeAndBody {
message,
show_fix_status: self.show_fix_status
},
)?;
if self.show_source {
use std::fmt::Write;
let mut padded = PadAdapter::new(f);
writeln!(
padded,
"{}",
MessageCodeFrame {
message,
notebook_index: self.notebook_index
}
)?;
}
Ok(())
}
}
/// Adapter that adds a ' ' at the start of every line without the need to copy the string.
/// Inspired by Rust's `debug_struct()` internal implementation that also uses a `PadAdapter`.
struct PadAdapter<'buf> {
buf: &'buf mut (dyn std::fmt::Write + 'buf),
on_newline: bool,
}
impl<'buf> PadAdapter<'buf> {
fn new(buf: &'buf mut (dyn std::fmt::Write + 'buf)) -> Self {
Self {
buf,
on_newline: true,
}
}
}
impl std::fmt::Write for PadAdapter<'_> {
fn write_str(&mut self, s: &str) -> std::fmt::Result {
for s in s.split_inclusive('\n') {
if self.on_newline {
self.buf.write_str(" ")?;
}
self.on_newline = s.ends_with('\n');
self.buf.write_str(s)?;
}
Ok(())
}
}
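A behavior sketch (editorial) of the adapter:
use std::fmt::Write;
let mut buffer = String::new();
let mut padded = PadAdapter::new(&mut buffer);
write!(padded, "1 | x = 1\n2 | y = 2").unwrap();
// Both lines now start with the pad prefix written by `write_str` above.
assert!(buffer.lines().all(|line| line.starts_with(' ')));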
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::message::tests::{capture_emitter_output, create_messages};
use crate::message::GroupedEmitter;
#[test]
fn default() {
let mut emitter = GroupedEmitter::default();
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
#[test]
fn show_source() {
let mut emitter = GroupedEmitter::default().with_show_source(true);
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
#[test]
fn fix_status() {
let mut emitter = GroupedEmitter::default()
.with_show_fix_status(true)
.with_show_source(true);
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
}

View file

@ -0,0 +1,117 @@
use std::io::Write;
use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer};
use serde_json::{json, Value};
use ruff_diagnostics::Edit;
use ruff_source_file::SourceCode;
use ruff_text_size::Ranged;
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
#[derive(Default)]
pub struct JsonEmitter;
impl Emitter for JsonEmitter {
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
_context: &EmitterContext,
) -> anyhow::Result<()> {
serde_json::to_writer_pretty(writer, &ExpandedMessages { messages })?;
Ok(())
}
}
struct ExpandedMessages<'a> {
messages: &'a [Message],
}
impl Serialize for ExpandedMessages<'_> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut s = serializer.serialize_seq(Some(self.messages.len()))?;
for message in self.messages {
let value = message_to_json_value(message);
s.serialize_element(&value)?;
}
s.end()
}
}
pub(crate) fn message_to_json_value(message: &Message) -> Value {
let source_code = message.file.to_source_code();
let fix = message.fix.as_ref().map(|fix| {
json!({
"applicability": fix.applicability(),
"message": message.kind.suggestion.as_deref(),
"edits": &ExpandedEdits { edits: fix.edits(), source_code: &source_code },
})
});
let start_location = source_code.source_location(message.start());
let end_location = source_code.source_location(message.end());
let noqa_location = source_code.source_location(message.noqa_offset);
json!({
"code": message.kind.rule().noqa_code().to_string(),
"url": message.kind.rule().url(),
"message": message.kind.body,
"fix": fix,
"location": start_location,
"end_location": end_location,
"filename": message.filename(),
"noqa_row": noqa_location.row
})
}
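For the unused-import fixture in the shared tests, the serialized object looks roughly like this (editorial illustration; the `url` value and fix-edit shapes are elided rather than guessed):
{
    "code": "F401",
    "url": "...",
    "message": "`os` imported but unused",
    "fix": {
        "applicability": "...",
        "message": "Remove unused import: `os`",
        "edits": ["..."]
    },
    "location": {"row": 1, "column": 8},
    "end_location": {"row": 1, "column": 10},
    "filename": "fib.py",
    "noqa_row": 1
}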
struct ExpandedEdits<'a> {
edits: &'a [Edit],
source_code: &'a SourceCode<'a, 'a>,
}
impl Serialize for ExpandedEdits<'_> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut s = serializer.serialize_seq(Some(self.edits.len()))?;
for edit in self.edits {
let value = json!({
"content": edit.content().unwrap_or_default(),
"location": self.source_code.source_location(edit.start()),
"end_location": self.source_code.source_location(edit.end())
});
s.serialize_element(&value)?;
}
s.end()
}
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::message::tests::{capture_emitter_output, create_messages};
use crate::message::JsonEmitter;
#[test]
fn output() {
let mut emitter = JsonEmitter;
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
}

View file

@ -0,0 +1,39 @@
use std::io::Write;
use crate::message::json::message_to_json_value;
use crate::message::{Emitter, EmitterContext, Message};
#[derive(Default)]
pub struct JsonLinesEmitter;
impl Emitter for JsonLinesEmitter {
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
_context: &EmitterContext,
) -> anyhow::Result<()> {
let mut w = writer;
for message in messages {
serde_json::to_writer(&mut w, &message_to_json_value(message))?;
w.write_all(b"\n")?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::message::json_lines::JsonLinesEmitter;
use crate::message::tests::{capture_emitter_output, create_messages};
#[test]
fn output() {
let mut emitter = JsonLinesEmitter;
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
}

View file

@ -0,0 +1,101 @@
use std::io::Write;
use std::path::Path;
use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite};
use ruff_source_file::SourceLocation;
use crate::message::{
group_messages_by_filename, Emitter, EmitterContext, Message, MessageWithLocation,
};
use crate::registry::AsRule;
#[derive(Default)]
pub struct JunitEmitter;
impl Emitter for JunitEmitter {
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
context: &EmitterContext,
) -> anyhow::Result<()> {
let mut report = Report::new("ruff");
if messages.is_empty() {
let mut test_suite = TestSuite::new("ruff");
test_suite
.extra
.insert("package".to_string(), "org.ruff".to_string());
let mut case = TestCase::new("No errors found", TestCaseStatus::success());
case.set_classname("ruff");
test_suite.add_test_case(case);
report.add_test_suite(test_suite);
} else {
for (filename, messages) in group_messages_by_filename(messages) {
let mut test_suite = TestSuite::new(filename);
test_suite
.extra
.insert("package".to_string(), "org.ruff".to_string());
for message in messages {
let MessageWithLocation {
message,
start_location,
} = message;
let mut status = TestCaseStatus::non_success(NonSuccessKind::Failure);
status.set_message(message.kind.body.clone());
let location = if context.is_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
SourceLocation::default()
} else {
start_location
};
status.set_description(format!(
"line {row}, col {col}, {body}",
row = location.row,
col = location.column,
body = message.kind.body
));
let mut case = TestCase::new(
format!("org.ruff.{}", message.kind.rule().noqa_code()),
status,
);
let file_path = Path::new(filename);
let file_stem = file_path.file_stem().unwrap().to_str().unwrap();
let classname = file_path.parent().unwrap().join(file_stem);
case.set_classname(classname.to_str().unwrap());
case.extra
.insert("line".to_string(), location.row.to_string());
case.extra
.insert("column".to_string(), location.column.to_string());
test_suite.add_test_case(case);
}
report.add_test_suite(test_suite);
}
}
report.serialize(writer)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::message::tests::{capture_emitter_output, create_messages};
use crate::message::JunitEmitter;
#[test]
fn output() {
let mut emitter = JunitEmitter;
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
}

View file

@ -0,0 +1,235 @@
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::io::Write;
use std::ops::Deref;
use rustc_hash::FxHashMap;
pub use azure::AzureEmitter;
pub use github::GithubEmitter;
pub use gitlab::GitlabEmitter;
pub use grouped::GroupedEmitter;
pub use json::JsonEmitter;
pub use json_lines::JsonLinesEmitter;
pub use junit::JunitEmitter;
pub use pylint::PylintEmitter;
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix};
use ruff_notebook::NotebookIndex;
use ruff_source_file::{SourceFile, SourceLocation};
use ruff_text_size::{Ranged, TextRange, TextSize};
pub use text::TextEmitter;
mod azure;
mod diff;
mod github;
mod gitlab;
mod grouped;
mod json;
mod json_lines;
mod junit;
mod pylint;
mod text;
#[derive(Debug, PartialEq, Eq)]
pub struct Message {
pub kind: DiagnosticKind,
pub range: TextRange,
pub fix: Option<Fix>,
pub file: SourceFile,
pub noqa_offset: TextSize,
}
impl Message {
pub fn from_diagnostic(
diagnostic: Diagnostic,
file: SourceFile,
noqa_offset: TextSize,
) -> Self {
Self {
range: diagnostic.range(),
kind: diagnostic.kind,
fix: diagnostic.fix,
file,
noqa_offset,
}
}
pub fn filename(&self) -> &str {
self.file.name()
}
pub fn compute_start_location(&self) -> SourceLocation {
self.file.to_source_code().source_location(self.start())
}
pub fn compute_end_location(&self) -> SourceLocation {
self.file.to_source_code().source_location(self.end())
}
}
impl Ord for Message {
fn cmp(&self, other: &Self) -> Ordering {
(&self.file, self.start()).cmp(&(&other.file, other.start()))
}
}
impl PartialOrd for Message {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ranged for Message {
fn range(&self) -> TextRange {
self.range
}
}
struct MessageWithLocation<'a> {
message: &'a Message,
start_location: SourceLocation,
}
impl Deref for MessageWithLocation<'_> {
type Target = Message;
fn deref(&self) -> &Self::Target {
self.message
}
}
fn group_messages_by_filename(messages: &[Message]) -> BTreeMap<&str, Vec<MessageWithLocation>> {
let mut grouped_messages = BTreeMap::default();
for message in messages {
grouped_messages
.entry(message.filename())
.or_insert_with(Vec::new)
.push(MessageWithLocation {
message,
start_location: message.compute_start_location(),
});
}
grouped_messages
}
/// Display format for [`Message`]s.
///
/// The emitter serializes a slice of [`Message`]s and writes them to a [`Write`].
pub trait Emitter {
/// Serializes the `messages` and writes the output to `writer`.
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
context: &EmitterContext,
) -> anyhow::Result<()>;
}
/// Context passed to [`Emitter`].
pub struct EmitterContext<'a> {
notebook_indexes: &'a FxHashMap<String, NotebookIndex>,
}
impl<'a> EmitterContext<'a> {
pub fn new(notebook_indexes: &'a FxHashMap<String, NotebookIndex>) -> Self {
Self { notebook_indexes }
}
/// Returns `true` if the file with the given `name` is a Jupyter notebook.
pub fn is_notebook(&self, name: &str) -> bool {
self.notebook_indexes.contains_key(name)
}
pub fn notebook_index(&self, name: &str) -> Option<&NotebookIndex> {
self.notebook_indexes.get(name)
}
}
#[cfg(test)]
mod tests {
use rustc_hash::FxHashMap;
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Edit, Fix};
use ruff_source_file::SourceFileBuilder;
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::message::{Emitter, EmitterContext, Message};
pub(super) fn create_messages() -> Vec<Message> {
let fib = r#"import os
def fibonacci(n):
"""Compute the nth number in the Fibonacci sequence."""
x = 1
if n == 0:
return 0
elif n == 1:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
"#;
let unused_import = Diagnostic::new(
DiagnosticKind {
name: "UnusedImport".to_string(),
body: "`os` imported but unused".to_string(),
suggestion: Some("Remove unused import: `os`".to_string()),
},
TextRange::new(TextSize::from(7), TextSize::from(9)),
)
.with_fix(Fix::suggested(Edit::range_deletion(TextRange::new(
TextSize::from(0),
TextSize::from(10),
))));
let fib_source = SourceFileBuilder::new("fib.py", fib).finish();
let unused_variable = Diagnostic::new(
DiagnosticKind {
name: "UnusedVariable".to_string(),
body: "Local variable `x` is assigned to but never used".to_string(),
suggestion: Some("Remove assignment to unused variable `x`".to_string()),
},
TextRange::new(TextSize::from(94), TextSize::from(95)),
)
.with_fix(Fix::suggested(Edit::deletion(
TextSize::from(94),
TextSize::from(99),
)));
let file_2 = r#"if a == 1: pass"#;
let undefined_name = Diagnostic::new(
DiagnosticKind {
name: "UndefinedName".to_string(),
body: "Undefined name `a`".to_string(),
suggestion: None,
},
TextRange::new(TextSize::from(3), TextSize::from(4)),
);
let file_2_source = SourceFileBuilder::new("undef.py", file_2).finish();
let unused_import_start = unused_import.start();
let unused_variable_start = unused_variable.start();
let undefined_name_start = undefined_name.start();
vec![
Message::from_diagnostic(unused_import, fib_source.clone(), unused_import_start),
Message::from_diagnostic(unused_variable, fib_source, unused_variable_start),
Message::from_diagnostic(undefined_name, file_2_source, undefined_name_start),
]
}
pub(super) fn capture_emitter_output(
emitter: &mut dyn Emitter,
messages: &[Message],
) -> String {
let notebook_indexes = FxHashMap::default();
let context = EmitterContext::new(&notebook_indexes);
let mut output: Vec<u8> = Vec::new();
emitter.emit(&mut output, messages, &context).unwrap();
String::from_utf8(output).expect("Output to be valid UTF-8")
}
}
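
Since `Emitter` is the extension point here, a minimal sketch of an additional implementation may help. `CountEmitter` is a hypothetical name used purely for illustration; it leans on the module-private `group_messages_by_filename` helper, so it would have to live in this module:

```rust
use std::io::Write;

/// Hypothetical emitter that prints one summary line per file.
struct CountEmitter;

impl Emitter for CountEmitter {
    fn emit(
        &mut self,
        writer: &mut dyn Write,
        messages: &[Message],
        _context: &EmitterContext,
    ) -> anyhow::Result<()> {
        // Reuse the module's grouping helper to bucket messages by filename.
        for (filename, messages) in group_messages_by_filename(messages) {
            writeln!(writer, "{filename}: {} diagnostic(s)", messages.len())?;
        }
        Ok(())
    }
}
```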

View file

@ -0,0 +1,57 @@
use std::io::Write;
use ruff_source_file::OneIndexed;
use crate::fs::relativize_path;
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
/// Generate violations in Pylint format.
/// See: [Flake8 documentation](https://flake8.pycqa.org/en/latest/internal/formatters.html#pylint-formatter)
#[derive(Default)]
pub struct PylintEmitter;
impl Emitter for PylintEmitter {
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
context: &EmitterContext,
) -> anyhow::Result<()> {
for message in messages {
let row = if context.is_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
OneIndexed::from_zero_indexed(0)
} else {
message.compute_start_location().row
};
writeln!(
writer,
"{path}:{row}: [{code}] {body}",
path = relativize_path(message.filename()),
code = message.kind.rule().noqa_code(),
body = message.kind.body,
)?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::message::tests::{capture_emitter_output, create_messages};
use crate::message::PylintEmitter;
#[test]
fn output() {
let mut emitter = PylintEmitter;
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
}

View file

@ -0,0 +1,8 @@
---
source: crates/ruff_linter/src/message/azure.rs
expression: content
---
##vso[task.logissue type=error;sourcepath=fib.py;linenumber=1;columnnumber=8;code=F401;]`os` imported but unused
##vso[task.logissue type=error;sourcepath=fib.py;linenumber=6;columnnumber=5;code=F841;]Local variable `x` is assigned to but never used
##vso[task.logissue type=error;sourcepath=undef.py;linenumber=1;columnnumber=4;code=F821;]Undefined name `a`

View file

@ -0,0 +1,8 @@
---
source: crates/ruff_linter/src/message/github.rs
expression: content
---
::error title=Ruff (F401),file=fib.py,line=1,col=8,endLine=1,endColumn=10::fib.py:1:8: F401 `os` imported but unused
::error title=Ruff (F841),file=fib.py,line=6,col=5,endLine=6,endColumn=6::fib.py:6:5: F841 Local variable `x` is assigned to but never used
::error title=Ruff (F821),file=undef.py,line=1,col=4,endLine=1,endColumn=5::undef.py:1:4: F821 Undefined name `a`

View file

@ -0,0 +1,42 @@
---
source: crates/ruff_linter/src/message/gitlab.rs
expression: redact_fingerprint(&content)
---
[
{
"description": "(F401) `os` imported but unused",
"fingerprint": "<redacted>",
"location": {
"lines": {
"begin": 1,
"end": 1
},
"path": "fib.py"
},
"severity": "major"
},
{
"description": "(F841) Local variable `x` is assigned to but never used",
"fingerprint": "<redacted>",
"location": {
"lines": {
"begin": 6,
"end": 6
},
"path": "fib.py"
},
"severity": "major"
},
{
"description": "(F821) Undefined name `a`",
"fingerprint": "<redacted>",
"location": {
"lines": {
"begin": 1,
"end": 1
},
"path": "undef.py"
},
"severity": "major"
}
]

View file

@ -0,0 +1,12 @@
---
source: crates/ruff_linter/src/message/grouped.rs
expression: content
---
fib.py:
1:8 F401 `os` imported but unused
6:5 F841 Local variable `x` is assigned to but never used
undef.py:
1:4 F821 Undefined name `a`

View file

@ -0,0 +1,31 @@
---
source: crates/ruff_linter/src/message/grouped.rs
expression: content
---
fib.py:
1:8 F401 [*] `os` imported but unused
|
1 | import os
| ^^ F401
|
= help: Remove unused import: `os`
6:5 F841 [*] Local variable `x` is assigned to but never used
|
4 | def fibonacci(n):
5 | """Compute the nth number in the Fibonacci sequence."""
6 | x = 1
| ^ F841
7 | if n == 0:
8 | return 0
|
= help: Remove assignment to unused variable `x`
undef.py:
1:4 F821 Undefined name `a`
|
1 | if a == 1: pass
| ^ F821
|

View file

@ -0,0 +1,31 @@
---
source: crates/ruff_linter/src/message/grouped.rs
expression: content
---
fib.py:
1:8 F401 `os` imported but unused
|
1 | import os
| ^^ F401
|
= help: Remove unused import: `os`
6:5 F841 Local variable `x` is assigned to but never used
|
4 | def fibonacci(n):
5 | """Compute the nth number in the Fibonacci sequence."""
6 | x = 1
| ^ F841
7 | if n == 0:
8 | return 0
|
= help: Remove assignment to unused variable `x`
undef.py:
1:4 F821 Undefined name `a`
|
1 | if a == 1: pass
| ^ F821
|

View file

@ -0,0 +1,86 @@
---
source: crates/ruff_linter/src/message/json.rs
expression: content
---
[
{
"code": "F401",
"end_location": {
"column": 10,
"row": 1
},
"filename": "fib.py",
"fix": {
"applicability": "Suggested",
"edits": [
{
"content": "",
"end_location": {
"column": 1,
"row": 2
},
"location": {
"column": 1,
"row": 1
}
}
],
"message": "Remove unused import: `os`"
},
"location": {
"column": 8,
"row": 1
},
"message": "`os` imported but unused",
"noqa_row": 1,
"url": "https://docs.astral.sh/ruff/rules/unused-import"
},
{
"code": "F841",
"end_location": {
"column": 6,
"row": 6
},
"filename": "fib.py",
"fix": {
"applicability": "Suggested",
"edits": [
{
"content": "",
"end_location": {
"column": 10,
"row": 6
},
"location": {
"column": 5,
"row": 6
}
}
],
"message": "Remove assignment to unused variable `x`"
},
"location": {
"column": 5,
"row": 6
},
"message": "Local variable `x` is assigned to but never used",
"noqa_row": 6,
"url": "https://docs.astral.sh/ruff/rules/unused-variable"
},
{
"code": "F821",
"end_location": {
"column": 5,
"row": 1
},
"filename": "undef.py",
"fix": null,
"location": {
"column": 4,
"row": 1
},
"message": "Undefined name `a`",
"noqa_row": 1,
"url": "https://docs.astral.sh/ruff/rules/undefined-name"
}
]

View file

@ -0,0 +1,8 @@
---
source: crates/ruff_linter/src/message/json_lines.rs
expression: content
---
{"code":"F401","end_location":{"column":10,"row":1},"filename":"fib.py","fix":{"applicability":"Suggested","edits":[{"content":"","end_location":{"column":1,"row":2},"location":{"column":1,"row":1}}],"message":"Remove unused import: `os`"},"location":{"column":8,"row":1},"message":"`os` imported but unused","noqa_row":1,"url":"https://docs.astral.sh/ruff/rules/unused-import"}
{"code":"F841","end_location":{"column":6,"row":6},"filename":"fib.py","fix":{"applicability":"Suggested","edits":[{"content":"","end_location":{"column":10,"row":6},"location":{"column":5,"row":6}}],"message":"Remove assignment to unused variable `x`"},"location":{"column":5,"row":6},"message":"Local variable `x` is assigned to but never used","noqa_row":6,"url":"https://docs.astral.sh/ruff/rules/unused-variable"}
{"code":"F821","end_location":{"column":5,"row":1},"filename":"undef.py","fix":null,"location":{"column":4,"row":1},"message":"Undefined name `a`","noqa_row":1,"url":"https://docs.astral.sh/ruff/rules/undefined-name"}

View file

@ -0,0 +1,21 @@
---
source: crates/ruff_linter/src/message/junit.rs
expression: content
---
<?xml version="1.0" encoding="UTF-8"?>
<testsuites name="ruff" tests="3" failures="3" errors="0">
<testsuite name="fib.py" tests="2" disabled="0" errors="0" failures="2" package="org.ruff">
<testcase name="org.ruff.F401" classname="fib" line="1" column="8">
<failure message="`os` imported but unused">line 1, col 8, `os` imported but unused</failure>
</testcase>
<testcase name="org.ruff.F841" classname="fib" line="6" column="5">
<failure message="Local variable `x` is assigned to but never used">line 6, col 5, Local variable `x` is assigned to but never used</failure>
</testcase>
</testsuite>
<testsuite name="undef.py" tests="1" disabled="0" errors="0" failures="1" package="org.ruff">
<testcase name="org.ruff.F821" classname="undef" line="1" column="4">
<failure message="Undefined name `a`">line 1, col 4, Undefined name `a`</failure>
</testcase>
</testsuite>
</testsuites>

View file

@ -0,0 +1,8 @@
---
source: crates/ruff_linter/src/message/pylint.rs
expression: content
---
fib.py:1: [F401] `os` imported but unused
fib.py:6: [F841] Local variable `x` is assigned to but never used
undef.py:1: [F821] Undefined name `a`

View file

@ -0,0 +1,29 @@
---
source: crates/ruff_linter/src/message/text.rs
expression: content
---
fib.py:1:8: F401 `os` imported but unused
|
1 | import os
| ^^ F401
|
= help: Remove unused import: `os`
fib.py:6:5: F841 Local variable `x` is assigned to but never used
|
4 | def fibonacci(n):
5 | """Compute the nth number in the Fibonacci sequence."""
6 | x = 1
| ^ F841
7 | if n == 0:
8 | return 0
|
= help: Remove assignment to unused variable `x`
undef.py:1:4: F821 Undefined name `a`
|
1 | if a == 1: pass
| ^ F821
|

View file

@ -0,0 +1,29 @@
---
source: crates/ruff_linter/src/message/text.rs
expression: content
---
fib.py:1:8: F401 [*] `os` imported but unused
|
1 | import os
| ^^ F401
|
= help: Remove unused import: `os`
fib.py:6:5: F841 [*] Local variable `x` is assigned to but never used
|
4 | def fibonacci(n):
5 | """Compute the nth number in the Fibonacci sequence."""
6 | x = 1
| ^ F841
7 | if n == 0:
8 | return 0
|
= help: Remove assignment to unused variable `x`
undef.py:1:4: F821 Undefined name `a`
|
1 | if a == 1: pass
| ^ F821
|

View file

@ -0,0 +1,362 @@
use std::borrow::Cow;
use std::fmt::{Display, Formatter};
use std::io::Write;
use annotate_snippets::display_list::{DisplayList, FormatOptions};
use annotate_snippets::snippet::{Annotation, AnnotationType, Slice, Snippet, SourceAnnotation};
use bitflags::bitflags;
use colored::Colorize;
use ruff_notebook::NotebookIndex;
use ruff_source_file::{OneIndexed, SourceLocation};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::fs::relativize_path;
use crate::line_width::{LineWidthBuilder, TabSize};
use crate::message::diff::Diff;
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
bitflags! {
#[derive(Default)]
struct EmitterFlags: u8 {
/// Whether to show the fix status of a diagnostic.
const SHOW_FIX_STATUS = 0b0000_0001;
/// Whether to show the diff of a fix, for diagnostics that have a fix.
const SHOW_FIX_DIFF = 0b0000_0010;
/// Whether to show the source code of a diagnostic.
const SHOW_SOURCE = 0b0000_0100;
}
}
#[derive(Default)]
pub struct TextEmitter {
flags: EmitterFlags,
}
impl TextEmitter {
#[must_use]
pub fn with_show_fix_status(mut self, show_fix_status: bool) -> Self {
self.flags
.set(EmitterFlags::SHOW_FIX_STATUS, show_fix_status);
self
}
#[must_use]
pub fn with_show_fix_diff(mut self, show_fix_diff: bool) -> Self {
self.flags.set(EmitterFlags::SHOW_FIX_DIFF, show_fix_diff);
self
}
#[must_use]
pub fn with_show_source(mut self, show_source: bool) -> Self {
self.flags.set(EmitterFlags::SHOW_SOURCE, show_source);
self
}
}
impl Emitter for TextEmitter {
fn emit(
&mut self,
writer: &mut dyn Write,
messages: &[Message],
context: &EmitterContext,
) -> anyhow::Result<()> {
for message in messages {
write!(
writer,
"{path}{sep}",
path = relativize_path(message.filename()).bold(),
sep = ":".cyan(),
)?;
let start_location = message.compute_start_location();
let notebook_index = context.notebook_index(message.filename());
// Check whether we're working on a Jupyter notebook and, if so, translate positions to be relative to the containing cell
let diagnostic_location = if let Some(notebook_index) = notebook_index {
write!(
writer,
"cell {cell}{sep}",
cell = notebook_index
.cell(start_location.row.get())
.unwrap_or_default(),
sep = ":".cyan(),
)?;
SourceLocation {
row: OneIndexed::new(
notebook_index
.cell_row(start_location.row.get())
.unwrap_or(1) as usize,
)
.unwrap(),
column: start_location.column,
}
} else {
start_location
};
writeln!(
writer,
"{row}{sep}{col}{sep} {code_and_body}",
row = diagnostic_location.row,
col = diagnostic_location.column,
sep = ":".cyan(),
code_and_body = RuleCodeAndBody {
message,
show_fix_status: self.flags.intersects(EmitterFlags::SHOW_FIX_STATUS)
}
)?;
if self.flags.intersects(EmitterFlags::SHOW_SOURCE) {
writeln!(
writer,
"{}",
MessageCodeFrame {
message,
notebook_index
}
)?;
}
if self.flags.intersects(EmitterFlags::SHOW_FIX_DIFF) {
if let Some(diff) = Diff::from_message(message) {
writeln!(writer, "{diff}")?;
}
}
}
Ok(())
}
}
pub(super) struct RuleCodeAndBody<'a> {
pub(crate) message: &'a Message,
pub(crate) show_fix_status: bool,
}
impl Display for RuleCodeAndBody<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let kind = &self.message.kind;
if self.show_fix_status && self.message.fix.is_some() {
write!(
f,
"{code} {autofix}{body}",
code = kind.rule().noqa_code().to_string().red().bold(),
autofix = format_args!("[{}] ", "*".cyan()),
body = kind.body,
)
} else {
write!(
f,
"{code} {body}",
code = kind.rule().noqa_code().to_string().red().bold(),
body = kind.body,
)
}
}
}
pub(super) struct MessageCodeFrame<'a> {
pub(crate) message: &'a Message,
pub(crate) notebook_index: Option<&'a NotebookIndex>,
}
impl Display for MessageCodeFrame<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let Message {
kind, file, range, ..
} = self.message;
let suggestion = kind.suggestion.as_deref();
let footer = if suggestion.is_some() {
vec![Annotation {
id: None,
label: suggestion,
annotation_type: AnnotationType::Help,
}]
} else {
Vec::new()
};
let source_code = file.to_source_code();
let content_start_index = source_code.line_index(range.start());
let mut start_index = content_start_index.saturating_sub(2);
// If we're working with a Jupyter Notebook, skip the lines which are
// outside of the cell containing the diagnostic.
if let Some(index) = self.notebook_index {
let content_start_cell = index.cell(content_start_index.get()).unwrap_or_default();
while start_index < content_start_index {
if index.cell(start_index.get()).unwrap_or_default() == content_start_cell {
break;
}
start_index = start_index.saturating_add(1);
}
}
// Trim leading empty lines.
while start_index < content_start_index {
if !source_code.line_text(start_index).trim().is_empty() {
break;
}
start_index = start_index.saturating_add(1);
}
let content_end_index = source_code.line_index(range.end());
let mut end_index = content_end_index
.saturating_add(2)
.min(OneIndexed::from_zero_indexed(source_code.line_count()));
// If we're working with a Jupyter Notebook, skip the lines which are
// outside of the cell containing the diagnostic.
if let Some(index) = self.notebook_index {
let content_end_cell = index.cell(content_end_index.get()).unwrap_or_default();
while end_index > content_end_index {
if index.cell(end_index.get()).unwrap_or_default() == content_end_cell {
break;
}
end_index = end_index.saturating_sub(1);
}
}
// Trim trailing empty lines.
while end_index > content_end_index {
if !source_code.line_text(end_index).trim().is_empty() {
break;
}
end_index = end_index.saturating_sub(1);
}
let start_offset = source_code.line_start(start_index);
let end_offset = source_code.line_end(end_index);
let source = replace_whitespace(
source_code.slice(TextRange::new(start_offset, end_offset)),
range - start_offset,
);
let start_char = source.text[TextRange::up_to(source.annotation_range.start())]
.chars()
.count();
let char_length = source.text[source.annotation_range].chars().count();
let label = kind.rule().noqa_code().to_string();
let snippet = Snippet {
title: None,
slices: vec![Slice {
source: &source.text,
line_start: self.notebook_index.map_or_else(
|| start_index.get(),
|notebook_index| {
notebook_index
.cell_row(start_index.get())
.unwrap_or_default() as usize
},
),
annotations: vec![SourceAnnotation {
label: &label,
annotation_type: AnnotationType::Error,
range: (start_char, start_char + char_length),
}],
// The origin (file name, line number, and column number) is already encoded
// in the `label`.
origin: None,
fold: false,
}],
footer,
opt: FormatOptions {
#[cfg(test)]
color: false,
#[cfg(not(test))]
color: colored::control::SHOULD_COLORIZE.should_colorize(),
..FormatOptions::default()
},
};
writeln!(f, "{message}", message = DisplayList::from(snippet))
}
}
fn replace_whitespace(source: &str, annotation_range: TextRange) -> SourceCode {
let mut result = String::new();
let mut last_end = 0;
let mut range = annotation_range;
let mut line_width = LineWidthBuilder::new(TabSize::default());
for (index, c) in source.char_indices() {
let old_width = line_width.get();
line_width = line_width.add_char(c);
if matches!(c, '\t') {
// SAFETY: The difference is a value in the range [1..TAB_SIZE], which is guaranteed to fit in a `u32`.
#[allow(clippy::cast_possible_truncation)]
let tab_width = (line_width.get() - old_width) as u32;
if index < usize::from(annotation_range.start()) {
range += TextSize::new(tab_width - 1);
} else if index < usize::from(annotation_range.end()) {
range = range.add_end(TextSize::new(tab_width - 1));
}
result.push_str(&source[last_end..index]);
for _ in 0..tab_width {
result.push(' ');
}
last_end = index + 1;
}
}
// No tabs
if result.is_empty() {
SourceCode {
annotation_range,
text: Cow::Borrowed(source),
}
} else {
result.push_str(&source[last_end..]);
SourceCode {
annotation_range: range,
text: Cow::Owned(result),
}
}
}
struct SourceCode<'a> {
text: Cow<'a, str>,
annotation_range: TextRange,
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use crate::message::tests::{capture_emitter_output, create_messages};
use crate::message::TextEmitter;
#[test]
fn default() {
let mut emitter = TextEmitter::default().with_show_source(true);
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
#[test]
fn fix_status() {
let mut emitter = TextEmitter::default()
.with_show_fix_status(true)
.with_show_source(true);
let content = capture_emitter_output(&mut emitter, &create_messages());
assert_snapshot!(content);
}
}
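
The tab handling in `replace_whitespace` is the subtle part: tabs are expanded to spaces, and the annotation range is shifted by the extra width so the carets still line up. A hedged, in-module walk-through, assuming the default tab size of 4:

```rust
use ruff_text_size::{TextRange, TextSize};

// Annotate the `x` in "\tx = 1" (range 1..2).
let source = "\tx = 1";
let result = replace_whitespace(source, TextRange::new(TextSize::new(1), TextSize::new(2)));

// The tab before the annotation expands to four spaces, so the range
// shifts right by 3 and now covers the `x` in "    x = 1".
assert_eq!(&*result.text, "    x = 1");
assert_eq!(result.annotation_range, TextRange::new(TextSize::new(4), TextSize::new(5)));
```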

File diff suppressed because it is too large

View file

@ -0,0 +1,87 @@
//! Detect Python package roots and file associations.
use std::path::{Path, PathBuf};
// If we have a Python package layout like:
// - root/
// - foo/
// - __init__.py
// - bar.py
// - baz/
// - __init__.py
// - qux.py
//
// Then today, if you run with defaults (`src = ["."]`) from `root`, we'll
// detect that `foo.bar`, `foo.baz`, and `foo.baz.qux` are first-party modules
// (since, if you're in `root`, you can see `foo`).
//
// However, we'd also like it to be the case that, even if you run this command
// from `foo`, we still consider `foo.baz.qux` to be first-party when linting
// `foo/bar.py`. More specifically, for each Python file, we should find the
// root of the current package.
//
// Thus, for each file, we iterate up its ancestors, returning the last
// directory containing an `__init__.py`.
/// Return `true` if the directory at the given `Path` appears to be a Python
/// package.
pub fn is_package(path: &Path, namespace_packages: &[PathBuf]) -> bool {
path.join("__init__.py").is_file()
|| namespace_packages
.iter()
.any(|namespace_package| namespace_package == path)
}
/// Return the package root for the given Python file.
pub fn detect_package_root<'a>(
path: &'a Path,
namespace_packages: &'a [PathBuf],
) -> Option<&'a Path> {
let mut current = None;
for parent in path.ancestors() {
if !is_package(parent, namespace_packages) {
return current;
}
current = Some(parent);
}
current
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use crate::packaging::detect_package_root;
use crate::test::test_resource_path;
#[test]
fn package_detection() {
assert_eq!(
detect_package_root(&test_resource_path("package/src/package"), &[],),
Some(test_resource_path("package/src/package").as_path())
);
assert_eq!(
detect_package_root(&test_resource_path("project/python_modules/core/core"), &[],),
Some(test_resource_path("project/python_modules/core/core").as_path())
);
assert_eq!(
detect_package_root(
&test_resource_path("project/examples/docs/docs/concepts"),
&[],
),
Some(test_resource_path("project/examples/docs/docs").as_path())
);
assert_eq!(
detect_package_root(
PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("setup.py")
.as_path(),
&[],
),
None,
);
}
}
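
The `namespace_packages` parameter is the escape hatch for PEP 420-style packages: a directory with no `__init__.py` still counts as a package if it is listed explicitly. A minimal sketch (paths are illustrative and assumed not to exist on disk):

```rust
use std::path::{Path, PathBuf};

// `src/plugins` has no `__init__.py`, but listing it as a namespace
// package makes `is_package` treat it as a package anyway.
let namespace_packages = vec![PathBuf::from("src/plugins")];
assert!(is_package(Path::new("src/plugins"), &namespace_packages));
assert!(!is_package(Path::new("src/other"), &namespace_packages));
```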

View file

@ -0,0 +1,78 @@
use colored::Colorize;
use log::warn;
use pyproject_toml::{BuildSystem, Project};
use ruff_text_size::{TextRange, TextSize};
use serde::{Deserialize, Serialize};
use ruff_diagnostics::Diagnostic;
use ruff_source_file::SourceFile;
use crate::message::Message;
use crate::registry::Rule;
use crate::rules::ruff::rules::InvalidPyprojectToml;
use crate::settings::Settings;
use crate::IOError;
/// Unlike [`pyproject_toml::PyProjectToml`], this version also makes `build_system` optional.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
struct PyProjectToml {
/// Build-related data
build_system: Option<BuildSystem>,
/// Project metadata
project: Option<Project>,
}
pub fn lint_pyproject_toml(source_file: SourceFile, settings: &Settings) -> Vec<Message> {
let Some(err) = toml::from_str::<PyProjectToml>(source_file.source_text()).err() else {
return Vec::default();
};
let mut messages = Vec::new();
let range = match err.span() {
// Unfortunately, toml and/or serde sometimes just don't give us spans
// TODO(konstin,micha): https://github.com/astral-sh/ruff/issues/4571
None => TextRange::default(),
Some(range) => {
let Ok(end) = TextSize::try_from(range.end) else {
let message = format!(
"{} is larger than 4GB, but ruff assumes all files to be smaller",
source_file.name(),
);
if settings.rules.enabled(Rule::IOError) {
let diagnostic = Diagnostic::new(IOError { message }, TextRange::default());
messages.push(Message::from_diagnostic(
diagnostic,
source_file,
TextSize::default(),
));
} else {
warn!(
"{}{}{} {message}",
"Failed to lint ".bold(),
source_file.name().bold(),
":".bold()
);
}
return messages;
};
TextRange::new(
// start <= end, so end < 4GB implies start < 4GB
TextSize::try_from(range.start).unwrap(),
end,
)
}
};
if settings.rules.enabled(Rule::InvalidPyprojectToml) {
let toml_err = err.message().to_string();
let diagnostic = Diagnostic::new(InvalidPyprojectToml { message: toml_err }, range);
messages.push(Message::from_diagnostic(
diagnostic,
source_file,
TextSize::default(),
));
}
messages
}
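
A hedged sketch of driving `lint_pyproject_toml` with a file that fails to parse; the `Settings` value is assumed to have `RUF200` (invalid-pyproject-toml) enabled, and its construction is elided:

```rust
use ruff_source_file::SourceFileBuilder;

// Malformed TOML: the `[project` table header is never closed.
let source = SourceFileBuilder::new("pyproject.toml", "[project\nname = \"demo\"").finish();

// `settings` is assumed to enable `Rule::InvalidPyprojectToml`.
let messages = lint_pyproject_toml(source, &settings);
assert_eq!(messages.len(), 1);
```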

View file

@ -0,0 +1,417 @@
//! Remnant of the registry of all [`Rule`] implementations; it now re-exports
//! from `codes.rs` along with some helper symbols.
use strum_macros::EnumIter;
pub use codes::Rule;
use ruff_macros::RuleNamespace;
pub use rule_set::{RuleSet, RuleSetIterator};
use crate::codes::{self};
mod rule_set;
pub trait AsRule {
fn rule(&self) -> Rule;
}
impl Rule {
pub fn from_code(code: &str) -> Result<Self, FromCodeError> {
let (linter, code) = Linter::parse_code(code).ok_or(FromCodeError::Unknown)?;
linter
.all_rules()
.find(|rule| rule.noqa_code().suffix() == code)
.ok_or(FromCodeError::Unknown)
}
}
#[derive(thiserror::Error, Debug)]
pub enum FromCodeError {
#[error("unknown rule code")]
Unknown,
}
#[derive(EnumIter, Debug, PartialEq, Eq, Clone, Hash, RuleNamespace)]
pub enum Linter {
/// [Pyflakes](https://pypi.org/project/pyflakes/)
#[prefix = "F"]
Pyflakes,
/// [pycodestyle](https://pypi.org/project/pycodestyle/)
#[prefix = "E"]
#[prefix = "W"]
Pycodestyle,
/// [mccabe](https://pypi.org/project/mccabe/)
#[prefix = "C90"]
McCabe,
/// [isort](https://pypi.org/project/isort/)
#[prefix = "I"]
Isort,
/// [pep8-naming](https://pypi.org/project/pep8-naming/)
#[prefix = "N"]
PEP8Naming,
/// [pydocstyle](https://pypi.org/project/pydocstyle/)
#[prefix = "D"]
Pydocstyle,
/// [pyupgrade](https://pypi.org/project/pyupgrade/)
#[prefix = "UP"]
Pyupgrade,
/// [flake8-2020](https://pypi.org/project/flake8-2020/)
#[prefix = "YTT"]
Flake82020,
/// [flake8-annotations](https://pypi.org/project/flake8-annotations/)
#[prefix = "ANN"]
Flake8Annotations,
/// [flake8-async](https://pypi.org/project/flake8-async/)
#[prefix = "ASYNC"]
Flake8Async,
/// [flake8-bandit](https://pypi.org/project/flake8-bandit/)
#[prefix = "S"]
Flake8Bandit,
/// [flake8-blind-except](https://pypi.org/project/flake8-blind-except/)
#[prefix = "BLE"]
Flake8BlindExcept,
/// [flake8-boolean-trap](https://pypi.org/project/flake8-boolean-trap/)
#[prefix = "FBT"]
Flake8BooleanTrap,
/// [flake8-bugbear](https://pypi.org/project/flake8-bugbear/)
#[prefix = "B"]
Flake8Bugbear,
/// [flake8-builtins](https://pypi.org/project/flake8-builtins/)
#[prefix = "A"]
Flake8Builtins,
/// [flake8-commas](https://pypi.org/project/flake8-commas/)
#[prefix = "COM"]
Flake8Commas,
/// [flake8-copyright](https://pypi.org/project/flake8-copyright/)
#[prefix = "CPY"]
Flake8Copyright,
/// [flake8-comprehensions](https://pypi.org/project/flake8-comprehensions/)
#[prefix = "C4"]
Flake8Comprehensions,
/// [flake8-datetimez](https://pypi.org/project/flake8-datetimez/)
#[prefix = "DTZ"]
Flake8Datetimez,
/// [flake8-debugger](https://pypi.org/project/flake8-debugger/)
#[prefix = "T10"]
Flake8Debugger,
/// [flake8-django](https://pypi.org/project/flake8-django/)
#[prefix = "DJ"]
Flake8Django,
/// [flake8-errmsg](https://pypi.org/project/flake8-errmsg/)
#[prefix = "EM"]
Flake8ErrMsg,
/// [flake8-executable](https://pypi.org/project/flake8-executable/)
#[prefix = "EXE"]
Flake8Executable,
/// [flake8-future-annotations](https://pypi.org/project/flake8-future-annotations/)
#[prefix = "FA"]
Flake8FutureAnnotations,
/// [flake8-implicit-str-concat](https://pypi.org/project/flake8-implicit-str-concat/)
#[prefix = "ISC"]
Flake8ImplicitStrConcat,
/// [flake8-import-conventions](https://github.com/joaopalmeiro/flake8-import-conventions)
#[prefix = "ICN"]
Flake8ImportConventions,
/// [flake8-logging-format](https://pypi.org/project/flake8-logging-format/)
#[prefix = "G"]
Flake8LoggingFormat,
/// [flake8-no-pep420](https://pypi.org/project/flake8-no-pep420/)
#[prefix = "INP"]
Flake8NoPep420,
/// [flake8-pie](https://pypi.org/project/flake8-pie/)
#[prefix = "PIE"]
Flake8Pie,
/// [flake8-print](https://pypi.org/project/flake8-print/)
#[prefix = "T20"]
Flake8Print,
/// [flake8-pyi](https://pypi.org/project/flake8-pyi/)
#[prefix = "PYI"]
Flake8Pyi,
/// [flake8-pytest-style](https://pypi.org/project/flake8-pytest-style/)
#[prefix = "PT"]
Flake8PytestStyle,
/// [flake8-quotes](https://pypi.org/project/flake8-quotes/)
#[prefix = "Q"]
Flake8Quotes,
/// [flake8-raise](https://pypi.org/project/flake8-raise/)
#[prefix = "RSE"]
Flake8Raise,
/// [flake8-return](https://pypi.org/project/flake8-return/)
#[prefix = "RET"]
Flake8Return,
/// [flake8-self](https://pypi.org/project/flake8-self/)
#[prefix = "SLF"]
Flake8Self,
/// [flake8-slots](https://pypi.org/project/flake8-slots/)
#[prefix = "SLOT"]
Flake8Slots,
/// [flake8-simplify](https://pypi.org/project/flake8-simplify/)
#[prefix = "SIM"]
Flake8Simplify,
/// [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/)
#[prefix = "TID"]
Flake8TidyImports,
/// [flake8-type-checking](https://pypi.org/project/flake8-type-checking/)
#[prefix = "TCH"]
Flake8TypeChecking,
/// [flake8-gettext](https://pypi.org/project/flake8-gettext/)
#[prefix = "INT"]
Flake8GetText,
/// [flake8-unused-arguments](https://pypi.org/project/flake8-unused-arguments/)
#[prefix = "ARG"]
Flake8UnusedArguments,
/// [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
#[prefix = "PTH"]
Flake8UsePathlib,
/// [flake8-todos](https://github.com/orsinium-labs/flake8-todos/)
#[prefix = "TD"]
Flake8Todos,
/// [flake8-fixme](https://github.com/tommilligan/flake8-fixme)
#[prefix = "FIX"]
Flake8Fixme,
/// [eradicate](https://pypi.org/project/eradicate/)
#[prefix = "ERA"]
Eradicate,
/// [pandas-vet](https://pypi.org/project/pandas-vet/)
#[prefix = "PD"]
PandasVet,
/// [pygrep-hooks](https://github.com/pre-commit/pygrep-hooks)
#[prefix = "PGH"]
PygrepHooks,
/// [Pylint](https://pypi.org/project/pylint/)
#[prefix = "PL"]
Pylint,
/// [tryceratops](https://pypi.org/project/tryceratops/)
#[prefix = "TRY"]
Tryceratops,
/// [flynt](https://pypi.org/project/flynt/)
#[prefix = "FLY"]
Flynt,
/// NumPy-specific rules
#[prefix = "NPY"]
Numpy,
/// [Airflow](https://pypi.org/project/apache-airflow/)
#[prefix = "AIR"]
Airflow,
/// [Perflint](https://pypi.org/project/perflint/)
#[prefix = "PERF"]
Perflint,
/// [refurb](https://pypi.org/project/refurb/)
#[prefix = "FURB"]
Refurb,
/// [flake8-logging](https://pypi.org/project/flake8-logging/)
#[prefix = "LOG"]
Flake8Logging,
/// Ruff-specific rules
#[prefix = "RUF"]
Ruff,
}
pub trait RuleNamespace: Sized {
/// Returns the prefix shared by every code that ruff uses to identify
/// rules from this linter. In the case that multiple `#[prefix]`es are
/// configured for the variant in the `Linter` enum definition, this is
/// the empty string.
fn common_prefix(&self) -> &'static str;
/// Attempts to parse the given rule code. If the prefix is recognized
/// returns the respective variant along with the code with the common
/// prefix stripped.
fn parse_code(code: &str) -> Option<(Self, &str)>;
fn name(&self) -> &'static str;
fn url(&self) -> Option<&'static str>;
}
#[derive(is_macro::Is, Copy, Clone)]
pub enum LintSource {
Ast,
Io,
PhysicalLines,
LogicalLines,
Tokens,
Imports,
Noqa,
Filesystem,
PyprojectToml,
}
impl Rule {
/// The source for the diagnostic (either the AST, the filesystem, or the
/// physical lines).
pub const fn lint_source(&self) -> LintSource {
match self {
Rule::InvalidPyprojectToml => LintSource::PyprojectToml,
Rule::UnusedNOQA => LintSource::Noqa,
Rule::BidirectionalUnicode
| Rule::BlankLineWithWhitespace
| Rule::DocLineTooLong
| Rule::LineTooLong
| Rule::MissingCopyrightNotice
| Rule::MissingNewlineAtEndOfFile
| Rule::MixedSpacesAndTabs
| Rule::TrailingWhitespace => LintSource::PhysicalLines,
Rule::AmbiguousUnicodeCharacterComment
| Rule::AmbiguousUnicodeCharacterDocstring
| Rule::AmbiguousUnicodeCharacterString
| Rule::AvoidableEscapedQuote
| Rule::BadQuotesDocstring
| Rule::BadQuotesInlineString
| Rule::BadQuotesMultilineString
| Rule::BlanketNOQA
| Rule::BlanketTypeIgnore
| Rule::CommentedOutCode
| Rule::ExtraneousParentheses
| Rule::InvalidCharacterBackspace
| Rule::InvalidCharacterEsc
| Rule::InvalidCharacterNul
| Rule::InvalidCharacterSub
| Rule::InvalidCharacterZeroWidthSpace
| Rule::InvalidEscapeSequence
| Rule::InvalidTodoCapitalization
| Rule::InvalidTodoTag
| Rule::LineContainsFixme
| Rule::LineContainsHack
| Rule::LineContainsTodo
| Rule::LineContainsXxx
| Rule::MissingSpaceAfterTodoColon
| Rule::MissingTodoAuthor
| Rule::MissingTodoColon
| Rule::MissingTodoDescription
| Rule::MissingTodoLink
| Rule::MissingTrailingComma
| Rule::MultiLineImplicitStringConcatenation
| Rule::MultipleStatementsOnOneLineColon
| Rule::MultipleStatementsOnOneLineSemicolon
| Rule::ProhibitedTrailingComma
| Rule::ShebangLeadingWhitespace
| Rule::ShebangMissingExecutableFile
| Rule::ShebangMissingPython
| Rule::ShebangNotExecutable
| Rule::ShebangNotFirstLine
| Rule::SingleLineImplicitStringConcatenation
| Rule::TabIndentation
| Rule::TrailingCommaOnBareTuple
| Rule::TypeCommentInStub
| Rule::UselessSemicolon
| Rule::UTF8EncodingDeclaration => LintSource::Tokens,
Rule::IOError => LintSource::Io,
Rule::UnsortedImports | Rule::MissingRequiredImport => LintSource::Imports,
Rule::ImplicitNamespacePackage | Rule::InvalidModuleName => LintSource::Filesystem,
Rule::IndentationWithInvalidMultiple
| Rule::IndentationWithInvalidMultipleComment
| Rule::MissingWhitespace
| Rule::MissingWhitespaceAfterKeyword
| Rule::MissingWhitespaceAroundArithmeticOperator
| Rule::MissingWhitespaceAroundBitwiseOrShiftOperator
| Rule::MissingWhitespaceAroundModuloOperator
| Rule::MissingWhitespaceAroundOperator
| Rule::MissingWhitespaceAroundParameterEquals
| Rule::MultipleLeadingHashesForBlockComment
| Rule::MultipleSpacesAfterComma
| Rule::MultipleSpacesAfterKeyword
| Rule::MultipleSpacesAfterOperator
| Rule::MultipleSpacesBeforeKeyword
| Rule::MultipleSpacesBeforeOperator
| Rule::NoIndentedBlock
| Rule::NoIndentedBlockComment
| Rule::NoSpaceAfterBlockComment
| Rule::NoSpaceAfterInlineComment
| Rule::OverIndented
| Rule::TabAfterComma
| Rule::TabAfterKeyword
| Rule::TabAfterOperator
| Rule::TabBeforeKeyword
| Rule::TabBeforeOperator
| Rule::TooFewSpacesBeforeInlineComment
| Rule::UnexpectedIndentation
| Rule::UnexpectedIndentationComment
| Rule::UnexpectedSpacesAroundKeywordParameterEquals
| Rule::WhitespaceAfterOpenBracket
| Rule::WhitespaceBeforeCloseBracket
| Rule::WhitespaceBeforeParameters
| Rule::WhitespaceBeforePunctuation => LintSource::LogicalLines,
_ => LintSource::Ast,
}
}
/// Return the URL for the rule documentation, if it exists.
pub fn url(&self) -> Option<String> {
self.explanation()
.is_some()
.then(|| format!("{}/rules/{}", env!("CARGO_PKG_HOMEPAGE"), self.as_ref()))
}
}
/// Pairs of checks that shouldn't be enabled together.
pub const INCOMPATIBLE_CODES: &[(Rule, Rule, &str); 2] = &[
(
Rule::BlankLineBeforeClass,
Rule::OneBlankLineBeforeClass,
"`one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are \
incompatible. Ignoring `one-blank-line-before-class`.",
),
(
Rule::MultiLineSummaryFirstLine,
Rule::MultiLineSummarySecondLine,
"`multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are \
incompatible. Ignoring `multi-line-summary-second-line`.",
),
];
#[cfg(test)]
mod tests {
use std::mem::size_of;
use strum::IntoEnumIterator;
use super::{Linter, Rule, RuleNamespace};
#[test]
fn test_rule_naming_convention() {
// The disallowed rule names are defined in a separate file so that they can also be picked up by add_rule.py.
let patterns: Vec<_> = include_str!("../resources/test/disallowed_rule_names.txt")
.trim()
.split('\n')
.map(|line| {
glob::Pattern::new(line).expect("malformed pattern in disallowed_rule_names.txt")
})
.collect();
for rule in Rule::iter() {
let rule_name = rule.as_ref();
for pattern in &patterns {
assert!(
!pattern.matches(rule_name),
"{rule_name} does not match naming convention, see CONTRIBUTING.md"
);
}
}
}
#[test]
fn check_code_serialization() {
for rule in Rule::iter() {
assert!(
Rule::from_code(&format!("{}", rule.noqa_code())).is_ok(),
"{rule:?} could not be round-trip serialized."
);
}
}
#[test]
fn test_linter_parse_code() {
for rule in Rule::iter() {
let code = format!("{}", rule.noqa_code());
let (linter, rest) =
Linter::parse_code(&code).unwrap_or_else(|| panic!("couldn't parse {code:?}"));
assert_eq!(code, format!("{}{rest}", linter.common_prefix()));
}
}
#[test]
fn rule_size() {
assert_eq!(2, size_of::<Rule>());
}
}
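
`Rule::from_code` and `noqa_code` round-trip, which is exactly what `check_code_serialization` asserts above. A minimal sketch:

```rust
use ruff_linter::registry::Rule;

let rule = Rule::from_code("F401").unwrap();
assert_eq!(format!("{}", rule.noqa_code()), "F401");

// Unknown codes are rejected rather than guessed at.
assert!(Rule::from_code("F9999").is_err());
```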

View file

@ -0,0 +1,377 @@
use crate::registry::Rule;
use ruff_macros::CacheKey;
use std::fmt::{Debug, Formatter};
use std::iter::FusedIterator;
const RULESET_SIZE: usize = 11;
/// A set of [`Rule`]s.
///
/// Uses a bitset where a set bit signals that the [`Rule`] with that `u16` discriminant is in the set.
#[derive(Clone, Default, CacheKey, PartialEq, Eq)]
pub struct RuleSet([u64; RULESET_SIZE]);
impl RuleSet {
const EMPTY: [u64; RULESET_SIZE] = [0; RULESET_SIZE];
// 64 fits into a u16 without truncation
#[allow(clippy::cast_possible_truncation)]
const SLICE_BITS: u16 = u64::BITS as u16;
/// Returns an empty rule set.
pub const fn empty() -> Self {
Self(Self::EMPTY)
}
pub fn clear(&mut self) {
self.0 = Self::EMPTY;
}
#[inline]
pub const fn from_rule(rule: Rule) -> Self {
let rule = rule as u16;
let index = (rule / Self::SLICE_BITS) as usize;
debug_assert!(
index < Self::EMPTY.len(),
"Rule index out of bounds. Increase the size of the bitset array."
);
// The bit-position of this specific rule in the slice
let shift = rule % Self::SLICE_BITS;
// Set the index for that rule to 1
let mask = 1 << shift;
let mut bits = Self::EMPTY;
bits[index] = mask;
Self(bits)
}
#[inline]
pub const fn from_rules(rules: &[Rule]) -> Self {
let mut set = RuleSet::empty();
let mut i = 0;
// Uses a while because for loops are not allowed in const functions.
while i < rules.len() {
set = set.union(&RuleSet::from_rule(rules[i]));
i += 1;
}
set
}
/// Returns the union of the two rule sets `self` and `other`
///
/// ## Examples
///
/// ```rust
/// # use ruff_linter::registry::{Rule, RuleSet};
/// let set_1 = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
/// let set_2 = RuleSet::from_rules(&[
/// Rule::BadQuotesInlineString,
/// Rule::BooleanPositionalValueInCall,
/// ]);
///
/// let union = set_1.union(&set_2);
///
/// assert!(union.contains(Rule::AmbiguousFunctionName));
/// assert!(union.contains(Rule::AnyType));
/// assert!(union.contains(Rule::BadQuotesInlineString));
/// assert!(union.contains(Rule::BooleanPositionalValueInCall));
/// ```
#[must_use]
pub const fn union(mut self, other: &Self) -> Self {
let mut i = 0;
while i < self.0.len() {
self.0[i] |= other.0[i];
i += 1;
}
self
}
/// Returns `self` without any of the rules contained in `other`.
///
/// ## Examples
/// ```rust
/// # use ruff_linter::registry::{Rule, RuleSet};
/// let set_1 = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
/// let set_2 = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::Debugger]);
///
/// let subtract = set_1.subtract(&set_2);
///
/// assert!(subtract.contains(Rule::AnyType));
/// assert!(!subtract.contains(Rule::AmbiguousFunctionName));
/// ```
#[must_use]
pub const fn subtract(mut self, other: &Self) -> Self {
let mut i = 0;
while i < self.0.len() {
self.0[i] &= !other.0[i];
i += 1;
}
self
}
/// Returns true if `self` and `other` contain at least one common rule.
///
/// ## Examples
/// ```rust
/// # use ruff_linter::registry::{Rule, RuleSet};
/// let set_1 = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
///
/// assert!(set_1.intersects(&RuleSet::from_rules(&[
/// Rule::AnyType,
/// Rule::BadQuotesInlineString
/// ])));
///
/// assert!(!set_1.intersects(&RuleSet::from_rules(&[
/// Rule::BooleanPositionalValueInCall,
/// Rule::BadQuotesInlineString
/// ])));
/// ```
pub const fn intersects(&self, other: &Self) -> bool {
let mut i = 0;
while i < self.0.len() {
if self.0[i] & other.0[i] != 0 {
return true;
}
i += 1;
}
false
}
/// Returns `true` if this set contains no rules, `false` otherwise.
///
/// ## Examples
///
/// ```rust
/// # use ruff_linter::registry::{Rule, RuleSet};
/// assert!(RuleSet::empty().is_empty());
/// assert!(
/// !RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::BadQuotesInlineString])
/// .is_empty()
/// );
/// ```
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of rules in this set.
///
/// ## Examples
///
/// ```rust
/// # use ruff_linter::registry::{Rule, RuleSet};
/// assert_eq!(RuleSet::empty().len(), 0);
/// assert_eq!(
/// RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::BadQuotesInlineString]).len(),
/// 2
/// );
/// ```
pub const fn len(&self) -> usize {
let mut len: u32 = 0;
let mut i = 0;
while i < self.0.len() {
len += self.0[i].count_ones();
i += 1;
}
len as usize
}
/// Inserts `rule` into the set.
///
/// ## Examples
/// ```rust
/// # use ruff_linter::registry::{Rule, RuleSet};
/// let mut set = RuleSet::empty();
///
/// assert!(!set.contains(Rule::AnyType));
///
/// set.insert(Rule::AnyType);
///
/// assert!(set.contains(Rule::AnyType));
/// ```
pub fn insert(&mut self, rule: Rule) {
let set = std::mem::take(self);
*self = set.union(&RuleSet::from_rule(rule));
}
/// Removes `rule` from the set.
///
/// ## Examples
/// ```rust
/// # use ruff_linter::registry::{Rule, RuleSet};
/// let mut set = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
///
/// set.remove(Rule::AmbiguousFunctionName);
///
/// assert!(set.contains(Rule::AnyType));
/// assert!(!set.contains(Rule::AmbiguousFunctionName));
/// ```
pub fn remove(&mut self, rule: Rule) {
let set = std::mem::take(self);
*self = set.subtract(&RuleSet::from_rule(rule));
}
/// Returns `true` if `rule` is in this set.
///
/// ## Examples
/// ```rust
/// # use ruff_linter::registry::{Rule, RuleSet};
/// let set = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
///
/// assert!(set.contains(Rule::AmbiguousFunctionName));
/// assert!(!set.contains(Rule::BreakOutsideLoop));
/// ```
pub const fn contains(&self, rule: Rule) -> bool {
let rule = rule as u16;
let index = rule as usize / Self::SLICE_BITS as usize;
let shift = rule % Self::SLICE_BITS;
let mask = 1 << shift;
self.0[index] & mask != 0
}
/// Returns an iterator over the rules in this set.
///
/// ## Examples
///
/// ```rust
/// # use ruff_linter::registry::{Rule, RuleSet};
/// let set = RuleSet::from_rules(&[Rule::AmbiguousFunctionName, Rule::AnyType]);
///
/// let iter: Vec<_> = set.iter().collect();
///
/// assert_eq!(iter, vec![Rule::AnyType, Rule::AmbiguousFunctionName]);
/// ```
pub fn iter(&self) -> RuleSetIterator {
RuleSetIterator {
set: self.clone(),
index: 0,
}
}
}
impl Debug for RuleSet {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_set().entries(self.iter()).finish()
}
}
impl FromIterator<Rule> for RuleSet {
fn from_iter<T: IntoIterator<Item = Rule>>(iter: T) -> Self {
let mut set = RuleSet::empty();
for rule in iter {
set.insert(rule);
}
set
}
}
impl Extend<Rule> for RuleSet {
fn extend<T: IntoIterator<Item = Rule>>(&mut self, iter: T) {
let set = std::mem::take(self);
*self = set.union(&RuleSet::from_iter(iter));
}
}
impl IntoIterator for RuleSet {
type IntoIter = RuleSetIterator;
type Item = Rule;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl IntoIterator for &RuleSet {
type IntoIter = RuleSetIterator;
type Item = Rule;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
pub struct RuleSetIterator {
set: RuleSet,
index: u16,
}
impl Iterator for RuleSetIterator {
type Item = Rule;
fn next(&mut self) -> Option<Self::Item> {
loop {
let slice = self.set.0.get_mut(self.index as usize)?;
// `trailing_zeros` is guaranteed to return a value in [0;64]
#[allow(clippy::cast_possible_truncation)]
let bit = slice.trailing_zeros() as u16;
if bit < RuleSet::SLICE_BITS {
*slice ^= 1 << bit;
let rule_value = self.index * RuleSet::SLICE_BITS + bit;
// SAFETY: RuleSet guarantees that only valid rules are stored in the set.
#[allow(unsafe_code)]
return Some(unsafe { std::mem::transmute(rule_value) });
}
self.index += 1;
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.set.len();
(len, Some(len))
}
}
impl ExactSizeIterator for RuleSetIterator {}
impl FusedIterator for RuleSetIterator {}
#[cfg(test)]
mod tests {
use crate::registry::{Rule, RuleSet};
use strum::IntoEnumIterator;
/// Tests that the set can contain all rules
#[test]
fn test_all_rules() {
for rule in Rule::iter() {
let set = RuleSet::from_rule(rule);
assert!(set.contains(rule));
}
let all_rules_set: RuleSet = Rule::iter().collect();
let all_rules: Vec<_> = all_rules_set.iter().collect();
let expected_rules: Vec<_> = Rule::iter().collect();
assert_eq!(all_rules, expected_rules);
}
#[test]
fn remove_not_existing_rule_from_set() {
let mut set = RuleSet::default();
set.remove(Rule::AmbiguousFunctionName);
assert!(!set.contains(Rule::AmbiguousFunctionName));
assert!(set.is_empty());
assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![]);
}
}
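
Because `from_rule`, `from_rules`, `union`, and `subtract` are all `const fn`s, a baseline set can be assembled entirely at compile time. A small sketch:

```rust
use ruff_linter::registry::{Rule, RuleSet};

// Built at compile time; no runtime set construction.
const BASELINE: RuleSet = RuleSet::from_rules(&[Rule::UnusedImport, Rule::UndefinedName])
    .union(&RuleSet::from_rule(Rule::UnusedVariable));

assert!(BASELINE.contains(Rule::UnusedVariable));
assert_eq!(BASELINE.len(), 3);
```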

View file

@ -0,0 +1,262 @@
//! Code modification struct to support symbol renaming within a scope.
use anyhow::{anyhow, Result};
use itertools::Itertools;
use ruff_diagnostics::Edit;
use ruff_python_semantic::{Binding, BindingKind, Scope, ScopeId, SemanticModel};
use ruff_text_size::Ranged;
pub(crate) struct Renamer;
impl Renamer {
/// Rename a symbol (from `name` to `target`).
///
/// ## How it works
///
/// The renaming algorithm is as follows:
///
/// 1. Determine the scope in which the rename should occur. This is typically the scope passed
/// in by the caller. However, if a symbol is `nonlocal` or `global`, then the rename needs
/// to occur in the scope in which the symbol is declared. For example, attempting to rename
/// `x` in `foo` below should trigger a rename in the module scope:
///
/// ```python
/// x = 1
///
/// def foo():
/// global x
/// x = 2
/// ```
///
/// 1. Determine whether the symbol is rebound in another scope. This is effectively the inverse
/// of the previous step: when attempting to rename `x` in the module scope, we need to
/// detect that `x` is rebound in the `foo` scope. Determine every scope in which the symbol
/// is rebound, and add it to the set of scopes in which the rename should occur.
///
/// 1. Start with the first scope in the stack. Take the first [`Binding`] in the scope, for the
/// given name. For example, in the following snippet, we'd start by examining the `x = 1`
/// binding:
///
/// ```python
/// if True:
/// x = 1
/// print(x)
/// else:
/// x = 2
/// print(x)
///
/// print(x)
/// ```
///
/// 1. Rename the [`Binding`]. In most cases, this is a simple replacement. For example,
/// renaming `x` to `y` above would require replacing `x = 1` with `y = 1`. After the
/// first replacement in the snippet above, we'd have:
///
/// ```python
/// if True:
/// y = 1
/// print(x)
/// else:
/// x = 2
/// print(x)
///
/// print(x)
/// ```
///
/// Note that, when renaming imports, we need to instead rename (or add) an alias. For
/// example, to rename `pandas` to `pd`, we may need to rewrite `import pandas` to
/// `import pandas as pd`, rather than `import pd`.
///
/// 1. Rename every reference to the [`Binding`]. For example, renaming the references to the
/// `x = 1` binding above would give us:
///
/// ```python
/// if True:
/// y = 1
/// print(y)
/// else:
/// x = 2
/// print(x)
///
/// print(x)
/// ```
///
/// 1. Rename every delayed annotation. (See [`SemanticModel::delayed_annotations`].)
///
/// 1. Repeat the above process for every [`Binding`] in the scope with the given name.
/// After renaming the `x = 2` binding, we'd have:
///
/// ```python
/// if True:
/// y = 1
/// print(y)
/// else:
/// y = 2
/// print(y)
///
/// print(y)
/// ```
///
/// 1. Repeat the above process for every scope in the stack.
pub(crate) fn rename(
name: &str,
target: &str,
scope: &Scope,
semantic: &SemanticModel,
) -> Result<(Edit, Vec<Edit>)> {
let mut edits = vec![];
// Determine whether the symbol is `nonlocal` or `global`. (A symbol can't be both; Python
// raises a `SyntaxError`.) If the symbol is `nonlocal` or `global`, we need to rename it in
// the scope in which it's declared, rather than the current scope. For example, given:
//
// ```python
// x = 1
//
// def foo():
// global x
// ```
//
// When renaming `x` in `foo`, we detect that `x` is a global, and back out to the module
// scope.
let scope_id = scope.get_all(name).find_map(|binding_id| {
let binding = semantic.binding(binding_id);
match binding.kind {
BindingKind::Global => Some(ScopeId::global()),
BindingKind::Nonlocal(symbol_id) => Some(symbol_id),
_ => None,
}
});
let scope = scope_id.map_or(scope, |scope_id| &semantic.scopes[scope_id]);
edits.extend(Renamer::rename_in_scope(name, target, scope, semantic));
// Find any scopes in which the symbol is referenced as `nonlocal` or `global`. For example,
// given:
//
// ```python
// x = 1
//
// def foo():
// global x
//
// def bar():
// global x
// ```
//
// When renaming `x` in `foo`, we detect that `x` is a global, and back out to the module
// scope. But we need to rename `x` in `bar` too.
//
// Note that it's impossible for a symbol to be referenced as both `nonlocal` and `global`
// in the same program. If a symbol is referenced as `global`, then it must be defined in
// the module scope. If a symbol is referenced as `nonlocal`, then it _can't_ be defined in
// the module scope (because `nonlocal` can only be used in a nested scope).
for scope_id in scope
.get_all(name)
.filter_map(|binding_id| semantic.rebinding_scopes(binding_id))
.flatten()
.dedup()
.copied()
{
let scope = &semantic.scopes[scope_id];
edits.extend(Renamer::rename_in_scope(name, target, scope, semantic));
}
// Deduplicate any edits.
edits.sort();
edits.dedup();
let edit = edits
.pop()
.ok_or(anyhow!("Unable to rename any references to `{name}`"))?;
Ok((edit, edits))
}
/// Rename a symbol in a single [`Scope`].
fn rename_in_scope(
name: &str,
target: &str,
scope: &Scope,
semantic: &SemanticModel,
) -> Vec<Edit> {
let mut edits = vec![];
// Iterate over every binding to the name in the scope.
for binding_id in scope.get_all(name) {
let binding = semantic.binding(binding_id);
// Rename the binding.
if let Some(edit) = Renamer::rename_binding(binding, name, target) {
edits.push(edit);
// Rename any delayed annotations.
if let Some(annotations) = semantic.delayed_annotations(binding_id) {
edits.extend(annotations.iter().filter_map(|annotation_id| {
let annotation = semantic.binding(*annotation_id);
Renamer::rename_binding(annotation, name, target)
}));
}
// Rename the references to the binding.
edits.extend(binding.references().map(|reference_id| {
let reference = semantic.reference(reference_id);
Edit::range_replacement(target.to_string(), reference.range())
}));
}
}
// Deduplicate any edits. In some cases, a reference can be both a read _and_ a write. For
// example, `x += 1` is both a read of and a write to `x`.
edits.sort();
edits.dedup();
edits
}
/// Rename a [`Binding`] reference.
fn rename_binding(binding: &Binding, name: &str, target: &str) -> Option<Edit> {
match &binding.kind {
BindingKind::Import(_) | BindingKind::FromImport(_) => {
if binding.is_alias() {
// Ex) Rename `import pandas as alias` to `import pandas as pd`.
Some(Edit::range_replacement(target.to_string(), binding.range()))
} else {
// Ex) Rename `import pandas` to `import pandas as pd`.
Some(Edit::range_replacement(
format!("{name} as {target}"),
binding.range(),
))
}
}
BindingKind::SubmoduleImport(import) => {
// Ex) Rename `import pandas.core` to `import pandas as pd`.
let module_name = import.call_path.first().unwrap();
Some(Edit::range_replacement(
format!("{module_name} as {target}"),
binding.range(),
))
}
// Avoid renaming builtins and other "special" bindings.
BindingKind::FutureImport | BindingKind::Builtin | BindingKind::Export(_) => None,
// By default, replace the binding's name with the target name.
BindingKind::Annotation
| BindingKind::Argument
| BindingKind::TypeParam
| BindingKind::NamedExprAssignment
| BindingKind::UnpackedAssignment
| BindingKind::Assignment
| BindingKind::BoundException
| BindingKind::LoopVar
| BindingKind::Global
| BindingKind::Nonlocal(_)
| BindingKind::ClassDefinition(_)
| BindingKind::FunctionDefinition(_)
| BindingKind::Deletion
| BindingKind::UnboundException(_) => {
Some(Edit::range_replacement(target.to_string(), binding.range()))
}
}
}
}

View file

@ -0,0 +1,102 @@
use std::collections::HashMap;
use once_cell::sync::Lazy;
/// Returns the redirect target for the given code.
pub(crate) fn get_redirect_target(code: &str) -> Option<&'static str> {
REDIRECTS.get(code).copied()
}
/// Returns the code and the redirect target if the given code is a redirect.
/// (The code itself is returned as well, so that callers can obtain it with a `'static` lifetime.)
pub(crate) fn get_redirect(code: &str) -> Option<(&'static str, &'static str)> {
REDIRECTS.get_key_value(code).map(|(k, v)| (*k, *v))
}
static REDIRECTS: Lazy<HashMap<&'static str, &'static str>> = Lazy::new(|| {
HashMap::from_iter([
// The following are here because we don't yet have the many-to-one mapping enabled.
("SIM111", "SIM110"),
// The following are deprecated.
("C9", "C90"),
("T1", "T10"),
("T2", "T20"),
// TODO(charlie): Remove by 2023-02-01.
("R", "RET"),
("R5", "RET5"),
("R50", "RET50"),
("R501", "RET501"),
("R502", "RET502"),
("R503", "RET503"),
("R504", "RET504"),
("R505", "RET505"),
("R506", "RET506"),
("R507", "RET507"),
("R508", "RET508"),
("IC", "ICN"),
("IC0", "ICN0"),
("IC00", "ICN00"),
("IC001", "ICN001"),
("IC002", "ICN001"),
("IC003", "ICN001"),
("IC004", "ICN001"),
// TODO(charlie): Remove by 2023-01-01.
("U", "UP"),
("U0", "UP0"),
("U00", "UP00"),
("U001", "UP001"),
("U003", "UP003"),
("U004", "UP004"),
("U005", "UP005"),
("U006", "UP006"),
("U007", "UP007"),
("U008", "UP008"),
("U009", "UP009"),
("U01", "UP01"),
("U010", "UP010"),
("U011", "UP011"),
("U012", "UP012"),
("U013", "UP013"),
("U014", "UP014"),
("U015", "UP015"),
("U016", "UP016"),
("U017", "UP017"),
("U019", "UP019"),
// TODO(charlie): Remove by 2023-02-01.
("I2", "TID2"),
("I25", "TID25"),
("I252", "TID252"),
("M", "RUF100"),
("M0", "RUF100"),
("M001", "RUF100"),
// TODO(charlie): Remove by 2023-02-01.
("PDV", "PD"),
("PDV0", "PD0"),
("PDV002", "PD002"),
("PDV003", "PD003"),
("PDV004", "PD004"),
("PDV007", "PD007"),
("PDV008", "PD008"),
("PDV009", "PD009"),
("PDV01", "PD01"),
("PDV010", "PD010"),
("PDV011", "PD011"),
("PDV012", "PD012"),
("PDV013", "PD013"),
("PDV015", "PD015"),
("PDV9", "PD9"),
("PDV90", "PD90"),
("PDV901", "PD901"),
// TODO(charlie): Remove by 2023-04-01.
("TYP", "TCH"),
("TYP001", "TCH001"),
// TODO(charlie): Remove by 2023-06-01.
("RUF004", "B026"),
("PIE802", "C419"),
("PLW0130", "B033"),
("T001", "FIX001"),
("T002", "FIX002"),
("T003", "FIX003"),
("T004", "FIX004"),
])
});
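
A quick sketch of the lookup behavior (crate-internal, since both functions are `pub(crate)`):

```rust
// Deprecated code -> current code.
assert_eq!(get_redirect_target("U001"), Some("UP001"));

// Current codes are not redirected.
assert_eq!(get_redirect_target("UP001"), None);

// `get_redirect` also hands back the key, with a 'static lifetime.
assert_eq!(get_redirect("TYP001"), Some(("TYP001", "TCH001")));
```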

View file

@ -0,0 +1,437 @@
use std::str::FromStr;
use serde::de::{self, Visitor};
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
use strum_macros::EnumIter;
use crate::codes::RuleCodePrefix;
use crate::codes::RuleIter;
use crate::registry::{Linter, Rule, RuleNamespace};
use crate::rule_redirects::get_redirect;
use crate::settings::types::PreviewMode;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum RuleSelector {
/// Select all rules (includes rules in preview if enabled)
All,
/// Legacy category to select all rules in the "nursery", which predated preview mode
#[deprecated(note = "The nursery was replaced with 'preview mode' which has no selector")]
Nursery,
/// Legacy category to select both the `mccabe` and `flake8-comprehensions` linters
/// via a single selector.
C,
/// Legacy category to select both the `flake8-debugger` and `flake8-print` linters
/// via a single selector.
T,
/// Select all rules for a given linter.
Linter(Linter),
/// Select all rules for a given linter with a given prefix.
Prefix {
prefix: RuleCodePrefix,
redirected_from: Option<&'static str>,
},
/// Select an individual rule with a given prefix.
Rule {
prefix: RuleCodePrefix,
redirected_from: Option<&'static str>,
},
}
impl From<Linter> for RuleSelector {
fn from(linter: Linter) -> Self {
Self::Linter(linter)
}
}
impl FromStr for RuleSelector {
type Err = ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"ALL" => Ok(Self::All),
#[allow(deprecated)]
"NURSERY" => Ok(Self::Nursery),
"C" => Ok(Self::C),
"T" => Ok(Self::T),
_ => {
let (s, redirected_from) = match get_redirect(s) {
Some((from, target)) => (target, Some(from)),
None => (s, None),
};
let (linter, code) =
Linter::parse_code(s).ok_or_else(|| ParseError::Unknown(s.to_string()))?;
if code.is_empty() {
return Ok(Self::Linter(linter));
}
// Does the selector select a single rule?
let prefix = RuleCodePrefix::parse(&linter, code)
.map_err(|_| ParseError::Unknown(s.to_string()))?;
if is_single_rule_selector(&prefix) {
Ok(Self::Rule {
prefix,
redirected_from,
})
} else {
Ok(Self::Prefix {
prefix,
redirected_from,
})
}
}
}
}
}
/// Returns `true` if the [`RuleCodePrefix`] matches a single rule exactly
/// (e.g., `E225`, as opposed to `E2`).
pub(crate) fn is_single_rule_selector(prefix: &RuleCodePrefix) -> bool {
let mut rules = prefix.rules();
// The selector must match a single rule.
let Some(rule) = rules.next() else {
return false;
};
if rules.next().is_some() {
return false;
}
// The rule must match the selector exactly.
rule.noqa_code().suffix() == prefix.short_code()
}
#[derive(Debug, thiserror::Error)]
pub enum ParseError {
#[error("Unknown rule selector: `{0}`")]
// TODO(martin): tell the user how to discover rule codes via the CLI once such a command is
// implemented (but that should of course be done only in ruff_cli and not here)
Unknown(String),
}
impl RuleSelector {
pub fn prefix_and_code(&self) -> (&'static str, &'static str) {
match self {
RuleSelector::All => ("", "ALL"),
#[allow(deprecated)]
RuleSelector::Nursery => ("", "NURSERY"),
RuleSelector::C => ("", "C"),
RuleSelector::T => ("", "T"),
RuleSelector::Prefix { prefix, .. } | RuleSelector::Rule { prefix, .. } => {
(prefix.linter().common_prefix(), prefix.short_code())
}
RuleSelector::Linter(l) => (l.common_prefix(), ""),
}
}
}
impl Serialize for RuleSelector {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let (prefix, code) = self.prefix_and_code();
serializer.serialize_str(&format!("{prefix}{code}"))
}
}
impl<'de> Deserialize<'de> for RuleSelector {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
// We are not simply doing:
// let s: &str = Deserialize::deserialize(deserializer)?;
// FromStr::from_str(s).map_err(de::Error::custom)
// here because the toml crate apparently doesn't support that
// (as of toml v0.6.0, running `cargo test` failed with the above two lines).
deserializer.deserialize_str(SelectorVisitor)
}
}
struct SelectorVisitor;
impl Visitor<'_> for SelectorVisitor {
type Value = RuleSelector;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str(
"expected a string code identifying a linter or specific rule, or a partial rule code or ALL to refer to all rules",
)
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
FromStr::from_str(v).map_err(de::Error::custom)
}
}
impl RuleSelector {
/// Return all matching rules, regardless of whether they're in preview.
pub fn all_rules(&self) -> impl Iterator<Item = Rule> + '_ {
match self {
RuleSelector::All => RuleSelectorIter::All(Rule::iter()),
#[allow(deprecated)]
RuleSelector::Nursery => {
RuleSelectorIter::Nursery(Rule::iter().filter(Rule::is_nursery))
}
RuleSelector::C => RuleSelectorIter::Chain(
Linter::Flake8Comprehensions
.rules()
.chain(Linter::McCabe.rules()),
),
RuleSelector::T => RuleSelectorIter::Chain(
Linter::Flake8Debugger
.rules()
.chain(Linter::Flake8Print.rules()),
),
RuleSelector::Linter(linter) => RuleSelectorIter::Vec(linter.rules()),
RuleSelector::Prefix { prefix, .. } | RuleSelector::Rule { prefix, .. } => {
RuleSelectorIter::Vec(prefix.clone().rules())
}
}
}
/// Returns rules matching the selector, taking into account whether preview mode is enabled.
pub fn rules(&self, preview: PreviewMode) -> impl Iterator<Item = Rule> + '_ {
#[allow(deprecated)]
self.all_rules().filter(move |rule| {
// Always include rules that are not in preview or the nursery
!(rule.is_preview() || rule.is_nursery())
// Backwards compatibility allows selection of nursery rules by exact code or dedicated group
|| ((matches!(self, RuleSelector::Rule { .. }) || matches!(self, RuleSelector::Nursery { .. })) && rule.is_nursery())
// Enabling preview includes all preview or nursery rules
|| preview.is_enabled()
})
}
}
pub enum RuleSelectorIter {
All(RuleIter),
Nursery(std::iter::Filter<RuleIter, fn(&Rule) -> bool>),
Chain(std::iter::Chain<std::vec::IntoIter<Rule>, std::vec::IntoIter<Rule>>),
Vec(std::vec::IntoIter<Rule>),
}
impl Iterator for RuleSelectorIter {
type Item = Rule;
fn next(&mut self) -> Option<Self::Item> {
match self {
RuleSelectorIter::All(iter) => iter.next(),
RuleSelectorIter::Nursery(iter) => iter.next(),
RuleSelectorIter::Chain(iter) => iter.next(),
RuleSelectorIter::Vec(iter) => iter.next(),
}
}
}
#[cfg(feature = "schemars")]
mod schema {
use itertools::Itertools;
use schemars::JsonSchema;
use schemars::_serde_json::Value;
use schemars::schema::{InstanceType, Schema, SchemaObject};
use strum::IntoEnumIterator;
use crate::registry::RuleNamespace;
use crate::rule_selector::{Linter, RuleCodePrefix};
use crate::RuleSelector;
impl JsonSchema for RuleSelector {
fn schema_name() -> String {
"RuleSelector".to_string()
}
fn json_schema(_gen: &mut schemars::gen::SchemaGenerator) -> Schema {
Schema::Object(SchemaObject {
instance_type: Some(InstanceType::String.into()),
enum_values: Some(
[
// Include the non-standard "ALL" and "NURSERY" selectors.
"ALL".to_string(),
"NURSERY".to_string(),
// Include the legacy "C" and "T" selectors.
"C".to_string(),
"T".to_string(),
// Include some common redirect targets for those legacy selectors.
"C9".to_string(),
"T1".to_string(),
"T2".to_string(),
]
.into_iter()
.chain(
RuleCodePrefix::iter()
.map(|p| {
let prefix = p.linter().common_prefix();
let code = p.short_code();
format!("{prefix}{code}")
})
.chain(Linter::iter().filter_map(|l| {
let prefix = l.common_prefix();
(!prefix.is_empty()).then(|| prefix.to_string())
})),
)
// Filter out the rule gated behind `#[cfg(feature = "unreachable-code")]`, which is
// off by default.
.filter(|prefix| prefix != "RUF014")
.sorted()
.map(Value::String)
.collect(),
),
..SchemaObject::default()
})
}
}
}
impl RuleSelector {
pub fn specificity(&self) -> Specificity {
match self {
RuleSelector::All => Specificity::All,
#[allow(deprecated)]
RuleSelector::Nursery => Specificity::All,
RuleSelector::T => Specificity::LinterGroup,
RuleSelector::C => Specificity::LinterGroup,
RuleSelector::Linter(..) => Specificity::Linter,
RuleSelector::Rule { .. } => Specificity::Rule,
RuleSelector::Prefix { prefix, .. } => {
let prefix: &'static str = prefix.short_code();
match prefix.len() {
1 => Specificity::Prefix1Char,
2 => Specificity::Prefix2Chars,
3 => Specificity::Prefix3Chars,
4 => Specificity::Prefix4Chars,
_ => panic!("RuleSelector::specificity doesn't yet support codes with so many characters"),
}
}
}
}
}
#[derive(EnumIter, PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Debug)]
pub enum Specificity {
/// The specificity when selecting all rules (e.g., `--select ALL`).
All,
/// The specificity when selecting a legacy linter group (e.g., `--select C` or `--select T`).
LinterGroup,
/// The specificity when selecting a linter (e.g., `--select PLE` or `--select UP`).
Linter,
/// The specificity when selecting via a rule prefix with a one-character code (e.g., `--select PLE1`).
Prefix1Char,
/// The specificity when selecting via a rule prefix with a two-character code (e.g., `--select PLE12`).
Prefix2Chars,
/// The specificity when selecting via a rule prefix with a three-character code (e.g., `--select PLE123`).
Prefix3Chars,
/// The specificity when selecting via a rule prefix with a four-character code (e.g., `--select PLE1234`).
Prefix4Chars,
/// The specificity when selecting an individual rule (e.g., `--select PLE1205`).
Rule,
}
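// Because `Specificity` derives `Ord`, the variants above compare in
// declaration order, from broadest to most specific. For example:
//
//     assert!(Specificity::All < Specificity::Linter);
//     assert!(Specificity::Prefix1Char < Specificity::Rule);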
#[cfg(feature = "clap")]
pub mod clap_completion {
use clap::builder::{PossibleValue, TypedValueParser, ValueParserFactory};
use strum::IntoEnumIterator;
use crate::{
codes::RuleCodePrefix,
registry::{Linter, RuleNamespace},
rule_selector::is_single_rule_selector,
RuleSelector,
};
#[derive(Clone)]
pub struct RuleSelectorParser;
impl ValueParserFactory for RuleSelector {
type Parser = RuleSelectorParser;
fn value_parser() -> Self::Parser {
RuleSelectorParser
}
}
impl TypedValueParser for RuleSelectorParser {
type Value = RuleSelector;
fn parse_ref(
&self,
cmd: &clap::Command,
arg: Option<&clap::Arg>,
value: &std::ffi::OsStr,
) -> Result<Self::Value, clap::Error> {
let value = value
.to_str()
.ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
value.parse().map_err(|_| {
let mut error =
clap::Error::new(clap::error::ErrorKind::ValueValidation).with_cmd(cmd);
if let Some(arg) = arg {
error.insert(
clap::error::ContextKind::InvalidArg,
clap::error::ContextValue::String(arg.to_string()),
);
}
error.insert(
clap::error::ContextKind::InvalidValue,
clap::error::ContextValue::String(value.to_string()),
);
error
})
}
fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
Some(Box::new(
std::iter::once(PossibleValue::new("ALL").help("all rules")).chain(
Linter::iter()
.filter_map(|l| {
let prefix = l.common_prefix();
(!prefix.is_empty()).then(|| PossibleValue::new(prefix).help(l.name()))
})
.chain(
RuleCodePrefix::iter()
// Filter out the rule gated behind `#[cfg(feature = "unreachable-code")]`, which is
// off by default.
.filter(|prefix| {
format!(
"{}{}",
prefix.linter().common_prefix(),
prefix.short_code()
) != "RUF014"
})
.filter_map(|prefix| {
// Ex) `UP`
if prefix.short_code().is_empty() {
let code = prefix.linter().common_prefix();
let name = prefix.linter().name();
return Some(PossibleValue::new(code).help(name));
}
// Ex) `UP004`
if is_single_rule_selector(&prefix) {
let rule = prefix.rules().next()?;
let code = format!(
"{}{}",
prefix.linter().common_prefix(),
prefix.short_code()
);
let name: &'static str = rule.into();
return Some(PossibleValue::new(code).help(name));
}
None
}),
),
),
))
}
}
}
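// A sketch of how the `FromStr` implementation above resolves a few inputs
// (the `Pyupgrade` linter and `UP`-prefixed codes are assumed examples):
//
//     "UP".parse::<RuleSelector>()    // -> Linter(Linter::Pyupgrade): empty code selects the whole linter
//     "UP00".parse::<RuleSelector>()  // -> Prefix { .. }: matches more than one rule
//     "UP007".parse::<RuleSelector>() // -> Rule { .. }: matches exactly one rule
//     "U007".parse::<RuleSelector>()  // -> Rule { redirected_from: Some("U007"), .. } via the redirect table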

View file

@ -0,0 +1,25 @@
//! Airflow-specific rules.
pub(crate) mod rules;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use crate::test::test_path;
use crate::{assert_messages, settings};
#[test_case(Rule::AirflowVariableNameTaskIdMismatch, Path::new("AIR001.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("airflow").join(path).as_path(),
&settings::Settings::for_rule(rule_code),
)?;
assert_messages!(snapshot, diagnostics);
Ok(())
}
}

View file

@ -0,0 +1,3 @@
pub(crate) use task_variable_name::*;
mod task_variable_name;

View file

@ -0,0 +1,101 @@
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast as ast;
use ruff_python_ast::Constant;
use ruff_python_ast::Expr;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks that the task variable name matches the `task_id` value for
/// Airflow Operators.
///
/// ## Why is this bad?
/// When initializing an Airflow Operator, for consistency, the variable
/// name should match the `task_id` value. This makes it easier to
/// follow the flow of the DAG.
///
/// ## Example
/// ```python
/// from airflow.operators import PythonOperator
///
///
/// incorrect_name = PythonOperator(task_id="my_task")
/// ```
///
/// Use instead:
/// ```python
/// from airflow.operators import PythonOperator
///
///
/// my_task = PythonOperator(task_id="my_task")
/// ```
#[violation]
pub struct AirflowVariableNameTaskIdMismatch {
task_id: String,
}
impl Violation for AirflowVariableNameTaskIdMismatch {
#[derive_message_formats]
fn message(&self) -> String {
let AirflowVariableNameTaskIdMismatch { task_id } = self;
format!("Task variable name should match the `task_id`: \"{task_id}\"")
}
}
/// AIR001
pub(crate) fn variable_name_task_id(
checker: &mut Checker,
targets: &[Expr],
value: &Expr,
) -> Option<Diagnostic> {
// If we have more than one target, we can't do anything.
let [target] = targets else {
return None;
};
let Expr::Name(ast::ExprName { id, .. }) = target else {
return None;
};
// If the value is not a call, we can't do anything.
let Expr::Call(ast::ExprCall {
func, arguments, ..
}) = value
else {
return None;
};
// If the function doesn't come from Airflow, we can't do anything.
if !checker
.semantic()
.resolve_call_path(func)
.is_some_and(|call_path| matches!(call_path[0], "airflow"))
{
return None;
}
// If the call doesn't have a `task_id` keyword argument, we can't do anything.
let keyword = arguments.find_keyword("task_id")?;
// If the keyword argument is not a string, we can't do anything.
let task_id = match &keyword.value {
Expr::Constant(constant) => match &constant.value {
Constant::Str(ast::StringConstant { value, .. }) => value,
_ => return None,
},
_ => return None,
};
// If the target name is the same as the task_id, no violation.
if id == task_id {
return None;
}
Some(Diagnostic::new(
AirflowVariableNameTaskIdMismatch {
task_id: task_id.to_string(),
},
target.range(),
))
}

View file

@ -0,0 +1,22 @@
---
source: crates/ruff_linter/src/rules/airflow/mod.rs
---
AIR001.py:11:1: AIR001 Task variable name should match the `task_id`: "my_task"
|
9 | my_task_2 = PythonOperator(callable=my_callable, task_id="my_task_2")
10 |
11 | incorrect_name = PythonOperator(task_id="my_task")
| ^^^^^^^^^^^^^^ AIR001
12 | incorrect_name_2 = PythonOperator(callable=my_callable, task_id="my_task_2")
|
AIR001.py:12:1: AIR001 Task variable name should match the `task_id`: "my_task_2"
|
11 | incorrect_name = PythonOperator(task_id="my_task")
12 | incorrect_name_2 = PythonOperator(callable=my_callable, task_id="my_task_2")
| ^^^^^^^^^^^^^^^^ AIR001
13 |
14 | from my_module import MyClass
|

View file

@ -0,0 +1,282 @@
/// See: [eradicate.py](https://github.com/myint/eradicate/blob/98f199940979c94447a461d50d27862b118b282d/eradicate.py)
use once_cell::sync::Lazy;
use regex::Regex;
use ruff_python_parser::parse_suite;
static ALLOWLIST_REGEX: Lazy<Regex> = Lazy::new(|| {
Regex::new(
r"^(?i)(?:pylint|pyright|noqa|nosec|region|endregion|type:\s*ignore|fmt:\s*(on|off)|isort:\s*(on|off|skip|skip_file|split|dont-add-imports(:\s*\[.*?])?)|mypy:|SPDX-License-Identifier:)"
).unwrap()
});
static BRACKET_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[()\[\]{}\s]+$").unwrap());
static CODE_INDICATORS: &[&str] = &[
"(", ")", "[", "]", "{", "}", ":", "=", "%", "print", "return", "break", "continue", "import",
];
static CODE_KEYWORDS: Lazy<Vec<Regex>> = Lazy::new(|| {
vec![
Regex::new(r"^\s*elif\s+.*\s*:\s*$").unwrap(),
Regex::new(r"^\s*else\s*:\s*$").unwrap(),
Regex::new(r"^\s*try\s*:\s*$").unwrap(),
Regex::new(r"^\s*finally\s*:\s*$").unwrap(),
Regex::new(r"^\s*except\s+.*\s*:\s*$").unwrap(),
]
});
static CODING_COMMENT_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r"^.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)").unwrap());
static HASH_NUMBER: Lazy<Regex> = Lazy::new(|| Regex::new(r"#\d").unwrap());
static MULTILINE_ASSIGNMENT_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r"^\s*([(\[]\s*)?(\w+\s*,\s*)*\w+\s*([)\]]\s*)?=.*[(\[{]$").unwrap());
static PARTIAL_DICTIONARY_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"^\s*['"]\w+['"]\s*:.+[,{]\s*(#.*)?$"#).unwrap());
static PRINT_RETURN_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"^(print|return)\b\s*").unwrap());
/// Returns `true` if a comment contains Python code.
pub(crate) fn comment_contains_code(line: &str, task_tags: &[String]) -> bool {
let line = if let Some(line) = line.trim().strip_prefix('#') {
line.trim_start_matches([' ', '#'])
} else {
return false;
};
// Ignore non-comment-related hashes (e.g., "# Issue #999").
if HASH_NUMBER.is_match(line) {
return false;
}
// Ignore allowlisted comments.
if ALLOWLIST_REGEX.is_match(line) {
return false;
}
if let Some(first) = line.split(&[' ', ':']).next() {
if task_tags.iter().any(|tag| tag == first) {
return false;
}
}
if CODING_COMMENT_REGEX.is_match(line) {
return false;
}
// Check whether the line could plausibly be code.
if CODE_INDICATORS.iter().all(|symbol| !line.contains(symbol)) {
return false;
}
if multiline_case(line) {
return true;
}
if CODE_KEYWORDS.iter().any(|symbol| symbol.is_match(line)) {
return true;
}
let line = PRINT_RETURN_REGEX.replace_all(line, "");
if PARTIAL_DICTIONARY_REGEX.is_match(&line) {
return true;
}
// Finally, attempt to parse the line as Python code.
parse_suite(&line, "<filename>").is_ok()
}
/// Returns `true` if a line is probably part of some multiline code.
fn multiline_case(line: &str) -> bool {
if line.ends_with('\\') {
return true;
}
if MULTILINE_ASSIGNMENT_REGEX.is_match(line) {
return true;
}
if BRACKET_REGEX.is_match(line) {
return true;
}
false
}
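// A few illustrative inputs for the checks above (derived from the regexes):
//
//     multiline_case("x = foo(")   // -> true: assignment that opens a bracket
//     multiline_case("} )]")       // -> true: bracket-only line
//     multiline_case("foo\\")      // -> true: explicit line continuation
//     multiline_case("plain text") // -> false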
#[cfg(test)]
mod tests {
use super::comment_contains_code;
#[test]
fn comment_contains_code_basic() {
assert!(comment_contains_code("# x = 1", &[]));
assert!(comment_contains_code("# # x = 1", &[]));
assert!(comment_contains_code("#from foo import eradicate", &[]));
assert!(comment_contains_code("#import eradicate", &[]));
assert!(comment_contains_code(r#"#"key": value,"#, &[]));
assert!(comment_contains_code(r#"#"key": "value","#, &[]));
assert!(comment_contains_code(r#"#"key": 1 + 1,"#, &[]));
assert!(comment_contains_code("#'key': 1 + 1,", &[]));
assert!(comment_contains_code(r#"#"key": {"#, &[]));
assert!(comment_contains_code("#}", &[]));
assert!(comment_contains_code("#} )]", &[]));
assert!(!comment_contains_code("#", &[]));
assert!(!comment_contains_code("# This is a (real) comment.", &[]));
assert!(!comment_contains_code("# # A (nested) comment.", &[]));
assert!(!comment_contains_code("# 123", &[]));
assert!(!comment_contains_code("# 123.1", &[]));
assert!(!comment_contains_code("# 1, 2, 3", &[]));
assert!(!comment_contains_code("x = 1 # x = 1", &[]));
assert!(!comment_contains_code(
"# pylint: disable=redefined-outer-name",
&[]
),);
assert!(!comment_contains_code(
"# Issue #999: This is not code",
&[]
));
assert!(!comment_contains_code("# mypy: allow-untyped-calls", &[]));
assert!(!comment_contains_code(
"# SPDX-License-Identifier: MIT",
&[]
));
// TODO(charlie): This should be `true` under aggressive mode.
assert!(!comment_contains_code("#},", &[]));
}
#[test]
fn comment_contains_code_with_print() {
assert!(comment_contains_code("#print", &[]));
assert!(comment_contains_code("#print(1)", &[]));
assert!(comment_contains_code("#print 1", &[]));
assert!(!comment_contains_code("#to print", &[]));
}
#[test]
fn comment_contains_code_with_return() {
assert!(comment_contains_code("#return x", &[]));
assert!(!comment_contains_code("#to print", &[]));
}
#[test]
fn comment_contains_code_with_multiline() {
assert!(comment_contains_code("#else:", &[]));
assert!(comment_contains_code("# else : ", &[]));
assert!(comment_contains_code(r#"# "foo %d" % \\"#, &[]));
assert!(comment_contains_code("#elif True:", &[]));
assert!(comment_contains_code("#x = foo(", &[]));
assert!(comment_contains_code("#except Exception:", &[]));
assert!(!comment_contains_code("# this is = to that :(", &[]));
assert!(!comment_contains_code("#else", &[]));
assert!(!comment_contains_code("#or else:", &[]));
assert!(!comment_contains_code("#else True:", &[]));
// Unpacking assignments
assert!(comment_contains_code(
"# user_content_type, _ = TimelineEvent.objects.using(db_alias).get_or_create(",
&[]
),);
assert!(comment_contains_code(
"# (user_content_type, _) = TimelineEvent.objects.using(db_alias).get_or_create(",
&[]
),);
assert!(comment_contains_code(
"# ( user_content_type , _ )= TimelineEvent.objects.using(db_alias).get_or_create(",
&[]
));
assert!(comment_contains_code(
"# app_label=\"core\", model=\"user\"",
&[]
));
assert!(comment_contains_code("# )", &[]));
// TODO(charlie): This should be `true` under aggressive mode.
assert!(!comment_contains_code("#def foo():", &[]));
}
#[test]
fn comment_contains_code_with_sentences() {
assert!(!comment_contains_code("#code is good", &[]));
}
#[test]
fn comment_contains_code_with_encoding() {
assert!(comment_contains_code("# codings=utf-8", &[]));
assert!(!comment_contains_code("# coding=utf-8", &[]));
assert!(!comment_contains_code("#coding= utf-8", &[]));
assert!(!comment_contains_code("# coding: utf-8", &[]));
assert!(!comment_contains_code("# encoding: utf8", &[]));
}
#[test]
fn comment_contains_code_with_default_allowlist() {
assert!(!comment_contains_code("# pylint: disable=A0123", &[]));
assert!(!comment_contains_code("# pylint:disable=A0123", &[]));
assert!(!comment_contains_code("# pylint: disable = A0123", &[]));
assert!(!comment_contains_code("# pylint:disable = A0123", &[]));
assert!(!comment_contains_code(
"# pyright: reportErrorName=true",
&[]
));
assert!(!comment_contains_code("# noqa", &[]));
assert!(!comment_contains_code("# NOQA", &[]));
assert!(!comment_contains_code("# noqa: A123", &[]));
assert!(!comment_contains_code("# noqa:A123", &[]));
assert!(!comment_contains_code("# nosec", &[]));
assert!(!comment_contains_code("# region", &[]));
assert!(!comment_contains_code("# endregion", &[]));
assert!(!comment_contains_code("# region.name", &[]));
assert!(!comment_contains_code("# region name", &[]));
assert!(!comment_contains_code("# region: name", &[]));
assert!(!comment_contains_code("# fmt: on", &[]));
assert!(!comment_contains_code("# fmt: off", &[]));
assert!(!comment_contains_code("# fmt:on", &[]));
assert!(!comment_contains_code("# fmt:off", &[]));
assert!(!comment_contains_code("# isort: on", &[]));
assert!(!comment_contains_code("# isort:on", &[]));
assert!(!comment_contains_code("# isort: off", &[]));
assert!(!comment_contains_code("# isort:off", &[]));
assert!(!comment_contains_code("# isort: skip", &[]));
assert!(!comment_contains_code("# isort:skip", &[]));
assert!(!comment_contains_code("# isort: skip_file", &[]));
assert!(!comment_contains_code("# isort:skip_file", &[]));
assert!(!comment_contains_code("# isort: split", &[]));
assert!(!comment_contains_code("# isort:split", &[]));
assert!(!comment_contains_code("# isort: dont-add-imports", &[]));
assert!(!comment_contains_code("# isort:dont-add-imports", &[]));
assert!(!comment_contains_code(
"# isort: dont-add-imports: [\"import os\"]",
&[]
));
assert!(!comment_contains_code(
"# isort:dont-add-imports: [\"import os\"]",
&[]
));
assert!(!comment_contains_code(
"# isort: dont-add-imports:[\"import os\"]",
&[]
));
assert!(!comment_contains_code(
"# isort:dont-add-imports:[\"import os\"]",
&[]
));
assert!(!comment_contains_code("# type: ignore", &[]));
assert!(!comment_contains_code("# type:ignore", &[]));
assert!(!comment_contains_code("# type: ignore[import]", &[]));
assert!(!comment_contains_code("# type:ignore[import]", &[]));
assert!(!comment_contains_code(
"# TODO: Do that",
&["TODO".to_string()]
));
assert!(!comment_contains_code(
"# FIXME: Fix that",
&["FIXME".to_string()]
));
assert!(!comment_contains_code(
"# XXX: What ever",
&["XXX".to_string()]
));
}
}

View file

@ -0,0 +1,26 @@
//! Rules from [eradicate](https://pypi.org/project/eradicate/).
pub(crate) mod detection;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use crate::test::test_path;
use crate::{assert_messages, settings};
#[test_case(Rule::CommentedOutCode, Path::new("ERA001.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("eradicate").join(path).as_path(),
&settings::Settings::for_rule(rule_code),
)?;
assert_messages!(snapshot, diagnostics);
Ok(())
}
}

View file

@ -0,0 +1,72 @@
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_index::Indexer;
use ruff_source_file::Locator;
use crate::registry::Rule;
use crate::settings::Settings;
use super::super::detection::comment_contains_code;
/// ## What it does
/// Checks for commented-out Python code.
///
/// ## Why is this bad?
/// Commented-out code is dead code, and is often included inadvertently.
/// It should be removed.
///
/// ## Example
/// ```python
/// # print('foo')
/// ```
///
/// ## Options
/// - `task-tags`
#[violation]
pub struct CommentedOutCode;
impl AlwaysAutofixableViolation for CommentedOutCode {
#[derive_message_formats]
fn message(&self) -> String {
format!("Found commented-out code")
}
fn autofix_title(&self) -> String {
"Remove commented-out code".to_string()
}
}
fn is_standalone_comment(line: &str) -> bool {
for char in line.chars() {
if char == '#' {
return true;
} else if !char.is_whitespace() {
return false;
}
}
unreachable!("Comment should contain '#' character")
}
/// ERA001
pub(crate) fn commented_out_code(
diagnostics: &mut Vec<Diagnostic>,
locator: &Locator,
indexer: &Indexer,
settings: &Settings,
) {
for range in indexer.comment_ranges() {
let line = locator.full_lines(*range);
// Verify that the comment is on its own line, and that it contains code.
if is_standalone_comment(line) && comment_contains_code(line, &settings.task_tags[..]) {
let mut diagnostic = Diagnostic::new(CommentedOutCode, *range);
if settings.rules.should_fix(Rule::CommentedOutCode) {
diagnostic.set_fix(Fix::manual(Edit::range_deletion(
locator.full_lines_range(*range),
)));
}
diagnostics.push(diagnostic);
}
}
}
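// Note: the edit above deletes the comment's full line(s) via
// `locator.full_lines_range`, but it is wrapped in `Fix::manual`, so it is
// presented as a "Possible fix" in the snapshots below (alongside the `[*]`
// marker) rather than, presumably, being applied automatically.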

View file

@ -0,0 +1,3 @@
pub(crate) use commented_out_code::*;
mod commented_out_code;

View file

@ -0,0 +1,151 @@
---
source: crates/ruff_linter/src/rules/eradicate/mod.rs
---
ERA001.py:1:1: ERA001 [*] Found commented-out code
|
1 | #import os
| ^^^^^^^^^^ ERA001
2 | # from foo import junk
3 | #a = 3
|
= help: Remove commented-out code
ℹ Possible fix
1 |-#import os
2 1 | # from foo import junk
3 2 | #a = 3
4 3 | a = 4
ERA001.py:2:1: ERA001 [*] Found commented-out code
|
1 | #import os
2 | # from foo import junk
| ^^^^^^^^^^^^^^^^^^^^^^ ERA001
3 | #a = 3
4 | a = 4
|
= help: Remove commented-out code
ℹ Possible fix
1 1 | #import os
2 |-# from foo import junk
3 2 | #a = 3
4 3 | a = 4
5 4 | #foo(1, 2, 3)
ERA001.py:3:1: ERA001 [*] Found commented-out code
|
1 | #import os
2 | # from foo import junk
3 | #a = 3
| ^^^^^^ ERA001
4 | a = 4
5 | #foo(1, 2, 3)
|
= help: Remove commented-out code
ℹ Possible fix
1 1 | #import os
2 2 | # from foo import junk
3 |-#a = 3
4 3 | a = 4
5 4 | #foo(1, 2, 3)
6 5 |
ERA001.py:5:1: ERA001 [*] Found commented-out code
|
3 | #a = 3
4 | a = 4
5 | #foo(1, 2, 3)
| ^^^^^^^^^^^^^ ERA001
6 |
7 | def foo(x, y, z):
|
= help: Remove commented-out code
ℹ Possible fix
2 2 | # from foo import junk
3 3 | #a = 3
4 4 | a = 4
5 |-#foo(1, 2, 3)
6 5 |
7 6 | def foo(x, y, z):
8 7 | content = 1 # print('hello')
ERA001.py:13:5: ERA001 [*] Found commented-out code
|
11 | # This is a real comment.
12 | # # This is a (nested) comment.
13 | #return True
| ^^^^^^^^^^^^ ERA001
14 | return False
|
= help: Remove commented-out code
ℹ Possible fix
10 10 |
11 11 | # This is a real comment.
12 12 | # # This is a (nested) comment.
13 |- #return True
14 13 | return False
15 14 |
16 15 | #import os # noqa: ERA001
ERA001.py:21:5: ERA001 [*] Found commented-out code
|
19 | class A():
20 | pass
21 | # b = c
| ^^^^^^^ ERA001
|
= help: Remove commented-out code
ℹ Possible fix
18 18 |
19 19 | class A():
20 20 | pass
21 |- # b = c
22 21 |
23 22 |
24 23 | dictionary = {
ERA001.py:26:5: ERA001 [*] Found commented-out code
|
24 | dictionary = {
25 | # "key1": 123, # noqa: ERA001
26 | # "key2": 456,
| ^^^^^^^^^^^^^^ ERA001
27 | # "key3": 789, # test
28 | }
|
= help: Remove commented-out code
ℹ Possible fix
23 23 |
24 24 | dictionary = {
25 25 | # "key1": 123, # noqa: ERA001
26 |- # "key2": 456,
27 26 | # "key3": 789, # test
28 27 | }
29 28 |
ERA001.py:27:5: ERA001 [*] Found commented-out code
|
25 | # "key1": 123, # noqa: ERA001
26 | # "key2": 456,
27 | # "key3": 789, # test
| ^^^^^^^^^^^^^^^^^^^^^^ ERA001
28 | }
|
= help: Remove commented-out code
ℹ Possible fix
24 24 | dictionary = {
25 25 | # "key1": 123, # noqa: ERA001
26 26 | # "key2": 456,
27 |- # "key3": 789, # test
28 27 | }
29 28 |
30 29 | #import os # noqa

View file

@ -0,0 +1,9 @@
use ruff_python_ast::Expr;
use ruff_python_semantic::SemanticModel;
pub(super) fn is_sys(expr: &Expr, target: &str, semantic: &SemanticModel) -> bool {
semantic
.resolve_call_path(expr)
.is_some_and(|call_path| call_path.as_slice() == ["sys", target])
}
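// Sketch: with `import sys` in scope, the expression `sys.version_info`
// resolves to the call path ["sys", "version_info"], so:
//
//     is_sys(expr, "version_info", semantic) // -> true
//     is_sys(expr, "version", semantic)      // -> false (target mismatch)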

View file

@ -0,0 +1,35 @@
//! Rules from [flake8-2020](https://pypi.org/project/flake8-2020/).
mod helpers;
pub(crate) mod rules;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::registry::Rule;
use crate::test::test_path;
use crate::{assert_messages, settings};
#[test_case(Rule::SysVersionSlice3, Path::new("YTT101.py"))]
#[test_case(Rule::SysVersion2, Path::new("YTT102.py"))]
#[test_case(Rule::SysVersionCmpStr3, Path::new("YTT103.py"))]
#[test_case(Rule::SysVersionInfo0Eq3, Path::new("YTT201.py"))]
#[test_case(Rule::SixPY3, Path::new("YTT202.py"))]
#[test_case(Rule::SysVersionInfo1CmpInt, Path::new("YTT203.py"))]
#[test_case(Rule::SysVersionInfoMinorCmpInt, Path::new("YTT204.py"))]
#[test_case(Rule::SysVersion0, Path::new("YTT301.py"))]
#[test_case(Rule::SysVersionCmpStr10, Path::new("YTT302.py"))]
#[test_case(Rule::SysVersionSlice1, Path::new("YTT303.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_2020").join(path).as_path(),
&settings::Settings::for_rule(rule_code),
)?;
assert_messages!(snapshot, diagnostics);
Ok(())
}
}

View file

@ -0,0 +1,318 @@
use num_bigint::BigInt;
use ruff_python_ast::{self as ast, CmpOp, Constant, Expr};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::registry::Rule;
use super::super::helpers::is_sys;
/// ## What it does
/// Checks for comparisons that test `sys.version` against string literals,
/// such that the comparison will evaluate to `False` on Python 3.10 or later.
///
/// ## Why is this bad?
/// Comparing `sys.version` to a string is error-prone and may cause subtle
/// bugs, as the comparison will be performed lexicographically, not
/// semantically. For example, `sys.version > "3.9"` will evaluate to `False`
/// when using Python 3.10, as `"3.10"` is lexicographically "less" than
/// `"3.9"`.
///
/// Instead, use `sys.version_info` to access the current major and minor
/// version numbers as a tuple, which can be compared to other tuples
/// without issue.
///
/// ## Example
/// ```python
/// import sys
///
/// sys.version > "3.9" # `False` on Python 3.10.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// sys.version_info > (3, 9) # `True` on Python 3.10.
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[violation]
pub struct SysVersionCmpStr3;
impl Violation for SysVersionCmpStr3 {
#[derive_message_formats]
fn message(&self) -> String {
format!("`sys.version` compared to string (python3.10), use `sys.version_info`")
}
}
/// ## What it does
/// Checks for equality comparisons against the major version returned by
/// `sys.version_info` (e.g., `sys.version_info[0] == 3`).
///
/// ## Why is this bad?
/// Using `sys.version_info[0] == 3` to verify that the major version is
/// Python 3 or greater will fail if the major version number is ever
/// incremented (e.g., to Python 4). This is likely unintended, as code
/// that uses this comparison is likely intended to be run on Python 2,
/// but would now run on Python 4 too.
///
/// Instead, use `>=` to check if the major version number is 3 or greater,
/// to future-proof the code.
///
/// ## Example
/// ```python
/// import sys
///
/// if sys.version_info[0] == 3:
/// ...
/// else:
/// print("Python 2") # This will be printed on Python 4.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// if sys.version_info >= (3,):
/// ...
/// else:
/// print("Python 2") # This will not be printed on Python 4.
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[violation]
pub struct SysVersionInfo0Eq3;
impl Violation for SysVersionInfo0Eq3 {
#[derive_message_formats]
fn message(&self) -> String {
format!("`sys.version_info[0] == 3` referenced (python4), use `>=`")
}
}
/// ## What it does
/// Checks for comparisons that test `sys.version_info[1]` against an integer.
///
/// ## Why is this bad?
/// Comparisons based on the current minor version number alone can cause
/// subtle bugs and would likely lead to unintended effects if the Python
/// major version number were ever incremented (e.g., to Python 4).
///
/// Instead, compare `sys.version_info` to a tuple, including the major and
/// minor version numbers, to future-proof the code.
///
/// ## Example
/// ```python
/// import sys
///
/// if sys.version_info[1] < 7:
/// print("Python 3.6 or earlier.") # This will be printed on Python 4.0.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// if sys.version_info < (3, 7):
/// print("Python 3.6 or earlier.")
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[violation]
pub struct SysVersionInfo1CmpInt;
impl Violation for SysVersionInfo1CmpInt {
#[derive_message_formats]
fn message(&self) -> String {
format!(
"`sys.version_info[1]` compared to integer (python4), compare `sys.version_info` to \
tuple"
)
}
}
/// ## What it does
/// Checks for comparisons that test `sys.version_info.minor` against an integer.
///
/// ## Why is this bad?
/// Comparisons based on the current minor version number alone can cause
/// subtle bugs and would likely lead to unintended effects if the Python
/// major version number were ever incremented (e.g., to Python 4).
///
/// Instead, compare `sys.version_info` to a tuple, including the major and
/// minor version numbers, to future-proof the code.
///
/// ## Example
/// ```python
/// import sys
///
/// if sys.version_info.minor < 7:
/// print("Python 3.6 or earlier.") # This will be printed on Python 4.0.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// if sys.version_info < (3, 7):
/// print("Python 3.6 or earlier.")
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[violation]
pub struct SysVersionInfoMinorCmpInt;
impl Violation for SysVersionInfoMinorCmpInt {
#[derive_message_formats]
fn message(&self) -> String {
format!(
"`sys.version_info.minor` compared to integer (python4), compare `sys.version_info` \
to tuple"
)
}
}
/// ## What it does
/// Checks for comparisons that test `sys.version` against string literals,
/// such that the comparison would fail if the major version number were
/// ever incremented to Python 10 or higher.
///
/// ## Why is this bad?
/// Comparing `sys.version` to a string is error-prone and may cause subtle
/// bugs, as the comparison will be performed lexicographically, not
/// semantically.
///
/// Instead, use `sys.version_info` to access the current major and minor
/// version numbers as a tuple, which can be compared to other tuples
/// without issue.
///
/// ## Example
/// ```python
/// import sys
///
/// sys.version >= "3" # `False` on Python 10.
/// ```
///
/// Use instead:
/// ```python
/// import sys
///
/// sys.version_info >= (3,) # `True` on Python 10.
/// ```
///
/// ## References
/// - [Python documentation: `sys.version`](https://docs.python.org/3/library/sys.html#sys.version)
/// - [Python documentation: `sys.version_info`](https://docs.python.org/3/library/sys.html#sys.version_info)
#[violation]
pub struct SysVersionCmpStr10;
impl Violation for SysVersionCmpStr10 {
#[derive_message_formats]
fn message(&self) -> String {
format!("`sys.version` compared to string (python10), use `sys.version_info`")
}
}
/// YTT103, YTT201, YTT203, YTT204, YTT302
pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[CmpOp], comparators: &[Expr]) {
match left {
Expr::Subscript(ast::ExprSubscript { value, slice, .. })
if is_sys(value, "version_info", checker.semantic()) =>
{
if let Expr::Constant(ast::ExprConstant {
value: Constant::Int(i),
..
}) = slice.as_ref()
{
if *i == BigInt::from(0) {
if let (
[CmpOp::Eq | CmpOp::NotEq],
[Expr::Constant(ast::ExprConstant {
value: Constant::Int(n),
..
})],
) = (ops, comparators)
{
if *n == BigInt::from(3) && checker.enabled(Rule::SysVersionInfo0Eq3) {
checker
.diagnostics
.push(Diagnostic::new(SysVersionInfo0Eq3, left.range()));
}
}
} else if *i == BigInt::from(1) {
if let (
[CmpOp::Lt | CmpOp::LtE | CmpOp::Gt | CmpOp::GtE],
[Expr::Constant(ast::ExprConstant {
value: Constant::Int(_),
..
})],
) = (ops, comparators)
{
if checker.enabled(Rule::SysVersionInfo1CmpInt) {
checker
.diagnostics
.push(Diagnostic::new(SysVersionInfo1CmpInt, left.range()));
}
}
}
}
}
Expr::Attribute(ast::ExprAttribute { value, attr, .. })
if is_sys(value, "version_info", checker.semantic()) && attr == "minor" =>
{
if let (
[CmpOp::Lt | CmpOp::LtE | CmpOp::Gt | CmpOp::GtE],
[Expr::Constant(ast::ExprConstant {
value: Constant::Int(_),
..
})],
) = (ops, comparators)
{
if checker.enabled(Rule::SysVersionInfoMinorCmpInt) {
checker
.diagnostics
.push(Diagnostic::new(SysVersionInfoMinorCmpInt, left.range()));
}
}
}
_ => {}
}
if is_sys(left, "version", checker.semantic()) {
if let (
[CmpOp::Lt | CmpOp::LtE | CmpOp::Gt | CmpOp::GtE],
[Expr::Constant(ast::ExprConstant {
value: Constant::Str(s),
..
})],
) = (ops, comparators)
{
if s.len() == 1 {
if checker.enabled(Rule::SysVersionCmpStr10) {
checker
.diagnostics
.push(Diagnostic::new(SysVersionCmpStr10, left.range()));
}
} else if checker.enabled(Rule::SysVersionCmpStr3) {
checker
.diagnostics
.push(Diagnostic::new(SysVersionCmpStr3, left.range()));
}
}
}
}
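// Summary of the dispatch above (codes per the `YTT...` annotation):
//
//     sys.version_info[0] == 3        -> YTT201 (SysVersionInfo0Eq3)
//     sys.version_info[1] <op> int    -> YTT203 (SysVersionInfo1CmpInt)
//     sys.version_info.minor <op> int -> YTT204 (SysVersionInfoMinorCmpInt)
//     sys.version <op> "3"            -> YTT302 (SysVersionCmpStr10, single-character string)
//     sys.version <op> "3.9"          -> YTT103 (SysVersionCmpStr3, longer string)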

View file

@ -0,0 +1,7 @@
pub(crate) use compare::*;
pub(crate) use name_or_attribute::*;
pub(crate) use subscript::*;
mod compare;
mod name_or_attribute;
mod subscript;

View file

@ -0,0 +1,59 @@
use ruff_python_ast::Expr;
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for uses of `six.PY3`.
///
/// ## Why is this bad?
/// `six.PY3` will evaluate to `False` on Python 4 and greater. This is likely
/// unintended, and may cause code intended to run on Python 2 to run on Python 4
/// too.
///
/// Instead, use `not six.PY2` to validate that the current Python major version is
/// _not_ equal to 2, to future-proof the code.
///
/// ## Example
/// ```python
/// import six
///
/// six.PY3 # `False` on Python 4.
/// ```
///
/// Use instead:
/// ```python
/// import six
///
/// not six.PY2 # `True` on Python 4.
/// ```
///
/// ## References
/// - [PyPI: `six`](https://pypi.org/project/six/)
/// - [Six documentation: `six.PY2`](https://six.readthedocs.io/#six.PY2)
/// - [Six documentation: `six.PY3`](https://six.readthedocs.io/#six.PY3)
#[violation]
pub struct SixPY3;
impl Violation for SixPY3 {
#[derive_message_formats]
fn message(&self) -> String {
format!("`six.PY3` referenced (python4), use `not six.PY2`")
}
}
/// YTT202
pub(crate) fn name_or_attribute(checker: &mut Checker, expr: &Expr) {
if checker
.semantic()
.resolve_call_path(expr)
.is_some_and(|call_path| matches!(call_path.as_slice(), ["six", "PY3"]))
{
checker
.diagnostics
.push(Diagnostic::new(SixPY3, expr.range()));
}
}

Some files were not shown because too many files have changed in this diff.