mirror of
https://github.com/astral-sh/ruff.git
synced 2025-08-10 05:39:12 +00:00
Remove lexing from flake8-pytest-style (#6795)
## Summary

Another drive-by change to remove unnecessary custom lexing. We just need to know the parenthesized range, so we can use... `parenthesized_range`. I've also updated `parenthesized_range` to support nested parentheses.

## Test Plan

`cargo test`
This commit is contained in:
parent
417a1d0717
commit
26e63ab137
3 changed files with 68 additions and 65 deletions
|
@@ -1,18 +1,16 @@
|
|||
use rustc_hash::FxHashMap;
|
||||
use std::hash::BuildHasherDefault;
|
||||
|
||||
use ruff_python_ast::{
|
||||
self as ast, Arguments, Constant, Decorator, Expr, ExprContext, PySourceType, Ranged,
|
||||
};
|
||||
use ruff_python_parser::{lexer, AsMode, Tok};
|
||||
use ruff_text_size::{TextRange, TextSize};
|
||||
use rustc_hash::FxHashMap;
|
||||
|
||||
use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation};
|
||||
use ruff_macros::{derive_message_formats, violation};
|
||||
use ruff_python_ast::comparable::ComparableExpr;
|
||||
use ruff_python_ast::node::AstNode;
|
||||
use ruff_python_ast::parenthesize::parenthesized_range;
|
||||
use ruff_python_ast::{self as ast, Arguments, Constant, Decorator, Expr, ExprContext, Ranged};
|
||||
use ruff_python_codegen::Generator;
|
||||
use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
|
||||
use ruff_source_file::Locator;
|
||||
use ruff_text_size::{TextRange, TextSize};
|
||||
|
||||
use crate::checkers::ast::Checker;
|
||||
use crate::registry::{AsRule, Rule};
|
||||
|
@@ -74,7 +72,7 @@ use super::helpers::{is_pytest_parametrize, split_names};
|
|||
/// - [`pytest` documentation: How to parametrize fixtures and test functions](https://docs.pytest.org/en/latest/how-to/parametrize.html#pytest-mark-parametrize)
|
||||
#[violation]
|
||||
pub struct PytestParametrizeNamesWrongType {
|
||||
pub expected: types::ParametrizeNameType,
|
||||
expected: types::ParametrizeNameType,
|
||||
}
|
||||
|
||||
impl Violation for PytestParametrizeNamesWrongType {
|
||||
|
@@ -159,8 +157,8 @@ impl Violation for PytestParametrizeNamesWrongType {
|
|||
/// - [`pytest` documentation: How to parametrize fixtures and test functions](https://docs.pytest.org/en/latest/how-to/parametrize.html#pytest-mark-parametrize)
|
||||
#[violation]
|
||||
pub struct PytestParametrizeValuesWrongType {
|
||||
pub values: types::ParametrizeValuesType,
|
||||
pub row: types::ParametrizeValuesRowType,
|
||||
values: types::ParametrizeValuesType,
|
||||
row: types::ParametrizeValuesRowType,
|
||||
}
|
||||
|
||||
impl Violation for PytestParametrizeValuesWrongType {
|
||||
|
@@ -283,34 +281,12 @@ fn elts_to_csv(elts: &[Expr], generator: Generator) -> Option<String> {
|
|||
fn get_parametrize_name_range(
|
||||
decorator: &Decorator,
|
||||
expr: &Expr,
|
||||
locator: &Locator,
|
||||
source_type: PySourceType,
|
||||
) -> TextRange {
|
||||
let mut locations = Vec::new();
|
||||
let mut name_range = None;
|
||||
|
||||
// The parenthesis are not part of the AST, so we need to tokenize the
|
||||
// decorator to find them.
|
||||
for (tok, range) in lexer::lex_starts_at(
|
||||
locator.slice(decorator.range()),
|
||||
source_type.as_mode(),
|
||||
decorator.start(),
|
||||
)
|
||||
.flatten()
|
||||
{
|
||||
match tok {
|
||||
Tok::Lpar => locations.push(range.start()),
|
||||
Tok::Rpar => {
|
||||
if let Some(start) = locations.pop() {
|
||||
name_range = Some(TextRange::new(start, range.end()));
|
||||
}
|
||||
}
|
||||
// Stop after the first argument.
|
||||
Tok::Comma => break,
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
name_range.unwrap_or_else(|| expr.range())
|
||||
source: &str,
|
||||
) -> Option<TextRange> {
|
||||
decorator
|
||||
.expression
|
||||
.as_call_expr()
|
||||
.and_then(|call| parenthesized_range(expr.into(), call.arguments.as_any_node_ref(), source))
|
||||
}
|
||||
|
||||
/// PT006
|
||||
|
@@ -329,9 +305,9 @@ fn check_names(checker: &mut Checker, decorator: &Decorator, expr: &Expr) {
|
|||
let name_range = get_parametrize_name_range(
|
||||
decorator,
|
||||
expr,
|
||||
checker.locator(),
|
||||
checker.source_type,
|
||||
);
|
||||
checker.locator().contents(),
|
||||
)
|
||||
.unwrap_or(expr.range());
|
||||
let mut diagnostic = Diagnostic::new(
|
||||
PytestParametrizeNamesWrongType {
|
||||
expected: names_type,
|
||||
|
@@ -364,9 +340,9 @@ fn check_names(checker: &mut Checker, decorator: &Decorator, expr: &Expr) {
|
|||
let name_range = get_parametrize_name_range(
|
||||
decorator,
|
||||
expr,
|
||||
checker.locator(),
|
||||
checker.source_type,
|
||||
);
|
||||
checker.locator().contents(),
|
||||
)
|
||||
.unwrap_or(expr.range());
|
||||
let mut diagnostic = Diagnostic::new(
|
||||
PytestParametrizeNamesWrongType {
|
||||
expected: names_type,
|
||||
|
|
|
@@ -9,7 +9,7 @@ use crate::{ExpressionRef, Ranged};
|
|||
pub fn parenthesized_range(
|
||||
expr: ExpressionRef,
|
||||
parent: AnyNodeRef,
|
||||
contents: &str,
|
||||
source: &str,
|
||||
) -> Option<TextRange> {
|
||||
// If the parent is a node that brings its own parentheses, exclude the closing parenthesis
|
||||
// from our search range. Otherwise, we risk matching on calls, like `func(x)`, for which
|
||||
|
@@ -28,20 +28,21 @@ pub fn parenthesized_range(
|
|||
parent.end()
|
||||
};
|
||||
|
||||
// First, test if there's a closing parenthesis because it tends to be cheaper.
|
||||
let tokenizer =
|
||||
SimpleTokenizer::new(contents, TextRange::new(expr.end(), exclusive_parent_end));
|
||||
let right = tokenizer.skip_trivia().next()?;
|
||||
let right_tokenizer =
|
||||
SimpleTokenizer::new(source, TextRange::new(expr.end(), exclusive_parent_end))
|
||||
.skip_trivia()
|
||||
.take_while(|token| token.kind == SimpleTokenKind::RParen);
|
||||
|
||||
if right.kind == SimpleTokenKind::RParen {
|
||||
// Next, test for the opening parenthesis.
|
||||
let mut tokenizer =
|
||||
SimpleTokenizer::up_to_without_back_comment(expr.start(), contents).skip_trivia();
|
||||
let left = tokenizer.next_back()?;
|
||||
if left.kind == SimpleTokenKind::LParen {
|
||||
return Some(TextRange::new(left.start(), right.end()));
|
||||
}
|
||||
}
|
||||
let left_tokenizer = SimpleTokenizer::up_to_without_back_comment(expr.start(), source)
|
||||
.skip_trivia()
|
||||
.rev()
|
||||
.take_while(|token| token.kind == SimpleTokenKind::LParen);
|
||||
|
||||
None
|
||||
// Zip closing parenthesis with opening parenthesis. The order is intentional, as testing for
|
||||
// closing parentheses is cheaper, and `zip` will avoid progressing the `left_tokenizer` if
|
||||
// the `right_tokenizer` is exhausted.
|
||||
right_tokenizer
|
||||
.zip(left_tokenizer)
|
||||
.last()
|
||||
.map(|(right, left)| TextRange::new(left.start(), right.end()))
|
||||
}
|
||||
|
|
|
@@ -1,5 +1,6 @@
|
|||
use ruff_python_ast::parenthesize::parenthesized_range;
|
||||
use ruff_python_parser::parse_expression;
|
||||
use ruff_text_size::TextRange;
|
||||
|
||||
#[test]
|
||||
fn test_parenthesized_name() {
|
||||
|
@@ -10,7 +11,7 @@ fn test_parenthesized_name() {
|
|||
let name = bin_op.left.as_ref();
|
||||
|
||||
let parenthesized = parenthesized_range(name.into(), bin_op.into(), source_code);
|
||||
assert!(parenthesized.is_some());
|
||||
assert_eq!(parenthesized, Some(TextRange::new(0.into(), 3.into())));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -22,7 +23,7 @@ fn test_non_parenthesized_name() {
|
|||
let name = bin_op.left.as_ref();
|
||||
|
||||
let parenthesized = parenthesized_range(name.into(), bin_op.into(), source_code);
|
||||
assert!(parenthesized.is_none());
|
||||
assert_eq!(parenthesized, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -35,7 +36,7 @@ fn test_parenthesized_argument() {
|
|||
let argument = arguments.args.first().unwrap();
|
||||
|
||||
let parenthesized = parenthesized_range(argument.into(), arguments.into(), source_code);
|
||||
assert!(parenthesized.is_some());
|
||||
assert_eq!(parenthesized, Some(TextRange::new(2.into(), 5.into())));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -48,7 +49,7 @@ fn test_non_parenthesized_argument() {
|
|||
let argument = arguments.args.first().unwrap();
|
||||
|
||||
let parenthesized = parenthesized_range(argument.into(), arguments.into(), source_code);
|
||||
assert!(parenthesized.is_none());
|
||||
assert_eq!(parenthesized, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -60,7 +61,7 @@ fn test_parenthesized_tuple_member() {
|
|||
let member = tuple.elts.last().unwrap();
|
||||
|
||||
let parenthesized = parenthesized_range(member.into(), tuple.into(), source_code);
|
||||
assert!(parenthesized.is_some());
|
||||
assert_eq!(parenthesized, Some(TextRange::new(4.into(), 7.into())));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -72,5 +73,30 @@ fn test_non_parenthesized_tuple_member() {
|
|||
let member = tuple.elts.last().unwrap();
|
||||
|
||||
let parenthesized = parenthesized_range(member.into(), tuple.into(), source_code);
|
||||
assert!(parenthesized.is_none());
|
||||
assert_eq!(parenthesized, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_twice_parenthesized_name() {
|
||||
let source_code = r#"((x)) + 1"#;
|
||||
let expr = parse_expression(source_code, "<filename>").unwrap();
|
||||
|
||||
let bin_op = expr.as_bin_op_expr().unwrap();
|
||||
let name = bin_op.left.as_ref();
|
||||
|
||||
let parenthesized = parenthesized_range(name.into(), bin_op.into(), source_code);
|
||||
assert_eq!(parenthesized, Some(TextRange::new(0.into(), 5.into())));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_twice_parenthesized_argument() {
|
||||
let source_code = r#"f(((a + 1)))"#;
|
||||
let expr = parse_expression(source_code, "<filename>").unwrap();
|
||||
|
||||
let call = expr.as_call_expr().unwrap();
|
||||
let arguments = &call.arguments;
|
||||
let argument = arguments.args.first().unwrap();
|
||||
|
||||
let parenthesized = parenthesized_range(argument.into(), arguments.into(), source_code);
|
||||
assert_eq!(parenthesized, Some(TextRange::new(2.into(), 11.into())));
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue