Remove cyclic dev dependency with the parser crate (#11261)

## Summary

This PR removes the cyclic dev dependency that some of the crates had with
the parser crate.

The cyclic dependencies are:
* `ruff_python_ast` has a **dev dependency** on `ruff_python_parser`, while
`ruff_python_parser` directly depends on `ruff_python_ast`
* `ruff_python_trivia` has a **dev dependency** on `ruff_python_parser`,
while `ruff_python_parser` indirectly depends on `ruff_python_trivia`
(`ruff_python_parser` → `ruff_python_ast` → `ruff_python_trivia`)
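
For context, the manifest entries that created the first cycle looked roughly
like this before the change. This is a minimal sketch pieced together from the
diffs below, not the complete manifests:

```toml
# crates/ruff_python_ast/Cargo.toml (before this PR)
[dev-dependencies]
ruff_python_parser = { path = "../ruff_python_parser" }

# crates/ruff_python_parser/Cargo.toml (unchanged by this PR)
[dependencies]
ruff_python_ast = { path = "../ruff_python_ast" }
```

Cargo itself tolerates dev-dependency cycles like this one, which is why the
workspace still built; `rust-analyzer`, however, rejects them.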

Specifically, this PR does the following:
* Introduces two new crates (the resulting dependency layout is sketched
after this list):
    * `ruff_python_ast_integration_tests`, which now hosts the tests from the
`ruff_python_ast` crate that use the parser
    * `ruff_python_trivia_integration_tests`, which now hosts the tests from
the `ruff_python_trivia` crate that use the parser
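
With the tests moved out, every edge points in one direction: the new
integration-test crates sit at the top of the graph and nothing depends on
them. Roughly (a hand-drawn sketch, not tool output):

```
ruff_python_ast_integration_tests    --(dev)--> ruff_python_parser --> ruff_python_ast
ruff_python_trivia_integration_tests --(dev)--> ruff_python_parser --> ruff_python_ast --> ruff_python_trivia
```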

### Motivation

The main motivation for this PR is to improve the development experience.
Before this PR, `rust-analyzer` wouldn't provide any intellisense in the
`ruff_python_parser` crate for symbols from the `ruff_python_ast` crate, and
logged the cycle explicitly:

```
[ERROR][2024-05-03 13:47:06] .../vim/lsp/rpc.lua:770	"rpc"	"/Users/dhruv/.cargo/bin/rust-analyzer"	"stderr"	"[ERROR project_model::workspace] cyclic deps: ruff_python_parser(Idx::<CrateData>(50)) -> ruff_python_ast(Idx::<CrateData>(37)), alternative path: ruff_python_ast(Idx::<CrateData>(37)) -> ruff_python_parser(Idx::<CrateData>(50))\n"
```

## Test Plan

Check the `rust-analyzer` logs and confirm that the cyclic-dependency error
no longer appears.
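
Another way to double-check is `cargo tree`, which can print only the
dev-dependency edges of a crate. The exact invocation below is illustrative
rather than part of the original test plan; after this PR its output should no
longer include `ruff_python_parser`:

```
cargo tree --package ruff_python_ast --edges dev
```
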
Commit 28cc71fb6b (parent 12b5c3a54c) by Dhruv Manilawala, committed via
GitHub on 2024-05-07 14:54:57 +05:30.
78 changed files with 774 additions and 728 deletions

Cargo.lock (generated)

@@ -2204,7 +2204,6 @@ dependencies = [
"is-macro",
"itertools 0.12.1",
"once_cell",
"ruff_python_parser",
"ruff_python_trivia",
"ruff_source_file",
"ruff_text_size",
@@ -2212,6 +2211,17 @@ dependencies = [
"serde",
]
[[package]]
name = "ruff_python_ast_integration_tests"
version = "0.0.0"
dependencies = [
"insta",
"ruff_python_ast",
"ruff_python_parser",
"ruff_python_trivia",
"ruff_text_size",
]
[[package]]
name = "ruff_python_codegen"
version = "0.0.0"
@@ -2340,13 +2350,23 @@ version = "0.0.0"
dependencies = [
"insta",
"itertools 0.12.1",
"ruff_python_index",
"ruff_python_parser",
"ruff_source_file",
"ruff_text_size",
"unicode-ident",
]
[[package]]
name = "ruff_python_trivia_integration_tests"
version = "0.0.0"
dependencies = [
"insta",
"ruff_python_index",
"ruff_python_parser",
"ruff_python_trivia",
"ruff_source_file",
"ruff_text_size",
]
[[package]]
name = "ruff_server"
version = "0.2.2"

@@ -27,7 +27,6 @@ serde = { workspace = true, optional = true }
[dev-dependencies]
insta = { workspace = true }
ruff_python_parser = { path = "../ruff_python_parser" }
[features]
serde = ["dep:serde", "ruff_text_size/serde"]

@@ -0,0 +1,23 @@
[package]
name = "ruff_python_ast_integration_tests"
version = "0.0.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true
[dependencies]
[dev-dependencies]
ruff_python_parser = { path = "../ruff_python_parser" }
ruff_python_ast = { path = "../ruff_python_ast" }
ruff_python_trivia = { path = "../ruff_python_trivia" }
ruff_text_size = { path = "../ruff_text_size" }
insta = { workspace = true }
[lints]
workspace = true

@@ -0,0 +1,13 @@
# Integration tests for `ruff_python_ast`
This crate includes integration tests for the `ruff_python_ast` crate.
The reason for having a separate crate is to avoid introducing a dev circular
dependency between the `ruff_python_parser` crate and the `ruff_python_ast` crate.
This crate shouldn't include any code, only tests.
**Reference:**
- `rust-analyzer` issue: <https://github.com/rust-lang/rust-analyzer/issues/3390>
- Ruff's pull request: <https://github.com/astral-sh/ruff/pull/11261>

@@ -0,0 +1 @@

@@ -1,8 +1,7 @@
use ruff_python_ast::identifier;
use ruff_python_parser::{parse_suite, ParseError};
use ruff_text_size::{TextRange, TextSize};
use ruff_python_ast::identifier;
#[test]
fn extract_else_range() -> Result<(), ParseError> {
let contents = r"

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -8,4 +8,3 @@ expression: trace
- BytesLiteral
- BytesLiteral
- BytesLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -12,4 +12,3 @@ expression: trace
- TypeParamParamSpec
- StmtExpr
- ExprEllipsisLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -10,4 +10,3 @@ expression: trace
- ExprName
- Lt
- ExprNumberLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -12,4 +12,3 @@ expression: trace
- Decorator
- ExprName
- StmtPass

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -13,4 +13,3 @@ expression: trace
- Comprehension
- ExprName
- ExprName

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -15,4 +15,3 @@ expression: trace
- ExprName
- FStringLiteralElement
- FStringLiteralElement

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -23,4 +23,3 @@ expression: trace
- ExprNumberLiteral
- Parameter
- StmtPass

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -15,4 +15,3 @@ expression: trace
- ExprNumberLiteral
- Parameter
- StmtPass

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -13,4 +13,3 @@ expression: trace
- Parameters
- StmtExpr
- ExprEllipsisLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -9,4 +9,3 @@ expression: trace
- Comprehension
- ExprName
- ExprName

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -30,4 +30,3 @@ expression: trace
- ExprNumberLiteral
- StmtExpr
- ExprEllipsisLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -9,4 +9,3 @@ expression: trace
- Comprehension
- ExprName
- ExprName

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -8,4 +8,3 @@ expression: trace
- StringLiteral
- StringLiteral
- StringLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/preorder.rs
source: crates/ruff_python_ast_integration_tests/tests/preorder.rs
expression: trace
---
- ModModule
@@ -14,4 +14,3 @@ expression: trace
- ExprSubscript
- ExprName
- ExprName

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtExpr
@@ -7,4 +7,3 @@ expression: trace
- BytesLiteral
- BytesLiteral
- BytesLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtClassDef
@@ -10,4 +10,3 @@ expression: trace
- TypeParamParamSpec
- StmtExpr
- ExprEllipsisLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtExpr
@@ -9,4 +9,3 @@ expression: trace
- Lt
- ExprName
- ExprNumberLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtFunctionDef
@@ -9,4 +9,3 @@ expression: trace
- StmtClassDef
- ExprName
- StmtPass

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtExpr
@@ -12,4 +12,3 @@ expression: trace
- ExprName
- Pow
- ExprNumberLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtExpr
@@ -14,4 +14,3 @@ expression: trace
- ExprName
- FStringLiteralElement
- FStringLiteralElement

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtFunctionDef
@@ -16,4 +16,3 @@ expression: trace
- Parameter
- Parameter
- StmtPass

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtFunctionDef
@@ -11,4 +11,3 @@ expression: trace
- Parameter
- Parameter
- StmtPass

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtFunctionDef
@@ -11,4 +11,3 @@ expression: trace
- Parameters
- StmtExpr
- ExprEllipsisLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtExpr
@@ -8,4 +8,3 @@ expression: trace
- ExprName
- ExprName
- ExprName

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtMatch
@@ -24,4 +24,3 @@ expression: trace
- ExprNumberLiteral
- StmtExpr
- ExprEllipsisLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtExpr
@@ -8,4 +8,3 @@ expression: trace
- ExprName
- ExprName
- ExprName

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtExpr
@@ -7,4 +7,3 @@ expression: trace
- StringLiteral
- StringLiteral
- StringLiteral

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_ast/tests/visitor.rs
source: crates/ruff_python_ast_integration_tests/tests/visitor.rs
expression: trace
---
- StmtTypeAlias
@@ -12,4 +12,3 @@ expression: trace
- TypeParamTypeVarTuple
- TypeParamParamSpec
- ExprName

@@ -1,5 +1,4 @@
use ruff_python_ast::stmt_if::elif_else_range;
use ruff_python_parser::{parse_suite, ParseError};
use ruff_text_size::TextSize;

@@ -1,9 +1,6 @@
use std::fmt::{Debug, Write};
use insta::assert_snapshot;
use ruff_python_ast as ast;
use ruff_python_parser::lexer::lex;
use ruff_python_parser::{parse_tokens, Mode};
use ruff_python_ast::visitor::{
walk_alias, walk_bytes_literal, walk_comprehension, walk_except_handler, walk_expr,
@@ -11,12 +8,13 @@ use ruff_python_ast::visitor::{
walk_parameters, walk_pattern, walk_stmt, walk_string_literal, walk_type_param, walk_with_item,
Visitor,
};
use ruff_python_ast::AnyNodeRef;
use ruff_python_ast::{
Alias, BoolOp, BytesLiteral, CmpOp, Comprehension, ExceptHandler, Expr, FString,
FStringElement, Keyword, MatchCase, Operator, Parameter, Parameters, Pattern, Stmt,
StringLiteral, TypeParam, UnaryOp, WithItem,
self as ast, Alias, AnyNodeRef, BoolOp, BytesLiteral, CmpOp, Comprehension, ExceptHandler,
Expr, FString, FStringElement, Keyword, MatchCase, Operator, Parameter, Parameters, Pattern,
Stmt, StringLiteral, TypeParam, UnaryOp, WithItem,
};
use ruff_python_parser::lexer::lex;
use ruff_python_parser::{parse_tokens, Mode};
#[test]
fn function_arguments() {

@@ -21,8 +21,6 @@ unicode-ident = { workspace = true }
[dev-dependencies]
insta = { workspace = true }
ruff_python_parser = { path = "../ruff_python_parser" }
ruff_python_index = { path = "../ruff_python_index" }
[lints]
workspace = true

@@ -203,158 +203,3 @@ impl<'a> IntoIterator for &'a CommentRanges {
self.raw.iter()
}
}
#[cfg(test)]
mod tests {
use ruff_python_index::Indexer;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{tokenize, Mode};
use ruff_source_file::Locator;
use ruff_text_size::TextSize;
#[test]
fn block_comments_two_line_block_at_start() {
// arrange
let source = "# line 1\n# line 2\n";
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, vec![TextSize::new(0), TextSize::new(9)]);
}
#[test]
fn block_comments_indented_block() {
// arrange
let source = " # line 1\n # line 2\n";
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, vec![TextSize::new(4), TextSize::new(17)]);
}
#[test]
fn block_comments_single_line_is_not_a_block() {
// arrange
let source = "\n";
let tokens: Vec<LexResult> = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, Vec::<TextSize>::new());
}
#[test]
fn block_comments_lines_with_code_not_a_block() {
// arrange
let source = "x = 1 # line 1\ny = 2 # line 2\n";
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, Vec::<TextSize>::new());
}
#[test]
fn block_comments_sequential_lines_not_in_block() {
// arrange
let source = " # line 1\n # line 2\n";
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, Vec::<TextSize>::new());
}
#[test]
fn block_comments_lines_in_triple_quotes_not_a_block() {
// arrange
let source = r#"
"""
# line 1
# line 2
"""
"#;
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, Vec::<TextSize>::new());
}
#[test]
fn block_comments_stress_test() {
// arrange
let source = r#"
# block comment 1 line 1
# block comment 2 line 2
# these lines
# do not form
# a block comment
x = 1 # these lines also do not
y = 2 # do not form a block comment
# these lines do form a block comment
#
#
# and so do these
#
"""
# these lines are in triple quotes and
# therefore do not form a block comment
"""
"#;
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(
block_comments,
vec![
// Block #1
TextSize::new(1),
TextSize::new(26),
// Block #2
TextSize::new(174),
TextSize::new(212),
// Block #3
TextSize::new(219),
TextSize::new(225),
TextSize::new(247)
]
);
}
}

@@ -1024,426 +1024,3 @@ impl Iterator for BackwardsTokenizer<'_> {
}
}
}
#[cfg(test)]
mod tests {
use insta::assert_debug_snapshot;
use ruff_python_parser::lexer::lex;
use ruff_python_parser::{Mode, Tok};
use ruff_text_size::{TextLen, TextRange, TextSize};
use crate::tokenizer::{lines_after, lines_before, SimpleToken, SimpleTokenizer};
use crate::{BackwardsTokenizer, SimpleTokenKind};
struct TokenizationTestCase {
source: &'static str,
range: TextRange,
tokens: Vec<SimpleToken>,
}
impl TokenizationTestCase {
fn assert_reverse_tokenization(&self) {
let mut backwards = self.tokenize_reverse();
// Re-reverse to get the tokens in forward order.
backwards.reverse();
assert_eq!(&backwards, &self.tokens);
}
fn tokenize_reverse(&self) -> Vec<SimpleToken> {
let comment_ranges: Vec<_> = lex(self.source, Mode::Module)
.filter_map(|result| {
let (token, range) = result.expect("Input to be a valid python program.");
if matches!(token, Tok::Comment(_)) {
Some(range)
} else {
None
}
})
.collect();
BackwardsTokenizer::new(self.source, self.range, &comment_ranges).collect()
}
fn tokens(&self) -> &[SimpleToken] {
&self.tokens
}
}
fn tokenize_range(source: &'static str, range: TextRange) -> TokenizationTestCase {
let tokens: Vec<_> = SimpleTokenizer::new(source, range).collect();
TokenizationTestCase {
source,
range,
tokens,
}
}
fn tokenize(source: &'static str) -> TokenizationTestCase {
tokenize_range(source, TextRange::new(TextSize::new(0), source.text_len()))
}
#[test]
fn tokenize_trivia() {
let source = "# comment\n # comment";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_parentheses() {
let source = "([{}])";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_comma() {
let source = ",,,,";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_eq() {
// Should tokenize as `==`, then `=`, regardless of whether we're lexing forwards or
// backwards.
let source = "===";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_not_eq() {
// Should tokenize as `!=`, then `=`, regardless of whether we're lexing forwards or
// backwards.
let source = "!==";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_continuation() {
let source = "( \\\n )";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_operators() {
let source = "-> *= ( -= ) ~ // ** **= ^ ^= | |=";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_invalid_operators() {
let source = "-> $=";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus, bogus] vs [bogus, bogus, other]
}
#[test]
fn tricky_unicode() {
let source = "មុ";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn identifier_ending_in_non_start_char() {
let source = "i5";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn string_with_kind() {
let source = "f'foo'";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus] vs [bogus, other]
}
#[test]
fn string_with_byte_kind() {
let source = "BR'foo'";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus] vs [bogus, other]
}
#[test]
fn string_with_invalid_kind() {
let source = "abc'foo'";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus] vs [bogus, other]
}
#[test]
fn identifier_starting_with_string_kind() {
let source = "foo bar";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn ignore_word_with_only_id_continuing_chars() {
let source = "555";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus, bogus] vs [bogus, bogus, other]
}
#[test]
fn tokenize_multichar() {
let source = "if in else match";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_substring() {
let source = "('some string') # comment";
let test_case =
tokenize_range(source, TextRange::new(TextSize::new(14), source.text_len()));
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_slash() {
let source = r" # trailing positional comment
# Positional arguments only after here
,/";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_bogus() {
let source = r#"# leading comment
"a string"
a = (10)"#;
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
assert_debug_snapshot!("Reverse", test_case.tokenize_reverse());
}
#[test]
fn single_quoted_multiline_string_containing_comment() {
let test_case = tokenize(
r"'This string contains a hash looking like a comment\
# This is not a comment'",
);
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn single_quoted_multiline_string_implicit_concatenation() {
let test_case = tokenize(
r#"'This string contains a hash looking like a comment\
# This is' "not_a_comment""#,
);
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn triple_quoted_multiline_string_containing_comment() {
let test_case = tokenize(
r"'''This string contains a hash looking like a comment
# This is not a comment'''",
);
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn comment_containing_triple_quoted_string() {
let test_case = tokenize("'''leading string''' # a comment '''not a string'''");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn comment_containing_single_quoted_string() {
let test_case = tokenize("'leading string' # a comment 'not a string'");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn string_followed_by_multiple_comments() {
let test_case =
tokenize(r#"'a string # containing a hash " # and another hash ' # finally a comment"#);
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn string_with_escaped_quote() {
let test_case = tokenize(r"'a string \' # containing a hash ' # finally a comment");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn string_with_double_escaped_backslash() {
let test_case = tokenize(r"'a string \\' # a comment '");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn empty_string_literal() {
let test_case = tokenize(r"'' # a comment '");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn lines_before_empty_string() {
assert_eq!(lines_before(TextSize::new(0), ""), 0);
}
#[test]
fn lines_before_in_the_middle_of_a_line() {
assert_eq!(lines_before(TextSize::new(4), "a = 20"), 0);
}
#[test]
fn lines_before_on_a_new_line() {
assert_eq!(lines_before(TextSize::new(7), "a = 20\nb = 10"), 1);
}
#[test]
fn lines_before_multiple_leading_newlines() {
assert_eq!(lines_before(TextSize::new(9), "a = 20\n\r\nb = 10"), 2);
}
#[test]
fn lines_before_with_comment_offset() {
assert_eq!(lines_before(TextSize::new(8), "a = 20\n# a comment"), 0);
}
#[test]
fn lines_before_with_trailing_comment() {
assert_eq!(
lines_before(TextSize::new(22), "a = 20 # some comment\nb = 10"),
1
);
}
#[test]
fn lines_before_with_comment_only_line() {
assert_eq!(
lines_before(TextSize::new(22), "a = 20\n# some comment\nb = 10"),
1
);
}
#[test]
fn lines_after_empty_string() {
assert_eq!(lines_after(TextSize::new(0), ""), 0);
}
#[test]
fn lines_after_in_the_middle_of_a_line() {
assert_eq!(lines_after(TextSize::new(4), "a = 20"), 0);
}
#[test]
fn lines_after_before_a_new_line() {
assert_eq!(lines_after(TextSize::new(6), "a = 20\nb = 10"), 1);
}
#[test]
fn lines_after_multiple_newlines() {
assert_eq!(lines_after(TextSize::new(6), "a = 20\n\r\nb = 10"), 2);
}
#[test]
fn lines_after_before_comment_offset() {
assert_eq!(lines_after(TextSize::new(7), "a = 20 # a comment\n"), 0);
}
#[test]
fn lines_after_with_comment_only_line() {
assert_eq!(
lines_after(TextSize::new(6), "a = 20\n# some comment\nb = 10"),
1
);
}
#[test]
fn test_previous_token_simple() {
let cases = &["x = (", "x = ( ", "x = (\n"];
for source in cases {
let token = BackwardsTokenizer::up_to(source.text_len(), source, &[])
.skip_trivia()
.next()
.unwrap();
assert_eq!(
token,
SimpleToken {
kind: SimpleTokenKind::LParen,
range: TextRange::new(TextSize::new(4), TextSize::new(5)),
}
);
}
}
}

@@ -79,51 +79,3 @@ impl PythonWhitespace for str {
self.trim_end_matches(is_python_whitespace)
}
}
#[cfg(test)]
mod tests {
use ruff_python_parser::{parse_suite, ParseError};
use ruff_source_file::Locator;
use ruff_text_size::Ranged;
use crate::has_trailing_content;
#[test]
fn trailing_content() -> Result<(), ParseError> {
let contents = "x = 1";
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(!has_trailing_content(stmt.end(), &locator));
let contents = "x = 1; y = 2";
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(has_trailing_content(stmt.end(), &locator));
let contents = "x = 1 ";
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(!has_trailing_content(stmt.end(), &locator));
let contents = "x = 1 # Comment";
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(!has_trailing_content(stmt.end(), &locator));
let contents = r"
x = 1
y = 2
"
.trim();
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(!has_trailing_content(stmt.end(), &locator));
Ok(())
}
}

@@ -0,0 +1,24 @@
[package]
name = "ruff_python_trivia_integration_tests"
version = "0.0.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true
[dependencies]
[dev-dependencies]
ruff_python_index = { path = "../ruff_python_index" }
ruff_python_parser = { path = "../ruff_python_parser" }
ruff_python_trivia = { path = "../ruff_python_trivia" }
ruff_source_file = { path = "../ruff_source_file" }
ruff_text_size = { path = "../ruff_text_size" }
insta = { workspace = true }
[lints]
workspace = true

@@ -0,0 +1,13 @@
# Integration tests for `ruff_python_trivia`
This crate includes integration tests for the `ruff_python_trivia` crate.
The reason for having a separate crate is to avoid introducing a dev circular
dependency between the `ruff_python_parser` crate and the `ruff_python_trivia` crate.
This crate shouldn't include any code, only tests.
**Reference:**
- `rust-analyzer` issue: <https://github.com/rust-lang/rust-analyzer/issues/3390>
- Ruff's pull request: <https://github.com/astral-sh/ruff/pull/11261>

@@ -0,0 +1 @@

@@ -0,0 +1,151 @@
use ruff_python_index::Indexer;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{tokenize, Mode};
use ruff_source_file::Locator;
use ruff_text_size::TextSize;
#[test]
fn block_comments_two_line_block_at_start() {
// arrange
let source = "# line 1\n# line 2\n";
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, vec![TextSize::new(0), TextSize::new(9)]);
}
#[test]
fn block_comments_indented_block() {
// arrange
let source = " # line 1\n # line 2\n";
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, vec![TextSize::new(4), TextSize::new(17)]);
}
#[test]
fn block_comments_single_line_is_not_a_block() {
// arrange
let source = "\n";
let tokens: Vec<LexResult> = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, Vec::<TextSize>::new());
}
#[test]
fn block_comments_lines_with_code_not_a_block() {
// arrange
let source = "x = 1 # line 1\ny = 2 # line 2\n";
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, Vec::<TextSize>::new());
}
#[test]
fn block_comments_sequential_lines_not_in_block() {
// arrange
let source = " # line 1\n # line 2\n";
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, Vec::<TextSize>::new());
}
#[test]
fn block_comments_lines_in_triple_quotes_not_a_block() {
// arrange
let source = r#"
"""
# line 1
# line 2
"""
"#;
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(block_comments, Vec::<TextSize>::new());
}
#[test]
fn block_comments_stress_test() {
// arrange
let source = r#"
# block comment 1 line 1
# block comment 2 line 2
# these lines
# do not form
# a block comment
x = 1 # these lines also do not
y = 2 # do not form a block comment
# these lines do form a block comment
#
#
# and so do these
#
"""
# these lines are in triple quotes and
# therefore do not form a block comment
"""
"#;
let tokens = tokenize(source, Mode::Module);
let locator = Locator::new(source);
let indexer = Indexer::from_tokens(&tokens, &locator);
// act
let block_comments = indexer.comment_ranges().block_comments(&locator);
// assert
assert_eq!(
block_comments,
vec![
// Block #1
TextSize::new(1),
TextSize::new(26),
// Block #2
TextSize::new(174),
TextSize::new(212),
// Block #3
TextSize::new(219),
TextSize::new(225),
TextSize::new(247)
]
);
}

@@ -0,0 +1,417 @@
use insta::assert_debug_snapshot;
use ruff_python_parser::lexer::lex;
use ruff_python_parser::{Mode, Tok};
use ruff_python_trivia::{lines_after, lines_before, SimpleToken, SimpleTokenizer};
use ruff_python_trivia::{BackwardsTokenizer, SimpleTokenKind};
use ruff_text_size::{TextLen, TextRange, TextSize};
struct TokenizationTestCase {
source: &'static str,
range: TextRange,
tokens: Vec<SimpleToken>,
}
impl TokenizationTestCase {
fn assert_reverse_tokenization(&self) {
let mut backwards = self.tokenize_reverse();
// Re-reverse to get the tokens in forward order.
backwards.reverse();
assert_eq!(&backwards, &self.tokens);
}
fn tokenize_reverse(&self) -> Vec<SimpleToken> {
let comment_ranges: Vec<_> = lex(self.source, Mode::Module)
.filter_map(|result| {
let (token, range) = result.expect("Input to be a valid python program.");
if matches!(token, Tok::Comment(_)) {
Some(range)
} else {
None
}
})
.collect();
BackwardsTokenizer::new(self.source, self.range, &comment_ranges).collect()
}
fn tokens(&self) -> &[SimpleToken] {
&self.tokens
}
}
fn tokenize_range(source: &'static str, range: TextRange) -> TokenizationTestCase {
let tokens: Vec<_> = SimpleTokenizer::new(source, range).collect();
TokenizationTestCase {
source,
range,
tokens,
}
}
fn tokenize(source: &'static str) -> TokenizationTestCase {
tokenize_range(source, TextRange::new(TextSize::new(0), source.text_len()))
}
#[test]
fn tokenize_trivia() {
let source = "# comment\n # comment";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_parentheses() {
let source = "([{}])";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_comma() {
let source = ",,,,";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_eq() {
// Should tokenize as `==`, then `=`, regardless of whether we're lexing forwards or
// backwards.
let source = "===";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_not_eq() {
// Should tokenize as `!=`, then `=`, regardless of whether we're lexing forwards or
// backwards.
let source = "!==";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_continuation() {
let source = "( \\\n )";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_operators() {
let source = "-> *= ( -= ) ~ // ** **= ^ ^= | |=";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_invalid_operators() {
let source = "-> $=";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus, bogus] vs [bogus, bogus, other]
}
#[test]
fn tricky_unicode() {
let source = "មុ";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn identifier_ending_in_non_start_char() {
let source = "i5";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn string_with_kind() {
let source = "f'foo'";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus] vs [bogus, other]
}
#[test]
fn string_with_byte_kind() {
let source = "BR'foo'";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus] vs [bogus, other]
}
#[test]
fn string_with_invalid_kind() {
let source = "abc'foo'";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus] vs [bogus, other]
}
#[test]
fn identifier_starting_with_string_kind() {
let source = "foo bar";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn ignore_word_with_only_id_continuing_chars() {
let source = "555";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
// note: not reversible: [other, bogus, bogus] vs [bogus, bogus, other]
}
#[test]
fn tokenize_multichar() {
let source = "if in else match";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_substring() {
let source = "('some string') # comment";
let test_case = tokenize_range(source, TextRange::new(TextSize::new(14), source.text_len()));
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_slash() {
let source = r" # trailing positional comment
# Positional arguments only after here
,/";
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
test_case.assert_reverse_tokenization();
}
#[test]
fn tokenize_bogus() {
let source = r#"# leading comment
"a string"
a = (10)"#;
let test_case = tokenize(source);
assert_debug_snapshot!(test_case.tokens());
assert_debug_snapshot!("Reverse", test_case.tokenize_reverse());
}
#[test]
fn single_quoted_multiline_string_containing_comment() {
let test_case = tokenize(
r"'This string contains a hash looking like a comment\
# This is not a comment'",
);
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn single_quoted_multiline_string_implicit_concatenation() {
let test_case = tokenize(
r#"'This string contains a hash looking like a comment\
# This is' "not_a_comment""#,
);
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn triple_quoted_multiline_string_containing_comment() {
let test_case = tokenize(
r"'''This string contains a hash looking like a comment
# This is not a comment'''",
);
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn comment_containing_triple_quoted_string() {
let test_case = tokenize("'''leading string''' # a comment '''not a string'''");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn comment_containing_single_quoted_string() {
let test_case = tokenize("'leading string' # a comment 'not a string'");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn string_followed_by_multiple_comments() {
let test_case =
tokenize(r#"'a string # containing a hash " # and another hash ' # finally a comment"#);
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn string_with_escaped_quote() {
let test_case = tokenize(r"'a string \' # containing a hash ' # finally a comment");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn string_with_double_escaped_backslash() {
let test_case = tokenize(r"'a string \\' # a comment '");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn empty_string_literal() {
let test_case = tokenize(r"'' # a comment '");
assert_debug_snapshot!(test_case.tokenize_reverse());
}
#[test]
fn lines_before_empty_string() {
assert_eq!(lines_before(TextSize::new(0), ""), 0);
}
#[test]
fn lines_before_in_the_middle_of_a_line() {
assert_eq!(lines_before(TextSize::new(4), "a = 20"), 0);
}
#[test]
fn lines_before_on_a_new_line() {
assert_eq!(lines_before(TextSize::new(7), "a = 20\nb = 10"), 1);
}
#[test]
fn lines_before_multiple_leading_newlines() {
assert_eq!(lines_before(TextSize::new(9), "a = 20\n\r\nb = 10"), 2);
}
#[test]
fn lines_before_with_comment_offset() {
assert_eq!(lines_before(TextSize::new(8), "a = 20\n# a comment"), 0);
}
#[test]
fn lines_before_with_trailing_comment() {
assert_eq!(
lines_before(TextSize::new(22), "a = 20 # some comment\nb = 10"),
1
);
}
#[test]
fn lines_before_with_comment_only_line() {
assert_eq!(
lines_before(TextSize::new(22), "a = 20\n# some comment\nb = 10"),
1
);
}
#[test]
fn lines_after_empty_string() {
assert_eq!(lines_after(TextSize::new(0), ""), 0);
}
#[test]
fn lines_after_in_the_middle_of_a_line() {
assert_eq!(lines_after(TextSize::new(4), "a = 20"), 0);
}
#[test]
fn lines_after_before_a_new_line() {
assert_eq!(lines_after(TextSize::new(6), "a = 20\nb = 10"), 1);
}
#[test]
fn lines_after_multiple_newlines() {
assert_eq!(lines_after(TextSize::new(6), "a = 20\n\r\nb = 10"), 2);
}
#[test]
fn lines_after_before_comment_offset() {
assert_eq!(lines_after(TextSize::new(7), "a = 20 # a comment\n"), 0);
}
#[test]
fn lines_after_with_comment_only_line() {
assert_eq!(
lines_after(TextSize::new(6), "a = 20\n# some comment\nb = 10"),
1
);
}
#[test]
fn test_previous_token_simple() {
let cases = &["x = (", "x = ( ", "x = (\n"];
for source in cases {
let token = BackwardsTokenizer::up_to(source.text_len(), source, &[])
.skip_trivia()
.next()
.unwrap();
assert_eq!(
token,
SimpleToken {
kind: SimpleTokenKind::LParen,
range: TextRange::new(TextSize::new(4), TextSize::new(5)),
}
);
}
}

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokens()
---
[

@@ -1,5 +1,5 @@
---
source: crates/ruff_python_trivia/src/tokenizer.rs
source: crates/ruff_python_trivia_integration_tests/tests/simple_tokenizer.rs
expression: test_case.tokenize_reverse()
---
[

@@ -1,5 +1,5 @@
use ruff_python_parser::{parse_suite, ParseError};
use ruff_python_trivia::has_trailing_content;
use ruff_source_file::Locator;
use ruff_text_size::Ranged;
#[test]
fn trailing_content() -> Result<(), ParseError> {
let contents = "x = 1";
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(!has_trailing_content(stmt.end(), &locator));
let contents = "x = 1; y = 2";
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(has_trailing_content(stmt.end(), &locator));
let contents = "x = 1 ";
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(!has_trailing_content(stmt.end(), &locator));
let contents = "x = 1 # Comment";
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(!has_trailing_content(stmt.end(), &locator));
let contents = r"
x = 1
y = 2
"
.trim();
let program = parse_suite(contents)?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert!(!has_trailing_content(stmt.end(), &locator));
Ok(())
}

@@ -13,9 +13,10 @@ license = { workspace = true }
[lib]
[dependencies]
ruff_text_size = { path = "../ruff_text_size" }
memchr = { workspace = true }
once_cell = { workspace = true }
ruff_text_size = { path = "../ruff_text_size" }
serde = { workspace = true, optional = true }
[dev-dependencies]