Upgrade RustPython to match new flattened exports (#3141)

Charlie Marsh 2023-02-22 14:36:13 -05:00 committed by GitHub
parent ba61bb6a6c
commit 2f9de335db
49 changed files with 196 additions and 236 deletions
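
The change is mechanical throughout: RustPython now re-exports its parser API from the crate root, and the lexer entry points were renamed. A rough before/after sketch of the pattern repeated in the hunks below (the two halves are shown side by side and are not meant to compile together; paths are as they appear in this diff):

// Before: deep module paths and the old tokenizer constructors.
use rustpython_parser::lexer::{self, Tok};
use rustpython_parser::mode::Mode;
use rustpython_parser::token::StringKind;

// After: flattened re-exports and the renamed lexer entry points.
use rustpython_parser::{lexer, Mode, StringKind, Tok};

// Call sites rename in lockstep:
//   lexer::make_tokenizer(contents, Mode::Module)        -> lexer::lex(contents, Mode::Module)
//   lexer::make_tokenizer_located(contents, mode, loc)   -> lexer::lex_located(contents, mode, loc)
//   rustpython_parser::error::ParseError                 -> rustpython_parser::ParseError

The `rustpython_parser::parser` module likewise loses its public path; call sites now alias the crate itself (`use rustpython_parser as parser;`) and keep calling `parser::parse_program(...)` unchanged.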

Cargo.lock (generated)
View file

@@ -2150,7 +2150,7 @@ dependencies = [
[[package]]
name = "rustpython-ast"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d"
source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
dependencies = [
"num-bigint",
"rustpython-compiler-core",
@@ -2159,7 +2159,7 @@ dependencies = [
[[package]]
name = "rustpython-common"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d"
source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
dependencies = [
"ascii",
"bitflags",
@@ -2184,7 +2184,7 @@ dependencies = [
[[package]]
name = "rustpython-compiler-core"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d"
source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
dependencies = [
"bincode",
"bitflags",
@@ -2201,7 +2201,7 @@ dependencies = [
[[package]]
name = "rustpython-parser"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d"
source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
dependencies = [
"ahash",
"anyhow",

View file

@@ -13,8 +13,8 @@ libcst = { git = "https://github.com/charliermarsh/LibCST", rev = "f2f0b7a487a87
once_cell = { version = "1.16.0" }
regex = { version = "1.6.0" }
rustc-hash = { version = "1.1.0" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "6d71f758170d504817cc47720762c41d9031506d" }
rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "6d71f758170d504817cc47720762c41d9031506d" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "edf5995a1e4c366976304ca05432dd27c913054e" }
rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "edf5995a1e4c366976304ca05432dd27c913054e" }
schemars = { version = "0.8.11" }
serde = { version = "1.0.147", features = ["derive"] }
serde_json = { version = "1.0.87" }

View file

@@ -289,3 +289,7 @@ def x(y):
return 1
case 1:
print() # error
def foo(baz: str) -> str:
return baz

View file

@@ -9,10 +9,7 @@ use rustpython_parser::ast::{
Arguments, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprKind, Keyword, KeywordData,
Located, Location, MatchCase, Pattern, PatternKind, Stmt, StmtKind,
};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use rustpython_parser::token::StringKind;
use rustpython_parser::{lexer, Mode, StringKind, Tok};
use smallvec::{smallvec, SmallVec};
use crate::ast::types::{Binding, BindingKind, CallPath, Range};
@@ -656,7 +653,7 @@ pub fn has_comments<T>(located: &Located<T>, locator: &Locator) -> bool {
/// Returns `true` if a [`Range`] includes at least one comment.
pub fn has_comments_in(range: Range, locator: &Locator) -> bool {
for tok in lexer::make_tokenizer_located(locator.slice(&range), Mode::Module, range.location) {
for tok in lexer::lex_located(locator.slice(&range), Mode::Module, range.location) {
match tok {
Ok((_, tok, _)) => {
if matches!(tok, Tok::Comment(..)) {
@@ -871,8 +868,7 @@ pub fn match_parens(start: Location, locator: &Locator) -> Option<Range> {
let mut fix_start = None;
let mut fix_end = None;
let mut count: usize = 0;
for (start, tok, end) in lexer::make_tokenizer_located(contents, Mode::Module, start).flatten()
{
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, start).flatten() {
if matches!(tok, Tok::Lpar) {
if count == 0 {
fix_start = Some(start);
@@ -904,8 +900,7 @@ pub fn identifier_range(stmt: &Stmt, locator: &Locator) -> Range {
| StmtKind::AsyncFunctionDef { .. }
) {
let contents = locator.slice(&Range::from_located(stmt));
for (start, tok, end) in
lexer::make_tokenizer_located(contents, Mode::Module, stmt.location).flatten()
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt.location).flatten()
{
if matches!(tok, Tok::Name { .. }) {
return Range::new(start, end);
@@ -937,7 +932,7 @@ pub fn find_names<'a, T, U>(
locator: &'a Locator,
) -> impl Iterator<Item = Range> + 'a {
let contents = locator.slice(&Range::from_located(located));
lexer::make_tokenizer_located(contents, Mode::Module, located.location)
lexer::lex_located(contents, Mode::Module, located.location)
.flatten()
.filter(|(_, tok, _)| matches!(tok, Tok::Name { .. }))
.map(|(start, _, end)| Range {
@@ -955,7 +950,7 @@ pub fn excepthandler_name_range(handler: &Excepthandler, locator: &Locator) -> O
(Some(_), Some(type_)) => {
let type_end_location = type_.end_location.unwrap();
let contents = locator.slice(&Range::new(type_end_location, body[0].location));
let range = lexer::make_tokenizer_located(contents, Mode::Module, type_end_location)
let range = lexer::lex_located(contents, Mode::Module, type_end_location)
.flatten()
.tuple_windows()
.find(|(tok, next_tok)| {
@@ -982,7 +977,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range {
location: handler.location,
end_location: end,
});
let range = lexer::make_tokenizer_located(contents, Mode::Module, handler.location)
let range = lexer::lex_located(contents, Mode::Module, handler.location)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Except { .. }))
.map(|(location, _, end_location)| Range {
@@ -996,7 +991,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range {
/// Find f-strings that don't contain any formatted values in a `JoinedStr`.
pub fn find_useless_f_strings(expr: &Expr, locator: &Locator) -> Vec<(Range, Range)> {
let contents = locator.slice(&Range::from_located(expr));
lexer::make_tokenizer_located(contents, Mode::Module, expr.location)
lexer::lex_located(contents, Mode::Module, expr.location)
.flatten()
.filter_map(|(location, tok, end_location)| match tok {
Tok::String {
@@ -1050,7 +1045,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
.expect("Expected orelse to be non-empty")
.location,
});
let range = lexer::make_tokenizer_located(contents, Mode::Module, body_end)
let range = lexer::lex_located(contents, Mode::Module, body_end)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Else))
.map(|(location, _, end_location)| Range {
@@ -1066,7 +1061,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
/// Return the `Range` of the first `Tok::Colon` token in a `Range`.
pub fn first_colon_range(range: Range, locator: &Locator) -> Option<Range> {
let contents = locator.slice(&range);
let range = lexer::make_tokenizer_located(contents, Mode::Module, range.location)
let range = lexer::lex_located(contents, Mode::Module, range.location)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Colon))
.map(|(location, _, end_location)| Range {
@@ -1096,7 +1091,7 @@ pub fn elif_else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
_ => return None,
};
let contents = locator.slice(&Range::new(start, end));
let range = lexer::make_tokenizer_located(contents, Mode::Module, start)
let range = lexer::lex_located(contents, Mode::Module, start)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Elif | Tok::Else))
.map(|(location, _, end_location)| Range {
@@ -1212,8 +1207,8 @@ pub fn is_logger_candidate(func: &Expr) -> bool {
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use rustpython_parser::parser;
use crate::ast::helpers::{
elif_else_range, else_range, first_colon_range, identifier_range, match_trailing_content,

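For reference, the renamed entry point keeps the old shape: `lexer::lex_located` still yields `Result`-wrapped `(Location, Tok, Location)` triples, so the scanning loops above only change at the call site. A minimal sketch of the comment scan under that assumption (`contains_comment` is a hypothetical helper, not part of this diff):

use rustpython_parser::ast::Location;
use rustpython_parser::{lexer, Mode, Tok};

/// Hypothetical reduction of `has_comments_in`: `true` if the slice
/// starting at `start` contains at least one comment token.
fn contains_comment(source: &str, start: Location) -> bool {
    lexer::lex_located(source, Mode::Module, start)
        .flatten() // keep only the Ok tokens, as the helpers above do
        .any(|(_, tok, _)| matches!(tok, Tok::Comment(..)))
}
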
View file

@@ -1,9 +1,7 @@
use bitflags::bitflags;
use rustc_hash::FxHashMap;
use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Stmt, StmtKind};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::helpers::any_over_expr;
use crate::ast::types::{BindingKind, Scope};
@@ -285,9 +283,7 @@ pub type LocatedCmpop<U = ()> = Located<Cmpop, U>;
/// `CPython` doesn't either. This method iterates over the token stream and
/// re-identifies [`Cmpop`] nodes, annotating them with valid ranges.
pub fn locate_cmpops(contents: &str) -> Vec<LocatedCmpop> {
let mut tok_iter = lexer::make_tokenizer(contents, Mode::Module)
.flatten()
.peekable();
let mut tok_iter = lexer::lex(contents, Mode::Module).flatten().peekable();
let mut ops: Vec<LocatedCmpop> = vec![];
let mut count: usize = 0;
loop {

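The doc comment above explains the trick: the AST gives `Cmpop` nodes no usable range, so the pass re-lexes the source and re-identifies operators from the token stream, peeking ahead for the two-token forms (`not in`, `is not`). A condensed sketch of that peekable pattern (`count_not_in` is a hypothetical name, assuming the tuple-shaped tokens shown above):

use rustpython_parser::{lexer, Mode, Tok};

/// Hypothetical example: count `not in` operators by pairing adjacent tokens.
fn count_not_in(contents: &str) -> usize {
    let mut count = 0;
    let mut tokens = lexer::lex(contents, Mode::Module).flatten().peekable();
    while let Some((_, tok, _)) = tokens.next() {
        // `not in` spans two tokens, so peek before advancing.
        if matches!(tok, Tok::Not) && matches!(tokens.peek(), Some((_, Tok::In, _))) {
            count += 1;
        }
    }
    count
}
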
View file

@@ -4,9 +4,7 @@ use libcst_native::{
Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement,
};
use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Location, Stmt, StmtKind};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::helpers;
use crate::ast::helpers::to_absolute;
@@ -372,9 +370,7 @@ pub fn remove_argument(
if n_arguments == 1 {
// Case 1: there is only one argument.
let mut count: usize = 0;
for (start, tok, end) in
lexer::make_tokenizer_located(contents, Mode::Module, stmt_at).flatten()
{
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() {
if matches!(tok, Tok::Lpar) {
if count == 0 {
fix_start = Some(if remove_parentheses {
@@ -406,9 +402,7 @@
{
// Case 2: argument or keyword is _not_ the last node.
let mut seen_comma = false;
for (start, tok, end) in
lexer::make_tokenizer_located(contents, Mode::Module, stmt_at).flatten()
{
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() {
if seen_comma {
if matches!(tok, Tok::NonLogicalNewline) {
// Also delete any non-logical newlines after the comma.
@@ -431,9 +425,7 @@
} else {
// Case 3: argument or keyword is the last node, so we have to find the last
// comma in the stmt.
for (start, tok, _) in
lexer::make_tokenizer_located(contents, Mode::Module, stmt_at).flatten()
{
for (start, tok, _) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() {
if start == expr_at {
fix_end = Some(expr_end);
break;
@@ -455,8 +447,8 @@ pub fn remove_argument(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use rustpython_parser::parser;
use crate::autofix::helpers::{next_stmt_break, trailing_semicolon};
use crate::source_code::Locator;

View file

@@ -6,19 +6,18 @@ use std::path::Path;
use itertools::Itertools;
use log::error;
use nohash_hasher::IntMap;
use ruff_python::builtins::{BUILTINS, MAGIC_GLOBALS};
use ruff_python::typing::TYPING_EXTENSIONS;
use rustc_hash::{FxHashMap, FxHashSet};
use rustpython_common::cformat::{CFormatError, CFormatErrorType};
use rustpython_parser as parser;
use rustpython_parser::ast::{
Arg, Arguments, Comprehension, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprContext,
ExprKind, KeywordData, Located, Location, Operator, Pattern, PatternKind, Stmt, StmtKind,
Suite,
};
use rustpython_parser::parser;
use smallvec::smallvec;
use ruff_python::builtins::{BUILTINS, MAGIC_GLOBALS};
use ruff_python::typing::TYPING_EXTENSIONS;
use crate::ast::helpers::{
binding_range, collect_call_path, extract_handler_names, from_relative_import, to_module_path,
};
@@ -2060,8 +2059,8 @@ where
value,
..
} => {
// If we're in a class or module scope, then the annotation needs to be available
// at runtime.
// If we're in a class or module scope, then the annotation needs to be
// available at runtime.
// See: https://docs.python.org/3/reference/simple_stmts.html#annotated-assignment-statements
if !self.annotations_future_enabled
&& matches!(

View file

@@ -152,9 +152,8 @@ pub fn check_logical_lines(
#[cfg(test)]
mod tests {
use rustpython_parser::lexer;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode};
use crate::checkers::logical_lines::iter_logical_lines;
use crate::source_code::Locator;
@@ -165,7 +164,7 @@ mod tests {
x = 1
y = 2
z = x + 1"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()
@@ -186,7 +185,7 @@ x = [
]
y = 2
z = x + 1"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()
@@ -200,7 +199,7 @@ z = x + 1"#;
assert_eq!(actual, expected);
let contents = "x = 'abc'";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()
@@ -213,7 +212,7 @@ z = x + 1"#;
def f():
x = 1
f()"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()
@@ -228,7 +227,7 @@ def f():
# Comment goes here.
x = 1
f()"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()

View file

@@ -1,6 +1,7 @@
//! Lint rules based on token traversal.
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::lex::docstring_detection::StateMachine;
use crate::registry::{Diagnostic, Rule};

View file

@@ -3,7 +3,8 @@
use bitflags::bitflags;
use nohash_hasher::{IntMap, IntSet};
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::registry::LintSource;
use crate::settings::Settings;
@@ -150,15 +151,14 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
#[cfg(test)]
mod tests {
use nohash_hasher::{IntMap, IntSet};
use rustpython_parser::lexer;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode};
use crate::directives::{extract_isort_directives, extract_noqa_line_for};
#[test]
fn noqa_extraction() {
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = 2
z = x + 1",
@@ -167,7 +167,7 @@ z = x + 1",
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"
x = 1
y = 2
@@ -177,7 +177,7 @@ z = x + 1",
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = 2
z = x + 1
@@ -187,7 +187,7 @@ z = x + 1
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = 2
@@ -198,7 +198,7 @@ z = x + 1
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = '''abc
def
ghi
@@ -213,7 +213,7 @@ z = x + 1",
IntMap::from_iter([(1, 4), (2, 4), (3, 4)])
);
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = '''abc
def
@@ -228,7 +228,7 @@ z = 2",
IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
);
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = '''abc
def
@@ -242,7 +242,7 @@ ghi
IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
);
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
r#"x = \
1"#,
Mode::Module,
@@ -250,7 +250,7 @@ ghi
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::from_iter([(1, 2)]));
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
r#"from foo import \
bar as baz, \
qux as quux"#,
@@ -262,7 +262,7 @@ ghi
IntMap::from_iter([(1, 3), (2, 3)])
);
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
r#"
# Foo
from foo import \
@@ -286,7 +286,7 @@ y = \
let contents = "x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
let contents = "# isort: off
@@ -294,7 +294,7 @@ x = 1
y = 2
# isort: on
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4])
@@ -307,7 +307,7 @@ y = 2
# isort: on
z = x + 1
# isort: on";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4, 5])
@@ -317,7 +317,7 @@ z = x + 1
x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4])
@@ -327,7 +327,7 @@ z = x + 1";
x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
let contents = "# isort: off
@@ -336,7 +336,7 @@ x = 1
y = 2
# isort: skip_file
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
}
@@ -345,20 +345,20 @@ z = x + 1";
let contents = "x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, Vec::<usize>::new());
let contents = "x = 1
y = 2
# isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![3]);
let contents = "x = 1
y = 2 # isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![2]);
}
}

View file

@@ -1,10 +1,12 @@
//! Doc line extraction. In this context, a doc line is a line consisting of a
//! standalone comment or a constant string statement.
use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite};
use rustpython_parser::lexer::{LexResult, Tok};
use std::iter::FusedIterator;
use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::visitor;
use crate::ast::visitor::Visitor;

View file

@@ -4,7 +4,7 @@
//!
//! TODO(charlie): Consolidate with the existing AST-based docstring extraction.
use rustpython_parser::lexer::Tok;
use rustpython_parser::Tok;
#[derive(Default)]
enum State {

View file

@@ -5,8 +5,8 @@ use anyhow::{anyhow, Result};
use colored::Colorize;
use log::error;
use rustc_hash::FxHashMap;
use rustpython_parser::error::ParseError;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::ParseError;
use crate::autofix::fix_file;
use crate::checkers::ast::check_ast;

View file

@@ -1,6 +1,7 @@
/// See: [eradicate.py](https://github.com/myint/eradicate/blob/98f199940979c94447a461d50d27862b118b282d/eradicate.py)
use once_cell::sync::Lazy;
use regex::Regex;
use rustpython_parser as parser;
static ALLOWLIST_REGEX: Lazy<Regex> = Lazy::new(|| {
Regex::new(
@@ -77,7 +78,7 @@ pub fn comment_contains_code(line: &str, task_tags: &[String]) -> bool {
}
// Finally, compile the source code.
rustpython_parser::parser::parse_program(&line, "<filename>").is_ok()
parser::parse_program(&line, "<filename>").is_ok()
}
/// Returns `true` if a line is probably part of some multiline code.

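The heuristic above bottoms out in a parse check: a comment "contains code" if, once the comment syntax is stripped, what remains parses as a Python program. A reduced sketch of that final step, using the new crate-root alias (`looks_like_code` is a hypothetical name):

use rustpython_parser as parser;

/// Hypothetical reduction of the eradicate check: does the comment body
/// parse as a standalone Python program?
fn looks_like_code(comment: &str) -> bool {
    let body = comment.trim_start_matches('#').trim();
    parser::parse_program(body, "<filename>").is_ok()
}
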
View file

@@ -1,8 +1,6 @@
use anyhow::{bail, Result};
use rustpython_parser::ast::Stmt;
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::fix::Fix;
@@ -17,9 +15,7 @@ pub fn add_return_none_annotation(locator: &Locator, stmt: &Stmt) -> Result<Fix>
let mut seen_lpar = false;
let mut seen_rpar = false;
let mut count: usize = 0;
for (start, tok, ..) in
lexer::make_tokenizer_located(contents, Mode::Module, range.location).flatten()
{
for (start, tok, ..) in lexer::lex_located(contents, Mode::Module, range.location).flatten() {
if seen_lpar && seen_rpar {
if matches!(tok, Tok::Colon) {
return Ok(Fix::insertion(" -> None".to_string(), start));

View file

@@ -1,7 +1,7 @@
use itertools::Itertools;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::lexer::{LexResult, Spanned};
use rustpython_parser::token::Tok;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::fix::Fix;

View file

@@ -1,7 +1,8 @@
use itertools::Itertools;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Operator};
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::registry::Diagnostic;

View file

@@ -1,6 +1,7 @@
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use super::settings::Quote;
use crate::ast::types::Range;

View file

@@ -1,9 +1,7 @@
use std::borrow::Cow;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::source_code::Locator;
@@ -18,7 +16,7 @@ pub struct Comment<'a> {
/// Collect all comments in an import block.
pub fn collect_comments<'a>(range: &Range, locator: &'a Locator) -> Vec<Comment<'a>> {
let contents = locator.slice(range);
lexer::make_tokenizer_located(contents, Mode::Module, range.location)
lexer::lex_located(contents, Mode::Module, range.location)
.flatten()
.filter_map(|(start, tok, end)| {
if let Tok::Comment(value) = tok {

View file

@@ -1,23 +1,18 @@
use rustpython_parser::ast::{Location, Stmt};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode, Tok};
use super::types::TrailingComma;
use crate::ast::helpers::is_docstring_stmt;
use crate::ast::types::Range;
use crate::source_code::Locator;
use super::types::TrailingComma;
/// Return `true` if a `StmtKind::ImportFrom` statement ends with a magic
/// trailing comma.
pub fn trailing_comma(stmt: &Stmt, locator: &Locator) -> TrailingComma {
let contents = locator.slice(&Range::from_located(stmt));
let mut count: usize = 0;
let mut trailing_comma = TrailingComma::Absent;
for (_, tok, _) in
lexer::make_tokenizer_located(contents, Mode::Module, stmt.location).flatten()
{
for (_, tok, _) in lexer::lex_located(contents, Mode::Module, stmt.location).flatten() {
if matches!(tok, Tok::Lpar) {
count += 1;
}
@@ -114,7 +109,7 @@ pub fn find_splice_location(body: &[Stmt], locator: &Locator) -> Location {
// Find the first token that isn't a comment or whitespace.
let contents = locator.skip(splice);
for (.., tok, end) in lexer::make_tokenizer_located(contents, Mode::Module, splice).flatten() {
for (.., tok, end) in lexer::lex_located(contents, Mode::Module, splice).flatten() {
if matches!(tok, Tok::Comment(..) | Tok::Newline) {
splice = end;
} else {
@@ -128,12 +123,11 @@ pub fn find_splice_location(body: &[Stmt], locator: &Locator) -> Location {
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use rustpython_parser::parser;
use crate::source_code::Locator;
use super::find_splice_location;
use crate::source_code::Locator;
fn splice_contents(contents: &str) -> Result<Location> {
let program = parser::parse_program(contents, "<filename>")?;

View file

@@ -2,6 +2,7 @@ use std::fmt;
use log::error;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser as parser;
use rustpython_parser::ast::{Location, StmtKind, Suite};
use super::super::helpers;
@@ -16,13 +17,15 @@ use crate::violation::AlwaysAutofixableViolation;
define_violation!(
/// ## What it does
/// Adds any required imports, as specified by the user, to the top of the file.
/// Adds any required imports, as specified by the user, to the top of the
/// file.
///
/// ## Why is this bad?
/// In some projects, certain imports are required to be present in all files. For
/// example, some projects assume that `from __future__ import annotations` is enabled,
/// and thus require that import to be present in all files. Omitting a "required" import
/// (as specified by the user) can cause errors or unexpected behavior.
/// In some projects, certain imports are required to be present in all
/// files. For example, some projects assume that `from __future__
/// import annotations` is enabled, and thus require that import to be
/// present in all files. Omitting a "required" import (as specified by
/// the user) can cause errors or unexpected behavior.
///
/// ## Example
/// ```python
@@ -210,18 +213,26 @@ pub fn add_required_imports(
.required_imports
.iter()
.flat_map(|required_import| {
let Ok(body) = rustpython_parser::parser::parse_program(required_import, "<filename>") else {
let Ok(body) = parser::parse_program(required_import, "<filename>") else {
error!("Failed to parse required import: `{}`", required_import);
return vec![];
};
if body.is_empty() || body.len() > 1 {
error!("Expected require import to contain a single statement: `{}`", required_import);
error!(
"Expected require import to contain a single statement: `{}`",
required_import
);
return vec![];
}
match &body[0].node {
StmtKind::ImportFrom { module, names, level } => {
names.iter().filter_map(|name| {
StmtKind::ImportFrom {
module,
names,
level,
} => names
.iter()
.filter_map(|name| {
add_required_import(
&AnyImport::ImportFrom(ImportFrom {
module: module.as_ref().map(String::as_str),
@@ -238,10 +249,11 @@ pub fn add_required_imports(
settings,
autofix,
)
}).collect()
}
StmtKind::Import { names } => {
names.iter().filter_map(|name| {
})
.collect(),
StmtKind::Import { names } => names
.iter()
.filter_map(|name| {
add_required_import(
&AnyImport::Import(Import {
name: Alias {
@@ -256,10 +268,13 @@ pub fn add_required_imports(
settings,
autofix,
)
}).collect()
}
})
.collect(),
_ => {
error!("Expected required import to be in import-from style: `{}`", required_import);
error!(
"Expected required import to be in import-from style: `{}`",
required_import
);
vec![]
}
}

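As the hunks above show, each user-supplied required import is parsed with `parser::parse_program` and then dispatched on the statement kind. A sketch of that dispatch in isolation (`classify` is a hypothetical probe, not part of this diff):

use rustpython_parser as parser;
use rustpython_parser::ast::StmtKind;

/// Hypothetical probe: classify a one-line import string the way the pass does.
fn classify(required_import: &str) -> Option<&'static str> {
    let body = parser::parse_program(required_import, "<filename>").ok()?;
    match &body.first()?.node {
        StmtKind::ImportFrom { .. } => Some("import-from"),
        StmtKind::Import { .. } => Some("import"),
        _ => None, // anything else is rejected with an error above
    }
}
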
View file

@@ -10,10 +10,11 @@ define_violation!(
/// ## What it does
/// Checks for functions with a high `McCabe` complexity.
///
/// The `McCabe` complexity of a function is a measure of the complexity of the
/// control flow graph of the function. It is calculated by adding one to the
/// number of decision points in the function. A decision point is a place in
/// the code where the program has a choice of two or more paths to follow.
/// The `McCabe` complexity of a function is a measure of the complexity of
/// the control flow graph of the function. It is calculated by adding
/// one to the number of decision points in the function. A decision
/// point is a place in the code where the program has a choice of two
/// or more paths to follow.
///
/// ## Why is this bad?
/// Functions with a high complexity are hard to understand and maintain.
@@ -147,7 +148,7 @@ pub fn function_is_too_complex(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
use super::get_complexity_number;

View file

@@ -1,6 +1,7 @@
use bitflags::bitflags;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::source_code::Locator;

View file

@@ -1,6 +1,6 @@
use rustpython_parser::lexer::{LexResult, Tok};
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::fix::Fix;

View file

@@ -1,5 +1,5 @@
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::error::ParseError;
use rustpython_parser::ParseError;
use crate::ast::types::Range;
use crate::registry::Diagnostic;

View file

@@ -1,9 +1,8 @@
#![allow(dead_code)]
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::Tok;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::Location;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::registry::DiagnosticKind;

View file

@@ -1,11 +1,8 @@
use anyhow::{bail, Result};
use libcst_native::{Call, Codegen, CodegenState, Dict, DictElement, Expression};
use rustpython_parser::ast::{Excepthandler, Expr};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use ruff_python::string::strip_quotes_and_prefixes;
use rustpython_parser::ast::{Excepthandler, Expr};
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::cst::matchers::{match_expr, match_module};
@@ -124,7 +121,7 @@ pub fn remove_exception_handler_assignment(
// End of the token just before the `as` to the semicolon.
let mut prev = None;
for (start, tok, end) in
lexer::make_tokenizer_located(contents, Mode::Module, excepthandler.location).flatten()
lexer::lex_located(contents, Mode::Module, excepthandler.location).flatten()
{
if matches!(tok, Tok::As) {
fix_start = prev;

View file

@@ -1,11 +1,8 @@
use itertools::Itertools;
use log::error;
use rustpython_parser::ast::{ExprKind, Located, Stmt, StmtKind};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{ExprKind, Located, Stmt, StmtKind};
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::helpers::contains_effect;
use crate::ast::types::{BindingKind, Range, RefEquality, ScopeKind};
@@ -21,8 +18,8 @@ define_violation!(
/// Checks for the presence of unused variables in function scopes.
///
/// ## Why is this bad?
/// A variable that is defined but not used is likely a mistake, and should be
/// removed to avoid confusion.
/// A variable that is defined but not used is likely a mistake, and should
/// be removed to avoid confusion.
///
/// If a variable is intentionally defined-but-not-used, it should be
/// prefixed with an underscore, or some other value that adheres to the
@@ -62,8 +59,8 @@ impl AlwaysAutofixableViolation for UnusedVariable {
}
}
/// Return the start and end [`Location`] of the token after the next match of the predicate,
/// skipping over any bracketed expressions.
/// Return the start and end [`Location`] of the token after the next match of
/// the predicate, skipping over any bracketed expressions.
fn match_token_after<F, T>(located: &Located<T>, locator: &Locator, f: F) -> Range
where
F: Fn(Tok) -> bool,
@@ -76,7 +73,7 @@ where
let mut brace_count = 0;
for ((_, tok, _), (start, _, end)) in
lexer::make_tokenizer_located(contents, Mode::Module, located.location)
lexer::lex_located(contents, Mode::Module, located.location)
.flatten()
.tuple_windows()
{
@@ -125,8 +122,8 @@ where
unreachable!("No token after matched");
}
/// Return the start and end [`Location`] of the token matching the predicate, skipping over
/// any bracketed expressions.
/// Return the start and end [`Location`] of the token matching the predicate,
/// skipping over any bracketed expressions.
fn match_token<F, T>(located: &Located<T>, locator: &Locator, f: F) -> Range
where
F: Fn(Tok) -> bool,
@@ -138,8 +135,7 @@ where
let mut sqb_count = 0;
let mut brace_count = 0;
for (start, tok, end) in
lexer::make_tokenizer_located(contents, Mode::Module, located.location).flatten()
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, located.location).flatten()
{
match tok {
Tok::Lpar => {

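Both helpers above share one idea: walk the token stream, track bracket depth, and only accept a match at depth zero, so tokens inside nested expressions are skipped. A condensed sketch of that bookkeeping (one depth counter instead of the separate par/sqb/brace counters used above; `first_top_level_comma` is a hypothetical name):

use rustpython_parser::ast::Location;
use rustpython_parser::{lexer, Mode, Tok};

/// Find the first comma after `start` that is not nested in (), [], or {}.
fn first_top_level_comma(source: &str, start: Location) -> Option<(Location, Location)> {
    let mut depth: usize = 0;
    for (tok_start, tok, tok_end) in lexer::lex_located(source, Mode::Module, start).flatten() {
        match tok {
            Tok::Lpar | Tok::Lsqb | Tok::Lbrace => depth += 1,
            Tok::Rpar | Tok::Rsqb | Tok::Rbrace => depth = depth.saturating_sub(1),
            Tok::Comma if depth == 0 => return Some((tok_start, tok_end)),
            _ => {}
        }
    }
    None
}
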
View file

@@ -1,13 +1,10 @@
use std::str::FromStr;
use ruff_macros::{define_violation, derive_message_formats};
use rustc_hash::FxHashMap;
use rustpython_common::cformat::{CFormatPart, CFormatSpec, CFormatStrOrBytes, CFormatString};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Location, Operator};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::checkers::ast::Checker;
@@ -248,9 +245,7 @@ pub fn bad_string_format_type(checker: &mut Checker, expr: &Expr, right: &Expr)
// Grab each string segment (in case there's an implicit concatenation).
let content = checker.locator.slice(&Range::from_located(expr));
let mut strings: Vec<(Location, Location)> = vec![];
for (start, tok, end) in
lexer::make_tokenizer_located(content, Mode::Module, expr.location).flatten()
{
for (start, tok, end) in lexer::lex_located(content, Mode::Module, expr.location).flatten() {
if matches!(tok, Tok::String { .. }) {
strings.push((start, end));
} else if matches!(tok, Tok::Percent) {

View file

@@ -120,7 +120,7 @@ pub fn too_many_branches(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
use super::num_branches;

View file

@@ -55,7 +55,7 @@ pub fn too_many_return_statements(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
use super::num_returns;

View file

@@ -123,7 +123,7 @@ pub fn too_many_statements(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
use super::num_statements;

View file

@@ -4,9 +4,7 @@ use libcst_native::{
SmallStatement, Statement, Suite,
};
use rustpython_parser::ast::{Expr, Keyword, Location};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::autofix::helpers::remove_argument;
@@ -111,7 +109,7 @@ pub fn remove_import_members(contents: &str, members: &[&str]) -> String {
// Find all Tok::Name tokens that are not preceded by Tok::As, and all
// Tok::Comma tokens.
let mut prev_tok = None;
for (start, tok, end) in lexer::make_tokenizer(contents, Mode::Module)
for (start, tok, end) in lexer::lex(contents, Mode::Module)
.flatten()
.skip_while(|(_, tok, _)| !matches!(tok, Tok::Import))
{

View file

@@ -1,5 +1,6 @@
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::fix::Fix;

View file

@@ -1,13 +1,10 @@
use ruff_macros::{define_violation, derive_message_formats};
use rustc_hash::FxHashMap;
use rustpython_common::format::{
FieldName, FieldNamePart, FieldType, FormatPart, FormatString, FromTemplate,
};
use rustpython_parser::ast::{Constant, Expr, ExprKind, KeywordData};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::checkers::ast::Checker;
@@ -131,7 +128,7 @@ fn try_convert_to_f_string(checker: &Checker, expr: &Expr) -> Option<String> {
let contents = checker.locator.slice(&Range::from_located(value));
// Tokenize: we need to avoid trying to fix implicit string concatenations.
if lexer::make_tokenizer(contents, Mode::Module)
if lexer::lex(contents, Mode::Module)
.flatten()
.filter(|(_, tok, _)| matches!(tok, Tok::String { .. }))
.count()

View file

@@ -2,9 +2,7 @@ use std::fmt;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode, Tok};
use serde::{Deserialize, Serialize};
use crate::ast::types::Range;
@@ -119,7 +117,7 @@ pub fn native_literals(
// safely remove the outer call in this situation. We're following pyupgrade
// here and skip.
let arg_code = checker.locator.slice(&Range::from_located(arg));
if lexer::make_tokenizer_located(arg_code, Mode::Module, arg.location)
if lexer::lex_located(arg_code, Mode::Module, arg.location)
.flatten()
.filter(|(_, tok, _)| matches!(tok, Tok::String { .. }))
.count()

View file

@@ -2,12 +2,9 @@ use std::cmp::Ordering;
use log::error;
use num_bigint::{BigInt, Sign};
use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Location, Stmt};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Location, Stmt};
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::{Range, RefEquality};
use crate::ast::whitespace::indentation;
@@ -69,7 +66,7 @@ fn metadata<T>(locator: &Locator, located: &Located<T>) -> Option<BlockMetadata>
let mut else_ = None;
for (start, tok, _) in
lexer::make_tokenizer_located(text, Mode::Module, Location::new(located.location.row(), 0))
lexer::lex_located(text, Mode::Module, Location::new(located.location.row(), 0))
.flatten()
.filter(|(_, tok, _)| {
!matches!(

View file

@@ -1,16 +1,13 @@
use std::str::FromStr;
use rustpython_common::cformat::{
CConversionFlags, CFormatPart, CFormatPrecision, CFormatQuantity, CFormatString,
};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Location};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use ruff_macros::{define_violation, derive_message_formats};
use ruff_python::identifiers::is_identifier;
use ruff_python::keyword::KWLIST;
use rustpython_common::cformat::{
CConversionFlags, CFormatPart, CFormatPrecision, CFormatQuantity, CFormatString,
};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Location};
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::ast::whitespace::indentation;
@@ -321,7 +318,7 @@ pub(crate) fn printf_string_formatting(
// Grab each string segment (in case there's an implicit concatenation).
let mut strings: Vec<(Location, Location)> = vec![];
let mut extension = None;
for (start, tok, end) in lexer::make_tokenizer_located(
for (start, tok, end) in lexer::lex_located(
checker.locator.slice(&Range::from_located(expr)),
Mode::Module,
expr.location,

View file

@@ -4,9 +4,7 @@ use anyhow::{anyhow, Result};
use log::error;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword, Location};
use rustpython_parser::lexer;
use rustpython_parser::mode::Mode;
use rustpython_parser::token::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::helpers::find_keyword;
use crate::ast::types::Range;
@@ -143,9 +141,7 @@ fn create_remove_param_fix(locator: &Locator, expr: &Expr, mode_param: &Expr) ->
let mut fix_end: Option<Location> = None;
let mut is_first_arg: bool = false;
let mut delete_first_arg: bool = false;
for (start, tok, end) in
lexer::make_tokenizer_located(content, Mode::Module, expr.location).flatten()
{
for (start, tok, end) in lexer::lex_located(content, Mode::Module, expr.location).flatten() {
if start == mode_param.location {
if is_first_arg {
delete_first_arg = true;

View file

@@ -1,13 +1,12 @@
use rustpython_parser as parser;
use rustpython_parser::ast::{Mod, Suite};
use rustpython_parser::error::ParseError;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, parser};
use rustpython_parser::{lexer, Mode, ParseError};
/// Collect tokens up to and including the first error.
pub fn tokenize(contents: &str) -> Vec<LexResult> {
let mut tokens: Vec<LexResult> = vec![];
for tok in lexer::make_tokenizer(contents, Mode::Module) {
for tok in lexer::lex(contents, Mode::Module) {
let is_err = tok.is_err();
tokens.push(tok);
if is_err {

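Per the doc comment, `tokenize` deliberately stops after the first lexer error instead of draining the stream, so a trailing Err marks an early exit. A small usage sketch under that contract (`lex_or_report` is a hypothetical caller of the `tokenize` defined above):

use rustpython_parser::lexer::LexResult;

/// Hypothetical caller: lex a file and report whether lexing stopped early.
fn lex_or_report(contents: &str) -> (Vec<LexResult>, bool) {
    let tokens = tokenize(contents); // `tokenize` as defined above
    let stopped_early = tokens.last().map_or(false, Result::is_err);
    (tokens, stopped_early)
}
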
View file

@@ -1258,7 +1258,7 @@ impl<'a> Generator<'a> {
#[cfg(test)]
mod tests {
use rustpython_parser::parser;
use rustpython_parser as parser;
use crate::source_code::stylist::{Indentation, LineEnding, Quote};
use crate::source_code::Generator;

View file

@@ -2,7 +2,8 @@
//! are omitted from the AST (e.g., commented lines).
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
pub struct Indexer {
commented_lines: Vec<usize>,
@@ -49,16 +50,15 @@ impl From<&[LexResult]> for Indexer {
#[cfg(test)]
mod tests {
use rustpython_parser::lexer;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode};
use crate::source_code::Indexer;
#[test]
fn continuation() {
let contents = r#"x = 1"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let indexer: Indexer = lxr.as_slice().into();
assert_eq!(indexer.continuation_lines(), Vec::<usize>::new().as_slice());
@@ -70,7 +70,7 @@ x = 1
y = 2
"#
.trim();
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let indexer: Indexer = lxr.as_slice().into();
assert_eq!(indexer.continuation_lines(), Vec::<usize>::new().as_slice());
@@ -90,7 +90,7 @@ if True:
)
"#
.trim();
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let indexer: Indexer = lxr.as_slice().into();
assert_eq!(indexer.continuation_lines(), [1, 5, 6, 11]);
@@ -110,7 +110,7 @@ x = 1; \
import os
"#
.trim();
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let indexer: Indexer = lxr.as_slice().into();
assert_eq!(indexer.continuation_lines(), [9, 12]);
}

View file

@@ -6,8 +6,8 @@ mod stylist;
pub(crate) use generator::Generator;
pub(crate) use indexer::Indexer;
pub(crate) use locator::Locator;
use rustpython_parser::error::ParseError;
use rustpython_parser::parser;
use rustpython_parser as parser;
use rustpython_parser::ParseError;
pub(crate) use stylist::{LineEnding, Stylist};
/// Run round-trip source code generation on a given Python code.

View file

@@ -5,9 +5,7 @@ use std::ops::Deref;
use once_cell::unsync::OnceCell;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::rules::pydocstyle::helpers::leading_quote;
@@ -166,7 +164,7 @@ impl Deref for LineEnding {
/// Detect the indentation style of the given tokens.
fn detect_indentation(contents: &str, locator: &Locator) -> Option<Indentation> {
for (_start, tok, end) in lexer::make_tokenizer(contents, Mode::Module).flatten() {
for (_start, tok, end) in lexer::lex(contents, Mode::Module).flatten() {
if let Tok::Indent { .. } = tok {
let start = Location::new(end.row(), 0);
let whitespace = locator.slice(&Range::new(start, end));
@@ -178,7 +176,7 @@ fn detect_indentation(contents: &str, locator: &Locator) -> Option<Indentation>
/// Detect the quotation style of the given tokens.
fn detect_quote(contents: &str, locator: &Locator) -> Option<Quote> {
for (start, tok, end) in lexer::make_tokenizer(contents, Mode::Module).flatten() {
for (start, tok, end) in lexer::lex(contents, Mode::Module).flatten() {
if let Tok::String { .. } = tok {
let content = locator.slice(&Range::new(start, end));
if let Some(pattern) = leading_quote(content) {

View file

@@ -5,7 +5,7 @@ use std::fs;
use std::path::PathBuf;
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
#[derive(clap::Args)]
pub struct Args {

View file

@@ -5,8 +5,7 @@ use std::fs;
use std::path::PathBuf;
use anyhow::Result;
use rustpython_parser::lexer;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, Mode};
#[derive(clap::Args)]
pub struct Args {
@@ -17,7 +16,7 @@ pub struct Args {
pub fn main(args: &Args) -> Result<()> {
let contents = fs::read_to_string(&args.file)?;
for (_, tok, _) in lexer::make_tokenizer(&contents, Mode::Module).flatten() {
for (_, tok, _) in lexer::lex(&contents, Mode::Module).flatten() {
println!("{tok:#?}");
}
Ok(())

View file

@@ -1,13 +1,12 @@
use rustpython_parser as parser;
use rustpython_parser::ast::{Mod, Suite};
use rustpython_parser::error::ParseError;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, parser};
use rustpython_parser::{lexer, Mode, ParseError};
/// Collect tokens up to and including the first error.
pub fn tokenize(contents: &str) -> Vec<LexResult> {
let mut tokens: Vec<LexResult> = vec![];
for tok in lexer::make_tokenizer(contents, Mode::Module) {
for tok in lexer::lex(contents, Mode::Module) {
let is_err = tok.is_err();
tokens.push(tok);
if is_err {

View file

@@ -1,6 +1,7 @@
use rustc_hash::FxHashMap;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::core::types::Range;
use crate::cst::{Alias, Excepthandler, ExcepthandlerKind, Expr, ExprKind, Stmt, StmtKind};
@@ -45,7 +46,8 @@ pub struct TriviaToken {
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum TriviaKind {
/// A Comment that is separated by at least one line break from the preceding token.
/// A Comment that is separated by at least one line break from the
/// preceding token.
///
/// # Examples
///

View file

@@ -1,4 +0,0 @@
edition = "2021"
max_width = 100
reorder_imports = true
reorder_modules = true