Mirror of https://github.com/astral-sh/ruff.git, synced 2025-08-03 02:13:08 +00:00
Upgrade RustPython to match new flattened exports (#3141)
This commit is contained in:
parent ba61bb6a6c
commit 2f9de335db
49 changed files with 196 additions and 236 deletions
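The change is mechanical: rustpython-parser now re-exports its lexer, mode, token, and error types from the crate root and renames the tokenizer constructors, so every import site and call site in ruff is updated in lockstep. A minimal sketch of the old-vs-new pattern, summarized from the hunks below (not itself part of the diff):

// Old nested paths (rev 6d71f758...):
//   use rustpython_parser::lexer;
//   use rustpython_parser::lexer::Tok;
//   use rustpython_parser::mode::Mode;
//   use rustpython_parser::token::StringKind;
//   use rustpython_parser::error::ParseError;
//   use rustpython_parser::parser;            // parser::parse_program(...)
//
// New flattened exports (rev edf5995a...):
use rustpython_parser::{lexer, Mode, ParseError, StringKind, Tok};
use rustpython_parser as parser; // parse_program is now reachable from the crate root
//
// Renamed lexer entry points:
//   lexer::make_tokenizer(..)         -> lexer::lex(..)
//   lexer::make_tokenizer_located(..) -> lexer::lex_located(..)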
Cargo.lock (generated) — 8 changes

@@ -2150,7 +2150,7 @@ dependencies = [
 [[package]]
 name = "rustpython-ast"
 version = "0.2.0"
-source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d"
+source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
 dependencies = [
  "num-bigint",
  "rustpython-compiler-core",
@@ -2159,7 +2159,7 @@ dependencies = [
 [[package]]
 name = "rustpython-common"
 version = "0.2.0"
-source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d"
+source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
 dependencies = [
  "ascii",
  "bitflags",
@@ -2184,7 +2184,7 @@ dependencies = [
 [[package]]
 name = "rustpython-compiler-core"
 version = "0.2.0"
-source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d"
+source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
 dependencies = [
  "bincode",
  "bitflags",
@@ -2201,7 +2201,7 @@ dependencies = [
 [[package]]
 name = "rustpython-parser"
 version = "0.2.0"
-source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d"
+source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
 dependencies = [
  "ahash",
  "anyhow",

@@ -13,8 +13,8 @@ libcst = { git = "https://github.com/charliermarsh/LibCST", rev = "f2f0b7a487a87
 once_cell = { version = "1.16.0" }
 regex = { version = "1.6.0" }
 rustc-hash = { version = "1.1.0" }
-rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "6d71f758170d504817cc47720762c41d9031506d" }
-rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "6d71f758170d504817cc47720762c41d9031506d" }
+rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "edf5995a1e4c366976304ca05432dd27c913054e" }
+rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "edf5995a1e4c366976304ca05432dd27c913054e" }
 schemars = { version = "0.8.11" }
 serde = { version = "1.0.147", features = ["derive"] }
 serde_json = { version = "1.0.87" }

@@ -289,3 +289,7 @@ def x(y):
 return 1
 case 1:
 print() # error
+
+
+def foo(baz: str) -> str:
+    return baz

@@ -9,10 +9,7 @@ use rustpython_parser::ast::{
 Arguments, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprKind, Keyword, KeywordData,
 Located, Location, MatchCase, Pattern, PatternKind, Stmt, StmtKind,
 };
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
-use rustpython_parser::token::StringKind;
+use rustpython_parser::{lexer, Mode, StringKind, Tok};
 use smallvec::{smallvec, SmallVec};

 use crate::ast::types::{Binding, BindingKind, CallPath, Range};
@@ -656,7 +653,7 @@ pub fn has_comments<T>(located: &Located<T>, locator: &Locator) -> bool {

 /// Returns `true` if a [`Range`] includes at least one comment.
 pub fn has_comments_in(range: Range, locator: &Locator) -> bool {
-    for tok in lexer::make_tokenizer_located(locator.slice(&range), Mode::Module, range.location) {
+    for tok in lexer::lex_located(locator.slice(&range), Mode::Module, range.location) {
 match tok {
 Ok((_, tok, _)) => {
 if matches!(tok, Tok::Comment(..)) {
@@ -871,8 +868,7 @@ pub fn match_parens(start: Location, locator: &Locator) -> Option<Range> {
 let mut fix_start = None;
 let mut fix_end = None;
 let mut count: usize = 0;
-    for (start, tok, end) in lexer::make_tokenizer_located(contents, Mode::Module, start).flatten()
-    {
+    for (start, tok, end) in lexer::lex_located(contents, Mode::Module, start).flatten() {
 if matches!(tok, Tok::Lpar) {
 if count == 0 {
 fix_start = Some(start);
@@ -904,8 +900,7 @@ pub fn identifier_range(stmt: &Stmt, locator: &Locator) -> Range {
 | StmtKind::AsyncFunctionDef { .. }
 ) {
 let contents = locator.slice(&Range::from_located(stmt));
-        for (start, tok, end) in
-            lexer::make_tokenizer_located(contents, Mode::Module, stmt.location).flatten()
+        for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt.location).flatten()
 {
 if matches!(tok, Tok::Name { .. }) {
 return Range::new(start, end);
@@ -937,7 +932,7 @@ pub fn find_names<'a, T, U>(
 locator: &'a Locator,
 ) -> impl Iterator<Item = Range> + 'a {
 let contents = locator.slice(&Range::from_located(located));
-    lexer::make_tokenizer_located(contents, Mode::Module, located.location)
+    lexer::lex_located(contents, Mode::Module, located.location)
 .flatten()
 .filter(|(_, tok, _)| matches!(tok, Tok::Name { .. }))
 .map(|(start, _, end)| Range {
@@ -955,7 +950,7 @@ pub fn excepthandler_name_range(handler: &Excepthandler, locator: &Locator) -> O
 (Some(_), Some(type_)) => {
 let type_end_location = type_.end_location.unwrap();
 let contents = locator.slice(&Range::new(type_end_location, body[0].location));
-            let range = lexer::make_tokenizer_located(contents, Mode::Module, type_end_location)
+            let range = lexer::lex_located(contents, Mode::Module, type_end_location)
 .flatten()
 .tuple_windows()
 .find(|(tok, next_tok)| {
@@ -982,7 +977,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range {
 location: handler.location,
 end_location: end,
 });
-    let range = lexer::make_tokenizer_located(contents, Mode::Module, handler.location)
+    let range = lexer::lex_located(contents, Mode::Module, handler.location)
 .flatten()
 .find(|(_, kind, _)| matches!(kind, Tok::Except { .. }))
 .map(|(location, _, end_location)| Range {
@@ -996,7 +991,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range {
 /// Find f-strings that don't contain any formatted values in a `JoinedStr`.
 pub fn find_useless_f_strings(expr: &Expr, locator: &Locator) -> Vec<(Range, Range)> {
 let contents = locator.slice(&Range::from_located(expr));
-    lexer::make_tokenizer_located(contents, Mode::Module, expr.location)
+    lexer::lex_located(contents, Mode::Module, expr.location)
 .flatten()
 .filter_map(|(location, tok, end_location)| match tok {
 Tok::String {
@@ -1050,7 +1045,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
 .expect("Expected orelse to be non-empty")
 .location,
 });
-    let range = lexer::make_tokenizer_located(contents, Mode::Module, body_end)
+    let range = lexer::lex_located(contents, Mode::Module, body_end)
 .flatten()
 .find(|(_, kind, _)| matches!(kind, Tok::Else))
 .map(|(location, _, end_location)| Range {
@@ -1066,7 +1061,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
 /// Return the `Range` of the first `Tok::Colon` token in a `Range`.
 pub fn first_colon_range(range: Range, locator: &Locator) -> Option<Range> {
 let contents = locator.slice(&range);
-    let range = lexer::make_tokenizer_located(contents, Mode::Module, range.location)
+    let range = lexer::lex_located(contents, Mode::Module, range.location)
 .flatten()
 .find(|(_, kind, _)| matches!(kind, Tok::Colon))
 .map(|(location, _, end_location)| Range {
@@ -1096,7 +1091,7 @@ pub fn elif_else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
 _ => return None,
 };
 let contents = locator.slice(&Range::new(start, end));
-    let range = lexer::make_tokenizer_located(contents, Mode::Module, start)
+    let range = lexer::lex_located(contents, Mode::Module, start)
 .flatten()
 .find(|(_, kind, _)| matches!(kind, Tok::Elif | Tok::Else))
 .map(|(location, _, end_location)| Range {
@@ -1212,8 +1207,8 @@ pub fn is_logger_candidate(func: &Expr) -> bool {
 #[cfg(test)]
 mod tests {
 use anyhow::Result;
+    use rustpython_parser as parser;
 use rustpython_parser::ast::Location;
-    use rustpython_parser::parser;

 use crate::ast::helpers::{
 elif_else_range, else_range, first_colon_range, identifier_range, match_trailing_content,

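All of the helpers hunks above follow one pattern: slice the source for a node's range, then scan it with the located lexer under its new name. A minimal sketch of that pattern against the renamed API, mirroring `first_colon_range` above (the helper name here is hypothetical):

use rustpython_parser::ast::Location;
use rustpython_parser::{lexer, Mode, Tok};

// Find the first colon at or after `start` in `contents`, returning its span.
// `lexer::lex_located` replaces `lexer::make_tokenizer_located`; it yields
// Result-wrapped (start, token, end) triples, so `.flatten()` skips lex errors.
fn first_colon(contents: &str, start: Location) -> Option<(Location, Location)> {
    lexer::lex_located(contents, Mode::Module, start)
        .flatten()
        .find(|(_, tok, _)| matches!(tok, Tok::Colon))
        .map(|(begin, _, end)| (begin, end))
}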
@@ -1,9 +1,7 @@
 use bitflags::bitflags;
 use rustc_hash::FxHashMap;
 use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Stmt, StmtKind};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode, Tok};

 use crate::ast::helpers::any_over_expr;
 use crate::ast::types::{BindingKind, Scope};
@@ -285,9 +283,7 @@ pub type LocatedCmpop<U = ()> = Located<Cmpop, U>;
 /// `CPython` doesn't either. This method iterates over the token stream and
 /// re-identifies [`Cmpop`] nodes, annotating them with valid ranges.
 pub fn locate_cmpops(contents: &str) -> Vec<LocatedCmpop> {
-    let mut tok_iter = lexer::make_tokenizer(contents, Mode::Module)
-        .flatten()
-        .peekable();
+    let mut tok_iter = lexer::lex(contents, Mode::Module).flatten().peekable();
 let mut ops: Vec<LocatedCmpop> = vec![];
 let mut count: usize = 0;
 loop {

@@ -4,9 +4,7 @@ use libcst_native::{
 Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement,
 };
 use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Location, Stmt, StmtKind};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode, Tok};

 use crate::ast::helpers;
 use crate::ast::helpers::to_absolute;
@@ -372,9 +370,7 @@ pub fn remove_argument(
 if n_arguments == 1 {
 // Case 1: there is only one argument.
 let mut count: usize = 0;
-        for (start, tok, end) in
-            lexer::make_tokenizer_located(contents, Mode::Module, stmt_at).flatten()
-        {
+        for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() {
 if matches!(tok, Tok::Lpar) {
 if count == 0 {
 fix_start = Some(if remove_parentheses {
@@ -406,9 +402,7 @@ pub fn remove_argument(
 {
 // Case 2: argument or keyword is _not_ the last node.
 let mut seen_comma = false;
-        for (start, tok, end) in
-            lexer::make_tokenizer_located(contents, Mode::Module, stmt_at).flatten()
-        {
+        for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() {
 if seen_comma {
 if matches!(tok, Tok::NonLogicalNewline) {
 // Also delete any non-logical newlines after the comma.
@@ -431,9 +425,7 @@ pub fn remove_argument(
 } else {
 // Case 3: argument or keyword is the last node, so we have to find the last
 // comma in the stmt.
-        for (start, tok, _) in
-            lexer::make_tokenizer_located(contents, Mode::Module, stmt_at).flatten()
-        {
+        for (start, tok, _) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() {
 if start == expr_at {
 fix_end = Some(expr_end);
 break;
@@ -455,8 +447,8 @@ pub fn remove_argument(
 #[cfg(test)]
 mod tests {
 use anyhow::Result;
+    use rustpython_parser as parser;
 use rustpython_parser::ast::Location;
-    use rustpython_parser::parser;

 use crate::autofix::helpers::{next_stmt_break, trailing_semicolon};
 use crate::source_code::Locator;

@@ -6,19 +6,18 @@ use std::path::Path;
 use itertools::Itertools;
 use log::error;
 use nohash_hasher::IntMap;
+use ruff_python::builtins::{BUILTINS, MAGIC_GLOBALS};
+use ruff_python::typing::TYPING_EXTENSIONS;
 use rustc_hash::{FxHashMap, FxHashSet};
 use rustpython_common::cformat::{CFormatError, CFormatErrorType};
+use rustpython_parser as parser;
 use rustpython_parser::ast::{
 Arg, Arguments, Comprehension, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprContext,
 ExprKind, KeywordData, Located, Location, Operator, Pattern, PatternKind, Stmt, StmtKind,
 Suite,
 };
-use rustpython_parser::parser;
 use smallvec::smallvec;

-use ruff_python::builtins::{BUILTINS, MAGIC_GLOBALS};
-use ruff_python::typing::TYPING_EXTENSIONS;
-
 use crate::ast::helpers::{
 binding_range, collect_call_path, extract_handler_names, from_relative_import, to_module_path,
 };
@@ -2060,8 +2059,8 @@ where
 value,
 ..
 } => {
-                // If we're in a class or module scope, then the annotation needs to be available
-                // at runtime.
+                // If we're in a class or module scope, then the annotation needs to be
+                // available at runtime.
 // See: https://docs.python.org/3/reference/simple_stmts.html#annotated-assignment-statements
 if !self.annotations_future_enabled
 && matches!(

@@ -152,9 +152,8 @@ pub fn check_logical_lines(

 #[cfg(test)]
 mod tests {
-    use rustpython_parser::lexer;
 use rustpython_parser::lexer::LexResult;
-    use rustpython_parser::mode::Mode;
+    use rustpython_parser::{lexer, Mode};

 use crate::checkers::logical_lines::iter_logical_lines;
 use crate::source_code::Locator;
@@ -165,7 +164,7 @@ mod tests {
 x = 1
 y = 2
 z = x + 1"#;
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 let locator = Locator::new(contents);
 let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
 .into_iter()
@@ -186,7 +185,7 @@ x = [
 ]
 y = 2
 z = x + 1"#;
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 let locator = Locator::new(contents);
 let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
 .into_iter()
@@ -200,7 +199,7 @@ z = x + 1"#;
 assert_eq!(actual, expected);

 let contents = "x = 'abc'";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 let locator = Locator::new(contents);
 let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
 .into_iter()
@@ -213,7 +212,7 @@ z = x + 1"#;
 def f():
 x = 1
 f()"#;
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 let locator = Locator::new(contents);
 let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
 .into_iter()
@@ -228,7 +227,7 @@ def f():
 # Comment goes here.
 x = 1
 f()"#;
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 let locator = Locator::new(contents);
 let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
 .into_iter()

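The test hunks above (and the directive tests later in this diff) all build token vectors the same way under the new name. A minimal sketch, assuming only the types already imported in those tests:

use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode};

// `lexer::lex` replaces `lexer::make_tokenizer`; same (source, mode) arguments,
// and it still yields `LexResult` items that can be collected directly.
fn tokenize(contents: &str) -> Vec<LexResult> {
    lexer::lex(contents, Mode::Module).collect()
}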
@@ -1,6 +1,7 @@
 //! Lint rules based on token traversal.

-use rustpython_parser::lexer::{LexResult, Tok};
+use rustpython_parser::lexer::LexResult;
+use rustpython_parser::Tok;

 use crate::lex::docstring_detection::StateMachine;
 use crate::registry::{Diagnostic, Rule};

@@ -3,7 +3,8 @@
 use bitflags::bitflags;
 use nohash_hasher::{IntMap, IntSet};
 use rustpython_parser::ast::Location;
-use rustpython_parser::lexer::{LexResult, Tok};
+use rustpython_parser::lexer::LexResult;
+use rustpython_parser::Tok;

 use crate::registry::LintSource;
 use crate::settings::Settings;
@@ -150,15 +151,14 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
 #[cfg(test)]
 mod tests {
 use nohash_hasher::{IntMap, IntSet};
-    use rustpython_parser::lexer;
 use rustpython_parser::lexer::LexResult;
-    use rustpython_parser::mode::Mode;
+    use rustpython_parser::{lexer, Mode};

 use crate::directives::{extract_isort_directives, extract_noqa_line_for};

 #[test]
 fn noqa_extraction() {
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 "x = 1
 y = 2
 z = x + 1",
@@ -167,7 +167,7 @@ z = x + 1",
 .collect();
 assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());

-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 "
 x = 1
 y = 2
@@ -177,7 +177,7 @@ z = x + 1",
 .collect();
 assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());

-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 "x = 1
 y = 2
 z = x + 1
@@ -187,7 +187,7 @@ z = x + 1
 .collect();
 assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());

-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 "x = 1

 y = 2
@@ -198,7 +198,7 @@ z = x + 1
 .collect();
 assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());

-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 "x = '''abc
 def
 ghi
@@ -213,7 +213,7 @@ z = x + 1",
 IntMap::from_iter([(1, 4), (2, 4), (3, 4)])
 );

-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 "x = 1
 y = '''abc
 def
@@ -228,7 +228,7 @@ z = 2",
 IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
 );

-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 "x = 1
 y = '''abc
 def
@@ -242,7 +242,7 @@ ghi
 IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
 );

-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 r#"x = \
 1"#,
 Mode::Module,
@@ -250,7 +250,7 @@ ghi
 .collect();
 assert_eq!(extract_noqa_line_for(&lxr), IntMap::from_iter([(1, 2)]));

-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 r#"from foo import \
 bar as baz, \
 qux as quux"#,
@@ -262,7 +262,7 @@ ghi
 IntMap::from_iter([(1, 3), (2, 3)])
 );

-        let lxr: Vec<LexResult> = lexer::make_tokenizer(
+        let lxr: Vec<LexResult> = lexer::lex(
 r#"
 # Foo
 from foo import \
@@ -286,7 +286,7 @@ y = \
 let contents = "x = 1
 y = 2
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());

 let contents = "# isort: off
@@ -294,7 +294,7 @@ x = 1
 y = 2
 # isort: on
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 assert_eq!(
 extract_isort_directives(&lxr).exclusions,
 IntSet::from_iter([2, 3, 4])
@@ -307,7 +307,7 @@ y = 2
 # isort: on
 z = x + 1
 # isort: on";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 assert_eq!(
 extract_isort_directives(&lxr).exclusions,
 IntSet::from_iter([2, 3, 4, 5])
@@ -317,7 +317,7 @@ z = x + 1
 x = 1
 y = 2
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 assert_eq!(
 extract_isort_directives(&lxr).exclusions,
 IntSet::from_iter([2, 3, 4])
@@ -327,7 +327,7 @@ z = x + 1";
 x = 1
 y = 2
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());

 let contents = "# isort: off
@@ -336,7 +336,7 @@ x = 1
 y = 2
 # isort: skip_file
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
 }

@@ -345,20 +345,20 @@ z = x + 1";
 let contents = "x = 1
 y = 2
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 assert_eq!(extract_isort_directives(&lxr).splits, Vec::<usize>::new());

 let contents = "x = 1
 y = 2
 # isort: split
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 assert_eq!(extract_isort_directives(&lxr).splits, vec![3]);

 let contents = "x = 1
 y = 2 # isort: split
 z = x + 1";
-        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+        let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 assert_eq!(extract_isort_directives(&lxr).splits, vec![2]);
 }
 }

@@ -1,10 +1,12 @@
 //! Doc line extraction. In this context, a doc line is a line consisting of a
 //! standalone comment or a constant string statement.

-use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite};
-use rustpython_parser::lexer::{LexResult, Tok};
 use std::iter::FusedIterator;

+use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite};
+use rustpython_parser::lexer::LexResult;
+use rustpython_parser::Tok;
+
 use crate::ast::visitor;
 use crate::ast::visitor::Visitor;

@@ -4,7 +4,7 @@
 //!
 //! TODO(charlie): Consolidate with the existing AST-based docstring extraction.

-use rustpython_parser::lexer::Tok;
+use rustpython_parser::Tok;

 #[derive(Default)]
 enum State {

@@ -5,8 +5,8 @@ use anyhow::{anyhow, Result};
 use colored::Colorize;
 use log::error;
 use rustc_hash::FxHashMap;
-use rustpython_parser::error::ParseError;
 use rustpython_parser::lexer::LexResult;
+use rustpython_parser::ParseError;

 use crate::autofix::fix_file;
 use crate::checkers::ast::check_ast;

@@ -1,6 +1,7 @@
 /// See: [eradicate.py](https://github.com/myint/eradicate/blob/98f199940979c94447a461d50d27862b118b282d/eradicate.py)
 use once_cell::sync::Lazy;
 use regex::Regex;
+use rustpython_parser as parser;

 static ALLOWLIST_REGEX: Lazy<Regex> = Lazy::new(|| {
 Regex::new(
@@ -77,7 +78,7 @@ pub fn comment_contains_code(line: &str, task_tags: &[String]) -> bool {
 }

 // Finally, compile the source code.
-    rustpython_parser::parser::parse_program(&line, "<filename>").is_ok()
+    parser::parse_program(&line, "<filename>").is_ok()
 }

 /// Returns `true` if a line is probably part of some multiline code.

@@ -1,8 +1,6 @@
 use anyhow::{bail, Result};
 use rustpython_parser::ast::Stmt;
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode, Tok};

 use crate::ast::types::Range;
 use crate::fix::Fix;
@@ -17,9 +15,7 @@ pub fn add_return_none_annotation(locator: &Locator, stmt: &Stmt) -> Result<Fix>
 let mut seen_lpar = false;
 let mut seen_rpar = false;
 let mut count: usize = 0;
-    for (start, tok, ..) in
-        lexer::make_tokenizer_located(contents, Mode::Module, range.location).flatten()
-    {
+    for (start, tok, ..) in lexer::lex_located(contents, Mode::Module, range.location).flatten() {
 if seen_lpar && seen_rpar {
 if matches!(tok, Tok::Colon) {
 return Ok(Fix::insertion(" -> None".to_string(), start));

@@ -1,7 +1,7 @@
 use itertools::Itertools;
 use ruff_macros::{define_violation, derive_message_formats};
 use rustpython_parser::lexer::{LexResult, Spanned};
-use rustpython_parser::token::Tok;
+use rustpython_parser::Tok;

 use crate::ast::types::Range;
 use crate::fix::Fix;

@@ -1,7 +1,8 @@
 use itertools::Itertools;
 use ruff_macros::{define_violation, derive_message_formats};
 use rustpython_parser::ast::{Constant, Expr, ExprKind, Operator};
-use rustpython_parser::lexer::{LexResult, Tok};
+use rustpython_parser::lexer::LexResult;
+use rustpython_parser::Tok;

 use crate::ast::types::Range;
 use crate::registry::Diagnostic;

@@ -1,6 +1,7 @@
 use ruff_macros::{define_violation, derive_message_formats};
 use rustpython_parser::ast::Location;
-use rustpython_parser::lexer::{LexResult, Tok};
+use rustpython_parser::lexer::LexResult;
+use rustpython_parser::Tok;

 use super::settings::Quote;
 use crate::ast::types::Range;

@@ -1,9 +1,7 @@
 use std::borrow::Cow;

 use rustpython_parser::ast::Location;
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode, Tok};

 use crate::ast::types::Range;
 use crate::source_code::Locator;
@@ -18,7 +16,7 @@ pub struct Comment<'a> {
 /// Collect all comments in an import block.
 pub fn collect_comments<'a>(range: &Range, locator: &'a Locator) -> Vec<Comment<'a>> {
 let contents = locator.slice(range);
-    lexer::make_tokenizer_located(contents, Mode::Module, range.location)
+    lexer::lex_located(contents, Mode::Module, range.location)
 .flatten()
 .filter_map(|(start, tok, end)| {
 if let Tok::Comment(value) = tok {

@@ -1,23 +1,18 @@
 use rustpython_parser::ast::{Location, Stmt};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode, Tok};

+use super::types::TrailingComma;
 use crate::ast::helpers::is_docstring_stmt;
 use crate::ast::types::Range;
 use crate::source_code::Locator;

-use super::types::TrailingComma;
-
 /// Return `true` if a `StmtKind::ImportFrom` statement ends with a magic
 /// trailing comma.
 pub fn trailing_comma(stmt: &Stmt, locator: &Locator) -> TrailingComma {
 let contents = locator.slice(&Range::from_located(stmt));
 let mut count: usize = 0;
 let mut trailing_comma = TrailingComma::Absent;
-    for (_, tok, _) in
-        lexer::make_tokenizer_located(contents, Mode::Module, stmt.location).flatten()
-    {
+    for (_, tok, _) in lexer::lex_located(contents, Mode::Module, stmt.location).flatten() {
 if matches!(tok, Tok::Lpar) {
 count += 1;
 }
@@ -114,7 +109,7 @@ pub fn find_splice_location(body: &[Stmt], locator: &Locator) -> Location {

 // Find the first token that isn't a comment or whitespace.
 let contents = locator.skip(splice);
-    for (.., tok, end) in lexer::make_tokenizer_located(contents, Mode::Module, splice).flatten() {
+    for (.., tok, end) in lexer::lex_located(contents, Mode::Module, splice).flatten() {
 if matches!(tok, Tok::Comment(..) | Tok::Newline) {
 splice = end;
 } else {
@@ -128,12 +123,11 @@ pub fn find_splice_location(body: &[Stmt], locator: &Locator) -> Location {
 #[cfg(test)]
 mod tests {
 use anyhow::Result;
+    use rustpython_parser as parser;
 use rustpython_parser::ast::Location;
-    use rustpython_parser::parser;

-    use crate::source_code::Locator;
-
 use super::find_splice_location;
+    use crate::source_code::Locator;

 fn splice_contents(contents: &str) -> Result<Location> {
 let program = parser::parse_program(contents, "<filename>")?;

@@ -2,6 +2,7 @@ use std::fmt;

 use log::error;
 use ruff_macros::{define_violation, derive_message_formats};
+use rustpython_parser as parser;
 use rustpython_parser::ast::{Location, StmtKind, Suite};

 use super::super::helpers;
@@ -16,13 +17,15 @@ use crate::violation::AlwaysAutofixableViolation;

 define_violation!(
 /// ## What it does
-    /// Adds any required imports, as specified by the user, to the top of the file.
+    /// Adds any required imports, as specified by the user, to the top of the
+    /// file.
 ///
 /// ## Why is this bad?
-    /// In some projects, certain imports are required to be present in all files. For
-    /// example, some projects assume that `from __future__ import annotations` is enabled,
-    /// and thus require that import to be present in all files. Omitting a "required" import
-    /// (as specified by the user) can cause errors or unexpected behavior.
+    /// In some projects, certain imports are required to be present in all
+    /// files. For example, some projects assume that `from __future__
+    /// import annotations` is enabled, and thus require that import to be
+    /// present in all files. Omitting a "required" import (as specified by
+    /// the user) can cause errors or unexpected behavior.
 ///
 /// ## Example
 /// ```python
@@ -210,18 +213,26 @@ pub fn add_required_imports(
 .required_imports
 .iter()
 .flat_map(|required_import| {
-            let Ok(body) = rustpython_parser::parser::parse_program(required_import, "<filename>") else {
+            let Ok(body) = parser::parse_program(required_import, "<filename>") else {
 error!("Failed to parse required import: `{}`", required_import);
 return vec![];
 };
 if body.is_empty() || body.len() > 1 {
-                error!("Expected require import to contain a single statement: `{}`", required_import);
+                error!(
+                    "Expected require import to contain a single statement: `{}`",
+                    required_import
+                );
 return vec![];
 }

 match &body[0].node {
-                StmtKind::ImportFrom { module, names, level } => {
-                    names.iter().filter_map(|name| {
+                StmtKind::ImportFrom {
+                    module,
+                    names,
+                    level,
+                } => names
+                    .iter()
+                    .filter_map(|name| {
 add_required_import(
 &AnyImport::ImportFrom(ImportFrom {
 module: module.as_ref().map(String::as_str),
@@ -238,10 +249,11 @@ pub fn add_required_imports(
 settings,
 autofix,
 )
-                    }).collect()
-                }
-                StmtKind::Import { names } => {
-                    names.iter().filter_map(|name| {
+                    })
+                    .collect(),
+                StmtKind::Import { names } => names
+                    .iter()
+                    .filter_map(|name| {
 add_required_import(
 &AnyImport::Import(Import {
 name: Alias {
@@ -256,10 +268,13 @@ pub fn add_required_imports(
 settings,
 autofix,
 )
-                    }).collect()
-                }
+                    })
+                    .collect(),
 _ => {
-                    error!("Expected required import to be in import-from style: `{}`", required_import);
+                    error!(
+                        "Expected required import to be in import-from style: `{}`",
+                        required_import
+                    );
 vec![]
 }
 }

@ -10,10 +10,11 @@ define_violation!(
|
||||||
/// ## What it does
|
/// ## What it does
|
||||||
/// Checks for functions with a high `McCabe` complexity.
|
/// Checks for functions with a high `McCabe` complexity.
|
||||||
///
|
///
|
||||||
/// The `McCabe` complexity of a function is a measure of the complexity of the
|
/// The `McCabe` complexity of a function is a measure of the complexity of
|
||||||
/// control flow graph of the function. It is calculated by adding one to the
|
/// the control flow graph of the function. It is calculated by adding
|
||||||
/// number of decision points in the function. A decision point is a place in
|
/// one to the number of decision points in the function. A decision
|
||||||
/// the code where the program has a choice of two or more paths to follow.
|
/// point is a place in the code where the program has a choice of two
|
||||||
|
/// or more paths to follow.
|
||||||
///
|
///
|
||||||
/// ## Why is this bad?
|
/// ## Why is this bad?
|
||||||
/// Functions with a high complexity are hard to understand and maintain.
|
/// Functions with a high complexity are hard to understand and maintain.
|
||||||
|
@ -147,7 +148,7 @@ pub fn function_is_too_complex(
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use rustpython_parser::parser;
|
use rustpython_parser as parser;
|
||||||
|
|
||||||
use super::get_complexity_number;
|
use super::get_complexity_number;
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
use bitflags::bitflags;
|
use bitflags::bitflags;
|
||||||
use rustpython_parser::ast::Location;
|
use rustpython_parser::ast::Location;
|
||||||
use rustpython_parser::lexer::{LexResult, Tok};
|
use rustpython_parser::lexer::LexResult;
|
||||||
|
use rustpython_parser::Tok;
|
||||||
|
|
||||||
use crate::ast::types::Range;
|
use crate::ast::types::Range;
|
||||||
use crate::source_code::Locator;
|
use crate::source_code::Locator;
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
use rustpython_parser::lexer::{LexResult, Tok};
|
|
||||||
|
|
||||||
use ruff_macros::{define_violation, derive_message_formats};
|
use ruff_macros::{define_violation, derive_message_formats};
|
||||||
|
use rustpython_parser::lexer::LexResult;
|
||||||
|
use rustpython_parser::Tok;
|
||||||
|
|
||||||
use crate::ast::types::Range;
|
use crate::ast::types::Range;
|
||||||
use crate::fix::Fix;
|
use crate::fix::Fix;
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
use ruff_macros::{define_violation, derive_message_formats};
|
use ruff_macros::{define_violation, derive_message_formats};
|
||||||
use rustpython_parser::error::ParseError;
|
use rustpython_parser::ParseError;
|
||||||
|
|
||||||
use crate::ast::types::Range;
|
use crate::ast::types::Range;
|
||||||
use crate::registry::Diagnostic;
|
use crate::registry::Diagnostic;
|
||||||
|
|
|
@ -1,9 +1,8 @@
|
||||||
#![allow(dead_code)]
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use rustpython_parser::ast::Location;
|
|
||||||
use rustpython_parser::lexer::Tok;
|
|
||||||
|
|
||||||
use ruff_macros::{define_violation, derive_message_formats};
|
use ruff_macros::{define_violation, derive_message_formats};
|
||||||
|
use rustpython_parser::ast::Location;
|
||||||
|
use rustpython_parser::Tok;
|
||||||
|
|
||||||
use crate::ast::types::Range;
|
use crate::ast::types::Range;
|
||||||
use crate::registry::DiagnosticKind;
|
use crate::registry::DiagnosticKind;
|
||||||
|
|
|
@ -1,11 +1,8 @@
|
||||||
use anyhow::{bail, Result};
|
use anyhow::{bail, Result};
|
||||||
use libcst_native::{Call, Codegen, CodegenState, Dict, DictElement, Expression};
|
use libcst_native::{Call, Codegen, CodegenState, Dict, DictElement, Expression};
|
||||||
use rustpython_parser::ast::{Excepthandler, Expr};
|
|
||||||
use rustpython_parser::lexer;
|
|
||||||
use rustpython_parser::lexer::Tok;
|
|
||||||
use rustpython_parser::mode::Mode;
|
|
||||||
|
|
||||||
use ruff_python::string::strip_quotes_and_prefixes;
|
use ruff_python::string::strip_quotes_and_prefixes;
|
||||||
|
use rustpython_parser::ast::{Excepthandler, Expr};
|
||||||
|
use rustpython_parser::{lexer, Mode, Tok};
|
||||||
|
|
||||||
use crate::ast::types::Range;
|
use crate::ast::types::Range;
|
||||||
use crate::cst::matchers::{match_expr, match_module};
|
use crate::cst::matchers::{match_expr, match_module};
|
||||||
|
@ -124,7 +121,7 @@ pub fn remove_exception_handler_assignment(
|
||||||
// End of the token just before the `as` to the semicolon.
|
// End of the token just before the `as` to the semicolon.
|
||||||
let mut prev = None;
|
let mut prev = None;
|
||||||
for (start, tok, end) in
|
for (start, tok, end) in
|
||||||
lexer::make_tokenizer_located(contents, Mode::Module, excepthandler.location).flatten()
|
lexer::lex_located(contents, Mode::Module, excepthandler.location).flatten()
|
||||||
{
|
{
|
||||||
if matches!(tok, Tok::As) {
|
if matches!(tok, Tok::As) {
|
||||||
fix_start = prev;
|
fix_start = prev;
|
||||||
|
|
|
@@ -1,11 +1,8 @@
 use itertools::Itertools;
 use log::error;
-use rustpython_parser::ast::{ExprKind, Located, Stmt, StmtKind};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
-
 use ruff_macros::{define_violation, derive_message_formats};
+use rustpython_parser::ast::{ExprKind, Located, Stmt, StmtKind};
+use rustpython_parser::{lexer, Mode, Tok};
 
 use crate::ast::helpers::contains_effect;
 use crate::ast::types::{BindingKind, Range, RefEquality, ScopeKind};
@@ -21,8 +18,8 @@ define_violation!(
 /// Checks for the presence of unused variables in function scopes.
 ///
 /// ## Why is this bad?
-/// A variable that is defined but not used is likely a mistake, and should be
-/// removed to avoid confusion.
+/// A variable that is defined but not used is likely a mistake, and should
+/// be removed to avoid confusion.
 ///
 /// If a variable is intentionally defined-but-not-used, it should be
 /// prefixed with an underscore, or some other value that adheres to the
@@ -62,8 +59,8 @@ impl AlwaysAutofixableViolation for UnusedVariable {
 }
 }
 
-/// Return the start and end [`Location`] of the token after the next match of the predicate,
-/// skipping over any bracketed expressions.
+/// Return the start and end [`Location`] of the token after the next match of
+/// the predicate, skipping over any bracketed expressions.
 fn match_token_after<F, T>(located: &Located<T>, locator: &Locator, f: F) -> Range
 where
 F: Fn(Tok) -> bool,
@@ -76,7 +73,7 @@ where
 let mut brace_count = 0;
 
 for ((_, tok, _), (start, _, end)) in
-lexer::make_tokenizer_located(contents, Mode::Module, located.location)
+lexer::lex_located(contents, Mode::Module, located.location)
 .flatten()
 .tuple_windows()
 {
@@ -125,8 +122,8 @@ where
 unreachable!("No token after matched");
 }
 
-/// Return the start and end [`Location`] of the token matching the predicate, skipping over
-/// any bracketed expressions.
+/// Return the start and end [`Location`] of the token matching the predicate,
+/// skipping over any bracketed expressions.
 fn match_token<F, T>(located: &Located<T>, locator: &Locator, f: F) -> Range
 where
 F: Fn(Tok) -> bool,
@@ -138,8 +135,7 @@ where
 let mut sqb_count = 0;
 let mut brace_count = 0;
 
-for (start, tok, end) in
-lexer::make_tokenizer_located(contents, Mode::Module, located.location).flatten()
+for (start, tok, end) in lexer::lex_located(contents, Mode::Module, located.location).flatten()
 {
 match tok {
 Tok::Lpar => {
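The two helpers above (`match_token_after` and `match_token`) scan the token stream while counting bracket depth, so a match is only accepted outside any parenthesized, bracketed, or braced expression. A condensed sketch of that depth-counting pattern under the new API; the function name and exact behavior are illustrative, not the commit's code:

```rust
use rustpython_parser::ast::Location;
use rustpython_parser::{lexer, Mode, Tok};

/// Find the first comma that is not nested inside (), [], or {}.
/// (Illustrative sketch of the depth-counting scan; not part of the commit.)
fn first_top_level_comma(contents: &str) -> Option<(Location, Location)> {
    let mut depth: i32 = 0;
    for (start, tok, end) in lexer::lex(contents, Mode::Module).flatten() {
        match tok {
            Tok::Lpar | Tok::Lsqb | Tok::Lbrace => depth += 1,
            Tok::Rpar | Tok::Rsqb | Tok::Rbrace => depth -= 1,
            Tok::Comma if depth == 0 => return Some((start, end)),
            _ => {}
        }
    }
    None
}
```
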
@@ -1,13 +1,10 @@
 use std::str::FromStr;
 
+use ruff_macros::{define_violation, derive_message_formats};
 use rustc_hash::FxHashMap;
 use rustpython_common::cformat::{CFormatPart, CFormatSpec, CFormatStrOrBytes, CFormatString};
 use rustpython_parser::ast::{Constant, Expr, ExprKind, Location, Operator};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
-
-use ruff_macros::{define_violation, derive_message_formats};
+use rustpython_parser::{lexer, Mode, Tok};
 
 use crate::ast::types::Range;
 use crate::checkers::ast::Checker;
@@ -248,9 +245,7 @@ pub fn bad_string_format_type(checker: &mut Checker, expr: &Expr, right: &Expr)
 // Grab each string segment (in case there's an implicit concatenation).
 let content = checker.locator.slice(&Range::from_located(expr));
 let mut strings: Vec<(Location, Location)> = vec![];
-for (start, tok, end) in
-lexer::make_tokenizer_located(content, Mode::Module, expr.location).flatten()
-{
+for (start, tok, end) in lexer::lex_located(content, Mode::Module, expr.location).flatten() {
 if matches!(tok, Tok::String { .. }) {
 strings.push((start, end));
 } else if matches!(tok, Tok::Percent) {

@@ -120,7 +120,7 @@ pub fn too_many_branches(
 #[cfg(test)]
 mod tests {
 use anyhow::Result;
-use rustpython_parser::parser;
+use rustpython_parser as parser;
 
 use super::num_branches;
 

@@ -55,7 +55,7 @@ pub fn too_many_return_statements(
 #[cfg(test)]
 mod tests {
 use anyhow::Result;
-use rustpython_parser::parser;
+use rustpython_parser as parser;
 
 use super::num_returns;
 

@@ -123,7 +123,7 @@ pub fn too_many_statements(
 #[cfg(test)]
 mod tests {
 use anyhow::Result;
-use rustpython_parser::parser;
+use rustpython_parser as parser;
 
 use super::num_statements;
 

@@ -4,9 +4,7 @@ use libcst_native::{
 SmallStatement, Statement, Suite,
 };
 use rustpython_parser::ast::{Expr, Keyword, Location};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode, Tok};
 
 use crate::ast::types::Range;
 use crate::autofix::helpers::remove_argument;
@@ -111,7 +109,7 @@ pub fn remove_import_members(contents: &str, members: &[&str]) -> String {
 // Find all Tok::Name tokens that are not preceded by Tok::As, and all
 // Tok::Comma tokens.
 let mut prev_tok = None;
-for (start, tok, end) in lexer::make_tokenizer(contents, Mode::Module)
+for (start, tok, end) in lexer::lex(contents, Mode::Module)
 .flatten()
 .skip_while(|(_, tok, _)| !matches!(tok, Tok::Import))
 {

@@ -1,5 +1,6 @@
 use ruff_macros::{define_violation, derive_message_formats};
-use rustpython_parser::lexer::{LexResult, Tok};
+use rustpython_parser::lexer::LexResult;
+use rustpython_parser::Tok;
 
 use crate::ast::types::Range;
 use crate::fix::Fix;

@@ -1,13 +1,10 @@
+use ruff_macros::{define_violation, derive_message_formats};
 use rustc_hash::FxHashMap;
 use rustpython_common::format::{
 FieldName, FieldNamePart, FieldType, FormatPart, FormatString, FromTemplate,
 };
 use rustpython_parser::ast::{Constant, Expr, ExprKind, KeywordData};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
-
-use ruff_macros::{define_violation, derive_message_formats};
+use rustpython_parser::{lexer, Mode, Tok};
 
 use crate::ast::types::Range;
 use crate::checkers::ast::Checker;
@@ -131,7 +128,7 @@ fn try_convert_to_f_string(checker: &Checker, expr: &Expr) -> Option<String> {
 let contents = checker.locator.slice(&Range::from_located(value));
 
 // Tokenize: we need to avoid trying to fix implicit string concatenations.
-if lexer::make_tokenizer(contents, Mode::Module)
+if lexer::lex(contents, Mode::Module)
 .flatten()
 .filter(|(_, tok, _)| matches!(tok, Tok::String { .. }))
 .count()

@@ -2,9 +2,7 @@ use std::fmt;
 
 use ruff_macros::{define_violation, derive_message_formats};
 use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode, Tok};
 use serde::{Deserialize, Serialize};
 
 use crate::ast::types::Range;
@@ -119,7 +117,7 @@ pub fn native_literals(
 // safely remove the outer call in this situation. We're following pyupgrade
 // here and skip.
 let arg_code = checker.locator.slice(&Range::from_located(arg));
-if lexer::make_tokenizer_located(arg_code, Mode::Module, arg.location)
+if lexer::lex_located(arg_code, Mode::Module, arg.location)
 .flatten()
 .filter(|(_, tok, _)| matches!(tok, Tok::String { .. }))
 .count()

@@ -2,12 +2,9 @@ use std::cmp::Ordering;
 
 use log::error;
 use num_bigint::{BigInt, Sign};
-use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Location, Stmt};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
-
 use ruff_macros::{define_violation, derive_message_formats};
+use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Location, Stmt};
+use rustpython_parser::{lexer, Mode, Tok};
 
 use crate::ast::types::{Range, RefEquality};
 use crate::ast::whitespace::indentation;
@@ -69,7 +66,7 @@ fn metadata<T>(locator: &Locator, located: &Located<T>) -> Option<BlockMetadata>
 let mut else_ = None;
 
 for (start, tok, _) in
-lexer::make_tokenizer_located(text, Mode::Module, Location::new(located.location.row(), 0))
+lexer::lex_located(text, Mode::Module, Location::new(located.location.row(), 0))
 .flatten()
 .filter(|(_, tok, _)| {
 !matches!(

@@ -1,16 +1,13 @@
 use std::str::FromStr;
 
-use rustpython_common::cformat::{
-CConversionFlags, CFormatPart, CFormatPrecision, CFormatQuantity, CFormatString,
-};
-use rustpython_parser::ast::{Constant, Expr, ExprKind, Location};
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
-
 use ruff_macros::{define_violation, derive_message_formats};
 use ruff_python::identifiers::is_identifier;
 use ruff_python::keyword::KWLIST;
+use rustpython_common::cformat::{
+CConversionFlags, CFormatPart, CFormatPrecision, CFormatQuantity, CFormatString,
+};
+use rustpython_parser::ast::{Constant, Expr, ExprKind, Location};
+use rustpython_parser::{lexer, Mode, Tok};
 
 use crate::ast::types::Range;
 use crate::ast::whitespace::indentation;
@@ -321,7 +318,7 @@ pub(crate) fn printf_string_formatting(
 // Grab each string segment (in case there's an implicit concatenation).
 let mut strings: Vec<(Location, Location)> = vec![];
 let mut extension = None;
-for (start, tok, end) in lexer::make_tokenizer_located(
+for (start, tok, end) in lexer::lex_located(
 checker.locator.slice(&Range::from_located(expr)),
 Mode::Module,
 expr.location,

@@ -4,9 +4,7 @@ use anyhow::{anyhow, Result};
 use log::error;
 use ruff_macros::{define_violation, derive_message_formats};
 use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword, Location};
-use rustpython_parser::lexer;
-use rustpython_parser::mode::Mode;
-use rustpython_parser::token::Tok;
+use rustpython_parser::{lexer, Mode, Tok};
 
 use crate::ast::helpers::find_keyword;
 use crate::ast::types::Range;
@@ -143,9 +141,7 @@ fn create_remove_param_fix(locator: &Locator, expr: &Expr, mode_param: &Expr) ->
 let mut fix_end: Option<Location> = None;
 let mut is_first_arg: bool = false;
 let mut delete_first_arg: bool = false;
-for (start, tok, end) in
-lexer::make_tokenizer_located(content, Mode::Module, expr.location).flatten()
-{
+for (start, tok, end) in lexer::lex_located(content, Mode::Module, expr.location).flatten() {
 if start == mode_param.location {
 if is_first_arg {
 delete_first_arg = true;

@@ -1,13 +1,12 @@
+use rustpython_parser as parser;
 use rustpython_parser::ast::{Mod, Suite};
-use rustpython_parser::error::ParseError;
 use rustpython_parser::lexer::LexResult;
-use rustpython_parser::mode::Mode;
-use rustpython_parser::{lexer, parser};
+use rustpython_parser::{lexer, Mode, ParseError};
 
 /// Collect tokens up to and including the first error.
 pub fn tokenize(contents: &str) -> Vec<LexResult> {
 let mut tokens: Vec<LexResult> = vec![];
-for tok in lexer::make_tokenizer(contents, Mode::Module) {
+for tok in lexer::lex(contents, Mode::Module) {
 let is_err = tok.is_err();
 tokens.push(tok);
 if is_err {
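The `tokenize` helper above stops collecting after the first lexical error, so callers see at most one `Err` entry, and it is always the last element of the vector. A small sketch of how a caller might check for that; the caller itself is hypothetical and not part of the commit:

```rust
use rustpython_parser::lexer::LexResult;

/// Return true if the collected token stream ended on a lexical error.
/// (Hypothetical caller of the `tokenize` helper shown above.)
fn ends_with_error(tokens: &[LexResult]) -> bool {
    tokens.last().map_or(false, |tok| tok.is_err())
}
```
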
@@ -1258,7 +1258,7 @@ impl<'a> Generator<'a> {
 
 #[cfg(test)]
 mod tests {
-use rustpython_parser::parser;
+use rustpython_parser as parser;
 
 use crate::source_code::stylist::{Indentation, LineEnding, Quote};
 use crate::source_code::Generator;

@@ -2,7 +2,8 @@
 //! are omitted from the AST (e.g., commented lines).
 
 use rustpython_parser::ast::Location;
-use rustpython_parser::lexer::{LexResult, Tok};
+use rustpython_parser::lexer::LexResult;
+use rustpython_parser::Tok;
 
 pub struct Indexer {
 commented_lines: Vec<usize>,
@@ -49,16 +50,15 @@ impl From<&[LexResult]> for Indexer {
 
 #[cfg(test)]
 mod tests {
-use rustpython_parser::lexer;
 use rustpython_parser::lexer::LexResult;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode};
 
 use crate::source_code::Indexer;
 
 #[test]
 fn continuation() {
 let contents = r#"x = 1"#;
-let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 let indexer: Indexer = lxr.as_slice().into();
 assert_eq!(indexer.continuation_lines(), Vec::<usize>::new().as_slice());
 
@@ -70,7 +70,7 @@ x = 1
 y = 2
 "#
 .trim();
-let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 let indexer: Indexer = lxr.as_slice().into();
 assert_eq!(indexer.continuation_lines(), Vec::<usize>::new().as_slice());
 
@@ -90,7 +90,7 @@ if True:
 )
 "#
 .trim();
-let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 let indexer: Indexer = lxr.as_slice().into();
 assert_eq!(indexer.continuation_lines(), [1, 5, 6, 11]);
 
@@ -110,7 +110,7 @@ x = 1; \
 import os
 "#
 .trim();
-let lxr: Vec<LexResult> = lexer::make_tokenizer(contents, Mode::Module).collect();
+let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
 let indexer: Indexer = lxr.as_slice().into();
 assert_eq!(indexer.continuation_lines(), [9, 12]);
 }

@@ -6,8 +6,8 @@ mod stylist;
 pub(crate) use generator::Generator;
 pub(crate) use indexer::Indexer;
 pub(crate) use locator::Locator;
-use rustpython_parser::error::ParseError;
-use rustpython_parser::parser;
+use rustpython_parser as parser;
+use rustpython_parser::ParseError;
 pub(crate) use stylist::{LineEnding, Stylist};
 
 /// Run round-trip source code generation on a given Python code.
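With the flattened exports, the crate itself is aliased as `parser` and `ParseError` is taken from the crate root rather than `rustpython_parser::error`. A minimal round-trip sketch, assuming `parse_program(source, source_path)` is the parsing entry point used elsewhere in these crates; the source-path label is illustrative:

```rust
use rustpython_parser as parser;
use rustpython_parser::ast::Suite;
use rustpython_parser::ParseError;

/// Parse a module body with the aliased crate import.
/// (Illustrative sketch; the source path string is an assumption.)
fn parse_module(contents: &str) -> Result<Suite, ParseError> {
    parser::parse_program(contents, "<round-trip>")
}
```
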
@@ -5,9 +5,7 @@ use std::ops::Deref;
 
 use once_cell::unsync::OnceCell;
 use rustpython_parser::ast::Location;
-use rustpython_parser::lexer;
-use rustpython_parser::lexer::Tok;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode, Tok};
 
 use crate::ast::types::Range;
 use crate::rules::pydocstyle::helpers::leading_quote;
@@ -166,7 +164,7 @@ impl Deref for LineEnding {
 
 /// Detect the indentation style of the given tokens.
 fn detect_indentation(contents: &str, locator: &Locator) -> Option<Indentation> {
-for (_start, tok, end) in lexer::make_tokenizer(contents, Mode::Module).flatten() {
+for (_start, tok, end) in lexer::lex(contents, Mode::Module).flatten() {
 if let Tok::Indent { .. } = tok {
 let start = Location::new(end.row(), 0);
 let whitespace = locator.slice(&Range::new(start, end));
@@ -178,7 +176,7 @@ fn detect_indentation(contents: &str, locator: &Locator) -> Option<Indentation>
 
 /// Detect the quotation style of the given tokens.
 fn detect_quote(contents: &str, locator: &Locator) -> Option<Quote> {
-for (start, tok, end) in lexer::make_tokenizer(contents, Mode::Module).flatten() {
+for (start, tok, end) in lexer::lex(contents, Mode::Module).flatten() {
 if let Tok::String { .. } = tok {
 let content = locator.slice(&Range::new(start, end));
 if let Some(pattern) = leading_quote(content) {

@@ -5,7 +5,7 @@ use std::fs;
 use std::path::PathBuf;
 
 use anyhow::Result;
-use rustpython_parser::parser;
+use rustpython_parser as parser;
 
 #[derive(clap::Args)]
 pub struct Args {

@@ -5,8 +5,7 @@ use std::fs;
 use std::path::PathBuf;
 
 use anyhow::Result;
-use rustpython_parser::lexer;
-use rustpython_parser::mode::Mode;
+use rustpython_parser::{lexer, Mode};
 
 #[derive(clap::Args)]
 pub struct Args {
@@ -17,7 +16,7 @@ pub struct Args {
 
 pub fn main(args: &Args) -> Result<()> {
 let contents = fs::read_to_string(&args.file)?;
-for (_, tok, _) in lexer::make_tokenizer(&contents, Mode::Module).flatten() {
+for (_, tok, _) in lexer::lex(&contents, Mode::Module).flatten() {
 println!("{tok:#?}");
 }
 Ok(())

@@ -1,13 +1,12 @@
+use rustpython_parser as parser;
 use rustpython_parser::ast::{Mod, Suite};
-use rustpython_parser::error::ParseError;
 use rustpython_parser::lexer::LexResult;
-use rustpython_parser::mode::Mode;
-use rustpython_parser::{lexer, parser};
+use rustpython_parser::{lexer, Mode, ParseError};
 
 /// Collect tokens up to and including the first error.
 pub fn tokenize(contents: &str) -> Vec<LexResult> {
 let mut tokens: Vec<LexResult> = vec![];
-for tok in lexer::make_tokenizer(contents, Mode::Module) {
+for tok in lexer::lex(contents, Mode::Module) {
 let is_err = tok.is_err();
 tokens.push(tok);
 if is_err {

@@ -1,6 +1,7 @@
 use rustc_hash::FxHashMap;
 use rustpython_parser::ast::Location;
-use rustpython_parser::lexer::{LexResult, Tok};
+use rustpython_parser::lexer::LexResult;
+use rustpython_parser::Tok;
 
 use crate::core::types::Range;
 use crate::cst::{Alias, Excepthandler, ExcepthandlerKind, Expr, ExprKind, Stmt, StmtKind};
@@ -45,7 +46,8 @@ pub struct TriviaToken {
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum TriviaKind {
-/// A Comment that is separated by at least one line break from the preceding token.
+/// A Comment that is separated by at least one line break from the
+/// preceding token.
 ///
 /// # Examples
 ///

@@ -1,4 +0,0 @@
-edition = "2021"
-max_width = 100
-reorder_imports = true
-reorder_modules = true