perf(formatter): Improve is_expression_parenthesized performance (#5825)

parent 1aa851796e
commit 3b32e3a8fe

2 changed files with 27 additions and 9 deletions
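The commit restructures the parenthesization check so that the cheap forward scan for a closing `)` runs first and gates the more expensive backwards scan for the opening `(`. Below is a minimal model of that two-step strategy, using plain character scanning as a stand-in for the real `SimpleTokenizer`/`first_non_trivia_token` machinery; the function is illustrative and is not part of the commit:

/// Illustrative stand-in for the new two-step check (not the commit's code):
/// look for the closing `)` after the range first, and only if it is there,
/// pay for the backwards scan towards the opening `(`. Whitespace skipping
/// stands in for the real trivia skipping.
fn is_parenthesized_model(range: std::ops::Range<usize>, contents: &str) -> bool {
    let after = contents[range.end..].chars().find(|c| !c.is_whitespace());
    if after != Some(')') {
        // Cheap early exit: without a closing paren the backwards scan can
        // never succeed, so it is skipped entirely.
        return false;
    }
    let before = contents[..range.start]
        .chars()
        .rev()
        .find(|c| !c.is_whitespace());
    before == Some('(')
}

fn main() {
    assert!(is_parenthesized_model(1..6, "(a + b)"));
    assert!(!is_parenthesized_model(0..5, "a + b"));
}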
@@ -1,6 +1,6 @@
 use crate::context::NodeLevel;
 use crate::prelude::*;
-use crate::trivia::{first_non_trivia_token, first_non_trivia_token_rev, Token, TokenKind};
+use crate::trivia::{first_non_trivia_token, SimpleTokenizer, Token, TokenKind};
 use ruff_formatter::prelude::tag::Condition;
 use ruff_formatter::{format_args, write, Argument, Arguments};
 use ruff_python_ast::node::AnyNodeRef;
@@ -72,19 +72,27 @@ pub enum Parentheses {
 }
 
 pub(crate) fn is_expression_parenthesized(expr: AnyNodeRef, contents: &str) -> bool {
-    matches!(
+    // First test if there's a closing parentheses because it tends to be cheaper.
+    if matches!(
         first_non_trivia_token(expr.end(), contents),
         Some(Token {
             kind: TokenKind::RParen,
             ..
         })
-    ) && matches!(
-        first_non_trivia_token_rev(expr.start(), contents),
-        Some(Token {
-            kind: TokenKind::LParen,
-            ..
-        })
-    )
+    ) {
+        let mut tokenizer =
+            SimpleTokenizer::up_to_without_back_comment(expr.start(), contents).skip_trivia();
+
+        matches!(
+            tokenizer.next_back(),
+            Some(Token {
+                kind: TokenKind::LParen,
+                ..
+            })
+        )
+    } else {
+        false
+    }
 }
 
 /// Formats `content` enclosed by the `left` and `right` parentheses. The implementation also ensures
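The hunk's own comment ("a closing parentheses ... tends to be cheaper") reflects an asymmetry of backwards lexing: seen from the right, a token such as `)` may actually sit inside a `#` comment, so a backwards lexer has to rescan the line from its start to rule that out. A small, self-contained illustration of that ambiguity (hypothetical code, not from the commit):

fn main() {
    // Seen from the right, the last `)` here is comment text, not a token.
    // Only a forward scan for `#` from the line start reveals that.
    let line = "x = (1)  # not a real paren: )";
    let naive = line.rfind(')').unwrap();
    let code_only = &line[..line.find('#').unwrap()];
    let real = code_only.rfind(')').unwrap();
    // The naive backwards search picks the wrong `)`.
    assert_ne!(naive, real);
}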
@@ -274,10 +274,20 @@ impl<'a> SimpleTokenizer<'a> {
         Self::new(source, range)
     }
 
+    /// Creates a tokenizer that lexes tokens from the start of `source` up to `offset`.
     pub(crate) fn up_to(offset: TextSize, source: &'a str) -> Self {
         Self::new(source, TextRange::up_to(offset))
     }
 
+    /// Creates a tokenizer that lexes tokens from the start of `source` up to `offset`, and informs
+    /// the lexer that the line at `offset` contains no comments. This can significantly speed up backwards lexing
+    /// because the lexer doesn't need to scan for comments.
+    pub(crate) fn up_to_without_back_comment(offset: TextSize, source: &'a str) -> Self {
+        let mut tokenizer = Self::up_to(offset, source);
+        tokenizer.back_line_has_no_comment = true;
+        tokenizer
+    }
+
     fn to_keyword_or_other(&self, range: TextRange) -> TokenKind {
         let source = &self.source[range];
         match source {
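The new `back_line_has_no_comment` flag encodes exactly the guarantee the first hunk can make: the caller promises the line ending at `offset` contains no comment, so the backwards lexer may skip the forward rescan. A toy model of that effect follows; only the constructor names `up_to`/`up_to_without_back_comment` and the flag name come from the commit, everything else is an illustrative simplification:

/// Toy model: find the last non-whitespace character before `offset`.
/// Normally the line must first be rescanned for a `#` comment; with the
/// no-comment hint that rescan is skipped, which is the whole speedup.
fn last_token_before(offset: usize, source: &str, back_line_has_no_comment: bool) -> Option<char> {
    let line_start = source[..offset].rfind('\n').map_or(0, |i| i + 1);
    let line = &source[line_start..offset];
    let code = if back_line_has_no_comment {
        // Caller guarantees there is no comment: take the line verbatim.
        line
    } else {
        // Otherwise rescan forward for `#` and cut off a trailing comment.
        line.find('#').map_or(line, |i| &line[..i])
    };
    code.chars().rev().find(|c| !c.is_whitespace())
}

fn main() {
    // Without the hint, the comment must be stripped before looking backwards.
    let src = "x = (1)  # trailing ) in comment";
    assert_eq!(last_token_before(src.len(), src, false), Some(')'));
    // On a comment-free line the hint skips the rescan and agrees.
    let clean = "x = (1)";
    assert_eq!(last_token_before(clean.len(), clean, true), Some(')'));
}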