Remove lexer-based comment range detection (#5785)

## Summary

I'm doing some unrelated profiling, and I noticed that this method is
actually measurable on the CPython benchmark -- it's > 1% of execution
time. We don't need to lex here; we already know the ranges of all
comments, so we can just do a simple binary search for overlap, which
brings the method down to 0%.

## Test Plan

`cargo test`
This commit is contained in:
Charlie Marsh 2023-07-15 21:03:27 -04:00 committed by GitHub
parent f2e995f78d
commit 4782675bf9
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
10 changed files with 44 additions and 44 deletions

View file

@ -717,7 +717,7 @@ pub fn map_subscript(expr: &Expr) -> &Expr {
}
/// Returns `true` if a statement or expression includes at least one comment.
pub fn has_comments<T>(node: &T, locator: &Locator) -> bool
pub fn has_comments<T>(node: &T, locator: &Locator, indexer: &Indexer) -> bool
where
T: Ranged,
{
@ -732,26 +732,9 @@ where
locator.line_end(node.end())
};
has_comments_in(TextRange::new(start, end), locator)
}
/// Returns `true` if a [`TextRange`] includes at least one comment.
pub fn has_comments_in(range: TextRange, locator: &Locator) -> bool {
let source = &locator.contents()[range];
for tok in lexer::lex_starts_at(source, Mode::Module, range.start()) {
match tok {
Ok((tok, _)) => {
if matches!(tok, Tok::Comment(..)) {
return true;
}
}
Err(_) => {
return false;
}
}
}
false
indexer
.comment_ranges()
.intersects(TextRange::new(start, end))
}
/// Return `true` if the body uses `locals()`, `globals()`, `vars()`, `eval()`.