Use TokenKind in doc_lines_from_tokens (#11418)

## Summary

This PR updates the `doc_lines_from_tokens` function to use `TokenKind`
instead of `Tok`. The function now takes a `TokenKindIter` rather than a
slice of `LexResult`s, so the `DocLines` iterator matches on lightweight
token kinds instead of owned tokens.

This is part of #11401.
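
The payload difference is the point of the change: the old code matched `Tok::Comment(..)`, a variant that carries owned data, while `TokenKind::Comment` is a plain `Copy` discriminant. A minimal sketch of that distinction (illustrative enums, not ruff's actual definitions):

```rust
// Illustrative only: `Tok`-style variants own their payloads, while a
// `TokenKind`-style enum is a fieldless, `Copy` discriminant.
#[derive(Debug)]
enum Tok {
    Comment(String), // owns the comment text, even if the caller never reads it
    Newline,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum TokenKind {
    Comment, // no payload: just the kind
    Newline,
}

impl Tok {
    fn kind(&self) -> TokenKind {
        match self {
            Tok::Comment(_) => TokenKind::Comment,
            Tok::Newline => TokenKind::Newline,
        }
    }
}

fn main() {
    let toks = [Tok::Comment("# a doc line".to_string()), Tok::Newline];
    for tok in &toks {
        // Doc-line extraction only needs the kind (plus a range), never the text.
        println!("{tok:?} -> {:?}", tok.kind());
    }
}
```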

## Test Plan

`cargo test`
Commit `a33763170e` (parent `025768d303`)
Authored by Dhruv Manilawala on 2024-05-14 22:26:14 +05:30; committed by GitHub.
2 changed files with 10 additions and 11 deletions.

The first file reworks `doc_lines_from_tokens` and the `DocLines` iterator:

```diff
@@ -4,27 +4,26 @@
 use std::iter::FusedIterator;
 
 use ruff_python_ast::{self as ast, Stmt, Suite};
-use ruff_python_parser::lexer::LexResult;
-use ruff_python_parser::Tok;
+use ruff_python_parser::{TokenKind, TokenKindIter};
 use ruff_text_size::{Ranged, TextSize};
 
 use ruff_python_ast::statement_visitor::{walk_stmt, StatementVisitor};
 use ruff_source_file::{Locator, UniversalNewlineIterator};
 
 /// Extract doc lines (standalone comments) from a token sequence.
-pub(crate) fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines {
-    DocLines::new(lxr)
+pub(crate) fn doc_lines_from_tokens(tokens: TokenKindIter) -> DocLines {
+    DocLines::new(tokens)
 }
 
 pub(crate) struct DocLines<'a> {
-    inner: std::iter::Flatten<core::slice::Iter<'a, LexResult>>,
+    inner: TokenKindIter<'a>,
     prev: TextSize,
 }
 
 impl<'a> DocLines<'a> {
-    fn new(lxr: &'a [LexResult]) -> Self {
+    fn new(tokens: TokenKindIter<'a>) -> Self {
         Self {
-            inner: lxr.iter().flatten(),
+            inner: tokens,
             prev: TextSize::default(),
         }
     }
@@ -39,15 +38,15 @@ impl Iterator for DocLines<'_> {
         let (tok, range) = self.inner.next()?;
 
         match tok {
-            Tok::Comment(..) => {
+            TokenKind::Comment => {
                 if at_start_of_line {
                     break Some(range.start());
                 }
             }
-            Tok::Newline | Tok::NonLogicalNewline => {
+            TokenKind::Newline | TokenKind::NonLogicalNewline => {
                 at_start_of_line = true;
            }
-            Tok::Indent | Tok::Dedent => {
+            TokenKind::Indent | TokenKind::Dedent => {
                 // ignore
             }
             _ => {
```
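
To make the control flow concrete, here is a self-contained sketch of the same scan with simplified types (not ruff's API). The final `_` arm is truncated in the hunk above; the sketch assumes it marks the line as no longer at its start, so only comments that lead their line are reported:

```rust
#[allow(dead_code)]
#[derive(Clone, Copy, Debug)]
enum TokenKind {
    Comment,
    Newline,
    NonLogicalNewline,
    Indent,
    Dedent,
    Name,
}

/// Yield the start offset of every comment that begins its line.
fn doc_line_starts(tokens: &[(TokenKind, u32)]) -> Vec<u32> {
    let mut starts = Vec::new();
    let mut at_start_of_line = true;
    for &(kind, start) in tokens {
        match kind {
            TokenKind::Comment => {
                if at_start_of_line {
                    starts.push(start);
                }
            }
            TokenKind::Newline | TokenKind::NonLogicalNewline => {
                at_start_of_line = true;
            }
            // Indentation tokens don't affect whether a comment leads its line.
            TokenKind::Indent | TokenKind::Dedent => {}
            // Assumption: any other token makes a following comment "trailing".
            _ => at_start_of_line = false,
        }
    }
    starts
}

fn main() {
    let tokens = [
        (TokenKind::Comment, 0),  // "# standalone" opens its line: reported
        (TokenKind::Newline, 12),
        (TokenKind::Name, 13),    // code precedes the next comment...
        (TokenKind::Comment, 20), // ...so "# trailing" is not a doc line
        (TokenKind::Newline, 30),
    ];
    assert_eq!(doc_line_starts(&tokens), vec![0]);
}
```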

The second file updates the caller in `check_path`:

```diff
@@ -93,7 +93,7 @@ pub fn check_path(
     let use_doc_lines = settings.rules.enabled(Rule::DocLineTooLong);
     let mut doc_lines = vec![];
     if use_doc_lines {
-        doc_lines.extend(doc_lines_from_tokens(&tokens));
+        doc_lines.extend(doc_lines_from_tokens(tokens.kinds()));
     }
 
     // Run the token-based rules.
```
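
On the caller side, `tokens.kinds()` hands the extractor a kind-only view of the lexed tokens. As a rough illustration of that adapter pattern, here is a hypothetical `kinds` helper over mock types (not ruff's implementation):

```rust
#[derive(Debug)]
enum Tok {
    Comment(String),
    Newline,
}

#[derive(Debug, Clone, Copy)]
enum TokenKind {
    Comment,
    Newline,
}

type Range = std::ops::Range<u32>;

// Map borrowed tokens to lightweight (kind, range) pairs; the owned comment
// text is only inspected to pick a discriminant, never cloned.
fn kinds(tokens: &[(Tok, Range)]) -> impl Iterator<Item = (TokenKind, Range)> + '_ {
    tokens.iter().map(|(tok, range)| {
        let kind = match tok {
            Tok::Comment(_) => TokenKind::Comment,
            Tok::Newline => TokenKind::Newline,
        };
        (kind, range.clone())
    })
}

fn main() {
    let tokens = vec![
        (Tok::Comment("# hi".to_string()), 0..4),
        (Tok::Newline, 4..5),
    ];
    for (kind, range) in kinds(&tokens) {
        println!("{kind:?} at {range:?}");
    }
}
```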