Mirror of https://github.com/rust-lang/rust-analyzer.git (synced 2025-09-03 17:10:39 +00:00)
Change TokenSource to iteration based
parent ef00b5af1c
commit fcb1eef323
8 changed files with 174 additions and 103 deletions
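This commit moves TokenSource from a random-access interface, where every query took an explicit `pos: usize`, to a cursor-style, iteration-based one: the source now owns its position, and the parser advances it with `bump`. Reconstructed from the `impl` blocks in the hunks below (the actual trait and token struct live in the ra_parser crate and are not part of this diff; the token type is aliased as `PToken` in text_token_source.rs), the new shape is roughly:

    // Sketch only: inferred from the TextTokenSource impl below,
    // not copied from ra_parser.
    pub struct Token {
        pub kind: SyntaxKind,
        /// True when the next token starts exactly where this one ends
        /// (no whitespace or comments in between).
        pub is_jointed_to_next: bool,
    }

    pub trait TokenSource {
        /// The token under the cursor.
        fn current(&self) -> Token;
        /// Peek `n` tokens past the cursor without advancing it.
        fn lookahead_nth(&self, n: usize) -> Token;
        /// Advance the cursor by one token; a no-op once at EOF.
        fn bump(&mut self);
        /// Check whether the current token is the keyword `kw`.
        fn is_keyword(&self, kw: &str) -> bool;
    }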
crates/ra_syntax/src/parsing.rs
@@ -17,8 +17,8 @@ pub(crate) use self::reparsing::incremental_reparse;
 
 pub(crate) fn parse_text(text: &str) -> (GreenNode, Vec<SyntaxError>) {
     let tokens = tokenize(&text);
-    let token_source = text_token_source::TextTokenSource::new(text, &tokens);
+    let mut token_source = text_token_source::TextTokenSource::new(text, &tokens);
     let mut tree_sink = text_tree_sink::TextTreeSink::new(text, &tokens);
-    ra_parser::parse(&token_source, &mut tree_sink);
+    ra_parser::parse(&mut token_source, &mut tree_sink);
     tree_sink.finish()
 }
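Both here and in the reparse_block hunk below, the call sites switch from `&token_source` to `&mut token_source`: with `bump(&mut self)` on the trait, the parser mutates the token source as it advances instead of threading an explicit position through every query, so the `parse` entry points now take the source by mutable reference.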
crates/ra_syntax/src/parsing/reparsing.rs
@@ -85,9 +85,9 @@ fn reparse_block<'node>(
     if !is_balanced(&tokens) {
         return None;
     }
-    let token_source = TextTokenSource::new(&text, &tokens);
+    let mut token_source = TextTokenSource::new(&text, &tokens);
     let mut tree_sink = TextTreeSink::new(&text, &tokens);
-    reparser.parse(&token_source, &mut tree_sink);
+    reparser.parse(&mut token_source, &mut tree_sink);
     let (green, new_errors) = tree_sink.finish();
     Some((node.replace_with(green), new_errors, node.range()))
 }
crates/ra_syntax/src/parsing/text_token_source.rs
@@ -1,7 +1,8 @@
 use ra_parser::TokenSource;
+use ra_parser::Token as PToken;
 
 use crate::{
-    SyntaxKind, SyntaxKind::EOF, TextRange, TextUnit,
+    SyntaxKind::EOF, TextRange, TextUnit,
     parsing::lexer::Token,
 };
 
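The new import is aliased (`Token as PToken`) because this file already uses `Token` for the lexer's token type from `parsing::lexer`. `SyntaxKind` is dropped from the `crate` import because `token_kind`, the only method here that returned a bare `SyntaxKind`, is deleted in the next hunk; only the `EOF` variant is still referenced.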
@@ -23,31 +24,50 @@ pub(crate) struct TextTokenSource<'t> {
     /// ```
     /// tokens: `[struct, Foo, {, }]`
     tokens: Vec<Token>,
+
+    /// Current token and position
+    curr: (PToken, usize),
 }
 
 impl<'t> TokenSource for TextTokenSource<'t> {
-    fn token_kind(&self, pos: usize) -> SyntaxKind {
-        if !(pos < self.tokens.len()) {
-            return EOF;
-        }
-        self.tokens[pos].kind
+    fn current(&self) -> PToken {
+        return self.curr.0;
     }
-    fn is_token_joint_to_next(&self, pos: usize) -> bool {
-        if !(pos + 1 < self.tokens.len()) {
-            return true;
-        }
-        self.start_offsets[pos] + self.tokens[pos].len == self.start_offsets[pos + 1]
+
+    fn lookahead_nth(&self, n: usize) -> PToken {
+        mk_token(self.curr.1 + n, &self.start_offsets, &self.tokens)
     }
-    fn is_keyword(&self, pos: usize, kw: &str) -> bool {
+
+    fn bump(&mut self) {
+        if self.curr.0.kind == EOF {
+            return;
+        }
+
+        let pos = self.curr.1 + 1;
+        self.curr = (mk_token(pos, &self.start_offsets, &self.tokens), pos);
+    }
+
+    fn is_keyword(&self, kw: &str) -> bool {
+        let pos = self.curr.1;
         if !(pos < self.tokens.len()) {
             return false;
         }
         let range = TextRange::offset_len(self.start_offsets[pos], self.tokens[pos].len);
-
         self.text[range] == *kw
     }
 }
 
+fn mk_token(pos: usize, start_offsets: &[TextUnit], tokens: &[Token]) -> PToken {
+    let kind = tokens.get(pos).map(|t| t.kind).unwrap_or(EOF);
+    let is_jointed_to_next = if pos + 1 < start_offsets.len() {
+        start_offsets[pos] + tokens[pos].len == start_offsets[pos + 1]
+    } else {
+        false
+    };
+
+    PToken { kind, is_jointed_to_next }
+}
+
 impl<'t> TextTokenSource<'t> {
     /// Generate input from tokens(expect comment and whitespace).
     pub fn new(text: &'t str, raw_tokens: &'t [Token]) -> TextTokenSource<'t> {
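The jointness computed by `mk_token` is what lets the parser glue adjacent punctuation (for example, two `>` tokens into `>>`) only when nothing was skipped between them. A self-contained sketch of the same arithmetic, using hypothetical local types in place of ra_syntax's `Token` and `TextUnit`:

    // Two tokens are "jointed" when the first ends exactly where the
    // second begins, i.e. no whitespace or comment sits between them.
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Kind { Ident, Punct, Eof }

    struct Tok { kind: Kind, len: u32 }

    fn mk_token(pos: usize, start_offsets: &[u32], tokens: &[Tok]) -> (Kind, bool) {
        let kind = tokens.get(pos).map(|t| t.kind).unwrap_or(Kind::Eof);
        let is_jointed_to_next = if pos + 1 < start_offsets.len() {
            start_offsets[pos] + tokens[pos].len == start_offsets[pos + 1]
        } else {
            false
        };
        (kind, is_jointed_to_next)
    }

    fn main() {
        // Source text ">> x": the two '>' are joint; '>' and 'x' are not,
        // because the lexer skipped a space before 'x'.
        let tokens = [
            Tok { kind: Kind::Punct, len: 1 }, // '>' at offset 0
            Tok { kind: Kind::Punct, len: 1 }, // '>' at offset 1
            Tok { kind: Kind::Ident, len: 1 }, // 'x' at offset 3
        ];
        let start_offsets = [0, 1, 3];
        assert_eq!(mk_token(0, &start_offsets, &tokens), (Kind::Punct, true));
        assert_eq!(mk_token(1, &start_offsets, &tokens), (Kind::Punct, false));
        assert_eq!(mk_token(3, &start_offsets, &tokens), (Kind::Eof, false)); // past the end
    }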
@@ -62,6 +82,7 @@ impl<'t> TextTokenSource<'t> {
             len += token.len;
         }
 
-        TextTokenSource { text, start_offsets, tokens }
+        let first = mk_token(0, &start_offsets, &tokens);
+        TextTokenSource { text, start_offsets, tokens, curr: (first, 0) }
     }
 }
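Taken together: construction mirrors `parse_text` above, while the loop below is a hypothetical sketch of how a consumer drives the new cursor API. It is not code from this commit; `count_structs` is an invented helper for illustration:

    // Hypothetical consumer of the iteration-based TokenSource,
    // using tokenize/TextTokenSource/EOF as in the hunks above.
    fn count_structs(text: &str) -> usize {
        let tokens = tokenize(text);
        let mut src = TextTokenSource::new(text, &tokens);
        let mut n = 0;
        while src.current().kind != EOF {
            if src.is_keyword("struct") {
                n += 1;
            }
            src.bump(); // advance the cursor; no `pos` to thread around
        }
        n
    }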