Reduce the usage of bare subscript operator

Veetaha 2020-06-14 12:48:16 +03:00
parent 246c66a7f7
commit 667d224fcc


@@ -1,40 +1,35 @@
-//! FIXME: write short doc here
+//! See `TextTokenSource` docs.
 
-use ra_parser::Token as PToken;
 use ra_parser::TokenSource;
 
 use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextSize};
 
+/// Implementation of `ra_parser::TokenSource` that takes tokens from source code text.
 pub(crate) struct TextTokenSource<'t> {
     text: &'t str,
-    /// start position of each token(expect whitespace and comment)
+    /// token and its start position (non-whitespace/comment tokens)
     /// ```non-rust
     ///  struct Foo;
-    ///  ^------^---
-    ///  |      |  ^-
-    ///  0      7  10
+    ///  ^------^--^-
+    ///  |      |  \________
+    ///  |      \____         \
+    ///  |           \        |
+    ///  (struct, 0) (Foo, 7) (;, 10)
     /// ```
-    /// (token, start_offset): `[(struct, 0), (Foo, 7), (;, 10)]`
-    start_offsets: Vec<TextSize>,
-    /// non-whitespace/comment tokens
-    /// ```non-rust
-    ///  struct Foo {}
-    ///  ^^^^^^ ^^^ ^^
-    /// ```
-    /// tokens: `[struct, Foo, {, }]`
-    tokens: Vec<Token>,
+    /// `[(struct, 0), (Foo, 7), (;, 10)]`
+    token_offset_pairs: Vec<(Token, TextSize)>,
 
     /// Current token and position
-    curr: (PToken, usize),
+    curr: (ra_parser::Token, usize),
 }
 
 impl<'t> TokenSource for TextTokenSource<'t> {
-    fn current(&self) -> PToken {
+    fn current(&self) -> ra_parser::Token {
         self.curr.0
     }
 
-    fn lookahead_nth(&self, n: usize) -> PToken {
-        mk_token(self.curr.1 + n, &self.start_offsets, &self.tokens)
+    fn lookahead_nth(&self, n: usize) -> ra_parser::Token {
+        mk_token(self.curr.1 + n, &self.token_offset_pairs)
     }
 
     fn bump(&mut self) {
@@ -43,45 +38,47 @@ impl<'t> TokenSource for TextTokenSource<'t> {
         }
 
         let pos = self.curr.1 + 1;
-        self.curr = (mk_token(pos, &self.start_offsets, &self.tokens), pos);
+        self.curr = (mk_token(pos, &self.token_offset_pairs), pos);
     }
 
     fn is_keyword(&self, kw: &str) -> bool {
-        let pos = self.curr.1;
-        if pos >= self.tokens.len() {
-            return false;
-        }
-        let range = TextRange::at(self.start_offsets[pos], self.tokens[pos].len);
-        self.text[range] == *kw
+        self.token_offset_pairs
+            .get(self.curr.1)
+            .map(|(token, offset)| &self.text[TextRange::at(*offset, token.len)] == kw)
+            .unwrap_or(false)
     }
 }
 
-fn mk_token(pos: usize, start_offsets: &[TextSize], tokens: &[Token]) -> PToken {
-    let kind = tokens.get(pos).map(|t| t.kind).unwrap_or(EOF);
-    let is_jointed_to_next = if pos + 1 < start_offsets.len() {
-        start_offsets[pos] + tokens[pos].len == start_offsets[pos + 1]
-    } else {
-        false
+fn mk_token(pos: usize, token_offset_pairs: &[(Token, TextSize)]) -> ra_parser::Token {
+    let (kind, is_jointed_to_next) = match token_offset_pairs.get(pos) {
+        Some((token, offset)) => (
+            token.kind,
+            token_offset_pairs
+                .get(pos + 1)
+                .map(|(_, next_offset)| offset + token.len == *next_offset)
+                .unwrap_or(false),
+        ),
+        None => (EOF, false),
     };
-
-    PToken { kind, is_jointed_to_next }
+    ra_parser::Token { kind, is_jointed_to_next }
 }
 
 impl<'t> TextTokenSource<'t> {
     /// Generate input from tokens(expect comment and whitespace).
     pub fn new(text: &'t str, raw_tokens: &'t [Token]) -> TextTokenSource<'t> {
-        let mut tokens = Vec::new();
-        let mut start_offsets = Vec::new();
-        let mut len = 0.into();
-        for &token in raw_tokens.iter() {
-            if !token.kind.is_trivia() {
-                tokens.push(token);
-                start_offsets.push(len);
-            }
-            len += token.len;
-        }
+        let token_offset_pairs: Vec<_> = raw_tokens
+            .iter()
+            .filter_map({
+                let mut len = 0.into();
+                move |token| {
+                    let pair = if token.kind.is_trivia() { None } else { Some((*token, len)) };
+                    len += token.len;
+                    pair
+                }
+            })
+            .collect();
 
-        let first = mk_token(0, &start_offsets, &tokens);
-        TextTokenSource { text, start_offsets, tokens, curr: (first, 0) }
+        let first = mk_token(0, &token_offset_pairs);
+        TextTokenSource { text, token_offset_pairs, curr: (first, 0) }
     }
 }
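
The recurring change in this diff is one small pattern: a manual bounds check followed by a bare `[pos]` subscript becomes a single `get(pos)` chain that returns an `Option`, so the out-of-range case is handled in the same expression and nothing can panic. A minimal standalone sketch of the before/after shapes of `is_keyword` (hypothetical names and data, not from the repository):

// Hypothetical example, not part of the commit.
fn nth_equals_indexing(values: &[u32], pos: usize, expected: u32) -> bool {
    // Before: an explicit bounds check guards a bare subscript; the two can drift apart.
    if pos >= values.len() {
        return false;
    }
    values[pos] == expected
}

fn nth_equals_get(values: &[u32], pos: usize, expected: u32) -> bool {
    // After: `get` yields an Option, so the missing-element case is folded into one chain
    // and no indexing expression remains that could panic.
    values.get(pos).map(|value| *value == expected).unwrap_or(false)
}

fn main() {
    let values = [1u32, 2, 3];
    assert!(nth_equals_indexing(&values, 1, 2) && nth_equals_get(&values, 1, 2));
    assert!(!nth_equals_indexing(&values, 9, 2) && !nth_equals_get(&values, 9, 2));
}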
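The rewritten constructor uses a related trick: the loop that pushed into two parallel vectors becomes one `filter_map` whose `move` closure owns the running offset as captured state. A self-contained sketch of that closure pattern, with made-up string data standing in for tokens (hypothetical names, not from the repository):

// Hypothetical example, not part of the commit: collect (item, offset) pairs while skipping
// "trivia" items, using a `move` closure that keeps the running offset as mutable state.
fn non_blank_with_offsets(pieces: &[&str]) -> Vec<(String, usize)> {
    pieces
        .iter()
        .filter_map({
            let mut offset = 0usize;
            move |piece| {
                // Skip blank pieces, but still advance the offset by their length,
                // mirroring how whitespace/comment tokens are dropped yet consume text.
                let pair = if piece.trim().is_empty() {
                    None
                } else {
                    Some((piece.to_string(), offset))
                };
                offset += piece.len();
                pair
            }
        })
        .collect()
}

fn main() {
    let pieces = ["struct", " ", "Foo", ";"];
    assert_eq!(
        non_blank_with_offsets(&pieces),
        vec![("struct".to_string(), 0), ("Foo".to_string(), 7), (";".to_string(), 10)]
    );
}

Collapsing `start_offsets` and `tokens` into a single `Vec<(Token, TextSize)>` is what makes the `Option`-based accesses straightforward: one `get` returns the token together with its offset, so the two can no longer go out of sync.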