Reimplemented lexer with vectors instead of iterators

Veetaha 2020-01-26 20:44:49 +02:00
parent ad24976da3
commit ac37a11f04
10 changed files with 254 additions and 200 deletions
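
The gist of the change: `tokenize()` no longer returns a lazy `impl Iterator<Item = ParsedToken>`; it now eagerly collects tokens and tokenization errors into two plain vectors wrapped in `ParsedTokens`. A hypothetical usage sketch of the new public API (the function and type names are taken from the diffs below; the inputs and assertions are illustrative only):

    use ra_syntax::{single_token, tokenize, SyntaxKind};

    fn api_overview() {
        // Eager tokenization: tokens and errors come back as plain vectors.
        let parsed = tokenize("let x = 92;");
        assert!(parsed.errors.is_empty());
        assert_eq!(parsed.tokens[0].kind, SyntaxKind::LET_KW);

        // `single_token` succeeds only if the whole string is exactly one token.
        assert!(single_token("foo").is_some());
        assert!(single_token("foo bar").is_none());
    }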

crates/ra_ide/src/references/rename.rs

@@ -2,7 +2,7 @@
 use hir::ModuleSource;
 use ra_db::{RelativePath, RelativePathBuf, SourceDatabase, SourceDatabaseExt};
-use ra_syntax::{algo::find_node_at_offset, ast, tokenize, AstNode, SyntaxKind, SyntaxNode};
+use ra_syntax::{algo::find_node_at_offset, ast, single_token, AstNode, SyntaxKind, SyntaxNode};
 use ra_text_edit::TextEdit;
 
 use crate::{
@@ -17,11 +17,9 @@ pub(crate) fn rename(
     position: FilePosition,
     new_name: &str,
 ) -> Option<RangeInfo<SourceChange>> {
-    let tokens = tokenize(new_name);
-    if tokens.len() != 1
-        || (tokens[0].kind != SyntaxKind::IDENT && tokens[0].kind != SyntaxKind::UNDERSCORE)
-    {
-        return None;
+    match single_token(new_name)?.token.kind {
+        SyntaxKind::IDENT | SyntaxKind::UNDERSCORE => (),
+        _ => return None,
     }
     let parse = db.parse(position.file_id);
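
The rewritten check requires the whole new name to lex as exactly one IDENT or UNDERSCORE token. Restated as a standalone helper (hypothetical name, not part of the commit):

    use ra_syntax::{single_token, SyntaxKind};

    fn is_valid_rename_target(new_name: &str) -> bool {
        match single_token(new_name) {
            Some(parsed) => match parsed.token.kind {
                SyntaxKind::IDENT | SyntaxKind::UNDERSCORE => true,
                _ => false,
            },
            // Empty string, or more than one token (e.g. "foo bar").
            None => false,
        }
    }

For example, "spam" and "_" pass, while "foo bar" (more than one token) and "92" (a single INT_NUMBER token) are rejected.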

crates/ra_mbe/src/subtree_source.rs

@@ -1,7 +1,7 @@
 //! FIXME: write short doc here
 
 use ra_parser::{Token, TokenSource};
-use ra_syntax::{classify_literal, SmolStr, SyntaxKind, SyntaxKind::*, T};
+use ra_syntax::{single_token, SmolStr, SyntaxKind, SyntaxKind::*, T};
 use std::cell::{Cell, Ref, RefCell};
 use tt::buffer::{Cursor, TokenBuffer};
 
@@ -129,8 +129,10 @@ fn convert_delim(d: Option<tt::DelimiterKind>, closing: bool) -> TtToken {
 }
 
 fn convert_literal(l: &tt::Literal) -> TtToken {
-    let kind =
-        classify_literal(&l.text).map(|tkn| tkn.kind).unwrap_or_else(|| match l.text.as_ref() {
+    let kind = single_token(&l.text)
+        .map(|parsed| parsed.token.kind)
+        .filter(|kind| kind.is_literal())
+        .unwrap_or_else(|| match l.text.as_ref() {
             "true" => T![true],
             "false" => T![false],
             _ => panic!("Fail to convert given literal {:#?}", &l),

crates/ra_syntax/src/lib.rs

@@ -41,7 +41,7 @@ use crate::syntax_node::GreenNode;
 pub use crate::{
     algo::InsertPosition,
     ast::{AstNode, AstToken},
-    parsing::{classify_literal, tokenize, Token},
+    parsing::{first_token, single_token, tokenize, tokenize_append, Token, TokenizeError},
     ptr::{AstPtr, SyntaxNodePtr},
     syntax_error::{Location, SyntaxError, SyntaxErrorKind},
     syntax_node::{

crates/ra_syntax/src/parsing.rs

@@ -7,15 +7,17 @@ mod text_tree_sink;
 mod reparsing;
 
 use crate::{syntax_node::GreenNode, SyntaxError};
+use text_token_source::TextTokenSource;
+use text_tree_sink::TextTreeSink;
 
-pub use self::lexer::{classify_literal, tokenize, Token};
+pub use lexer::*;
 
 pub(crate) use self::reparsing::incremental_reparse;
 
 pub(crate) fn parse_text(text: &str) -> (GreenNode, Vec<SyntaxError>) {
-    let tokens = tokenize(&text);
-    let mut token_source = text_token_source::TextTokenSource::new(text, &tokens);
-    let mut tree_sink = text_tree_sink::TextTreeSink::new(text, &tokens);
+    let ParsedTokens { tokens, errors } = tokenize(&text);
+    let mut token_source = TextTokenSource::new(text, &tokens);
+    let mut tree_sink = TextTreeSink::new(text, &tokens, errors);
     ra_parser::parse(&mut token_source, &mut tree_sink);
     tree_sink.finish()
 }

crates/ra_syntax/src/parsing/lexer.rs

@@ -1,10 +1,10 @@
 //! Lexer analyzes raw input string and produces lexemes (tokens).
+//! It is just a bridge to `rustc_lexer`.
 
-use std::iter::{FromIterator, IntoIterator};
 use crate::{
+    SyntaxError, SyntaxErrorKind,
     SyntaxKind::{self, *},
-    TextUnit,
+    TextRange, TextUnit,
 };
 
 /// A token of Rust source.
@@ -15,93 +15,96 @@ pub struct Token {
     /// The length of the token.
     pub len: TextUnit,
 }
 
-impl Token {
-    pub const fn new(kind: SyntaxKind, len: TextUnit) -> Self {
-        Self { kind, len }
-    }
-}
-
 #[derive(Debug)]
-/// Represents the result of parsing one token.
+/// Represents the result of parsing one token. Beware that the token may be malformed.
 pub struct ParsedToken {
     /// Parsed token.
     pub token: Token,
     /// If error is present then parsed token is malformed.
-    pub error: Option<TokenizeError>,
-}
-
-impl ParsedToken {
-    pub const fn new(token: Token, error: Option<TokenizeError>) -> Self {
-        Self { token, error }
-    }
+    pub error: Option<SyntaxError>,
 }
 
 #[derive(Debug, Default)]
-/// Represents the result of parsing one token.
+/// Represents the result of parsing source code of Rust language.
 pub struct ParsedTokens {
-    /// Parsed token.
+    /// Parsed tokens in order they appear in source code.
     pub tokens: Vec<Token>,
-    /// If error is present then parsed token is malformed.
-    pub errors: Vec<TokenizeError>,
+    /// Collection of all occurred tokenization errors.
+    /// In general `self.errors.len() <= self.tokens.len()`
+    pub errors: Vec<SyntaxError>,
 }
 
-impl FromIterator<ParsedToken> for ParsedTokens {
-    fn from_iter<I: IntoIterator<Item = ParsedToken>>(iter: I) -> Self {
-        let res = Self::default();
-        for entry in iter {
-            res.tokens.push(entry.token);
-            if let Some(error) = entry.error {
-                res.errors.push(error);
-            }
-        }
-        res
+impl ParsedTokens {
+    /// Append `token` and `error` (if present) to the result.
+    pub fn push(&mut self, ParsedToken { token, error }: ParsedToken) {
+        self.tokens.push(token);
+        if let Some(error) = error {
+            self.errors.push(error)
+        }
     }
 }
 
-/// Returns the first encountered token from the string.
-/// If the string contains zero or two or more tokens returns `None`.
+/// Same as `tokenize_append()`, just a shortcut for creating `ParsedTokens`
+/// and returning the result the usual way.
+pub fn tokenize(text: &str) -> ParsedTokens {
+    let mut parsed = ParsedTokens::default();
+    tokenize_append(text, &mut parsed);
+    parsed
+}
+
+/// Break a string up into its component tokens.
+/// Returns `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<SyntaxError>)`.
+/// Beware that it checks for shebang first and its length contributes to resulting
+/// tokens offsets.
+pub fn tokenize_append(text: &str, parsed: &mut ParsedTokens) {
+    // non-empty string is a precondition of `rustc_lexer::strip_shebang()`.
+    if text.is_empty() {
+        return;
+    }
+
+    let mut offset: usize = rustc_lexer::strip_shebang(text)
+        .map(|shebang_len| {
+            parsed.tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
+            shebang_len
+        })
+        .unwrap_or(0);
+
+    let text_without_shebang = &text[offset..];
+
+    for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
+        parsed.push(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from_usize(offset)));
+        offset += rustc_token.len;
+    }
+}
+
+/// Returns the first encountered token at the beginning of the string.
+/// If the string contains zero or *two or more tokens* returns `None`.
+///
+/// The main difference between `first_token()` and `single_token()` is that
+/// the latter returns `None` if the string contains more than one token.
 pub fn single_token(text: &str) -> Option<ParsedToken> {
-    // TODO: test whether this condition indeed checks for a single token
     first_token(text).filter(|parsed| parsed.token.len.to_usize() == text.len())
 }
 
+/// Returns the first encountered token at the beginning of the string.
+/// If the string contains zero tokens returns `None`.
+///
+/// The main difference between `first_token()` and `single_token()` is that
+/// the latter returns `None` if the string contains more than one token.
+pub fn first_token(text: &str) -> Option<ParsedToken> {
+    // non-empty string is a precondition of `rustc_lexer::first_token()`.
+    if text.is_empty() {
+        None
+    } else {
+        let rustc_token = rustc_lexer::first_token(text);
+        Some(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from(0)))
+    }
+}
+
-/*
-/// Returns `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<TokenizeError>)`
-/// This is just a shorthand for `tokenize(text).collect()`
-pub fn tokenize_to_vec_with_errors(text: &str) -> ParsedTokens {
-    tokenize(text).collect()
-}
-
-/// The simplest version of tokenize, it just retunst a ready-made `Vec<Token>`.
-/// It discards all tokenization errors while parsing. If you need that infromation
-/// consider using `tokenize()` or `tokenize_to_vec_with_errors()`.
-pub fn tokenize_to_vec(text: &str) -> Vec<Token> {
-    tokenize(text).map(|parsed_token| parsed_token.token).collect()
-}
-*/
-
-/// Break a string up into its component tokens
-/// This is the core function, all other `tokenize*()` functions are simply
-/// handy shortcuts for this one.
-pub fn tokenize(text: &str) -> impl Iterator<Item = ParsedToken> + '_ {
-    let shebang = rustc_lexer::strip_shebang(text).map(|shebang_len| {
-        text = &text[shebang_len..];
-        ParsedToken::new(Token::new(SHEBANG, TextUnit::from_usize(shebang_len)), None)
-    });
-    // Notice that we eagerly evaluate shebang since it may change text slice
-    // and we cannot simplify this into a single method call chain
-    shebang.into_iter().chain(tokenize_without_shebang(text))
-}
-
-pub fn tokenize_without_shebang(text: &str) -> impl Iterator<Item = ParsedToken> + '_ {
-    rustc_lexer::tokenize(text).map(|rustc_token| {
-        let token_text = &text[..rustc_token.len];
-        text = &text[rustc_token.len..];
-        rustc_token_kind_to_parsed_token(&rustc_token.kind, token_text)
-    })
-}
-
-#[derive(Debug)]
+/// Describes the values of `SyntaxErrorKind::TokenizeError` enum variant.
+/// It describes all the types of errors that may happen during the tokenization
+/// of Rust source.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub enum TokenizeError {
     /// Base prefix was provided, but there were no digits
     /// after it, e.g. `0x`.
@@ -124,30 +127,41 @@ pub enum TokenizeError {
     /// Raw byte string literal lacks trailing delimiter e.g. `"##`
     UnterminatedRawByteString,
 
-    /// Raw string lacks a quote after pound characters e.g. `r###`
+    /// Raw string lacks a quote after the pound characters e.g. `r###`
     UnstartedRawString,
-    /// Raw byte string lacks a quote after pound characters e.g. `br###`
+    /// Raw byte string lacks a quote after the pound characters e.g. `br###`
     UnstartedRawByteString,
 
     /// Lifetime starts with a number e.g. `'4ever`
     LifetimeStartsWithNumber,
 }
 
-fn rustc_token_kind_to_parsed_token(
-    rustc_token_kind: &rustc_lexer::TokenKind,
-    token_text: &str,
+/// Mapper function that converts `rustc_lexer::Token` with some additional context
+/// to `ParsedToken`
+fn rustc_token_to_parsed_token(
+    rustc_token: &rustc_lexer::Token,
+    text: &str,
+    token_start_offset: TextUnit,
 ) -> ParsedToken {
-    use rustc_lexer::TokenKind as TK;
-    use TokenizeError as TE;
-
-    // We drop some useful infromation here (see patterns with double dots `..`)
+    // We drop some useful information here (see patterns with double dots `..`)
     // Storing that info in `SyntaxKind` is not possible due to its layout requirements of
     // being `u16` that come from `rowan::SyntaxKind` type and changes to `rowan::SyntaxKind`
-    // would mean hell of a rewrite.
+    // would mean hell of a rewrite
 
-    let (syntax_kind, error) = match *rustc_token_kind {
+    let token_range =
+        TextRange::offset_len(token_start_offset, TextUnit::from_usize(rustc_token.len));
+    let token_text = &text[token_range];
+    let (syntax_kind, error) = {
+        use rustc_lexer::TokenKind as TK;
+        use TokenizeError as TE;
+
+        match rustc_token.kind {
             TK::LineComment => ok(COMMENT),
-            TK::BlockComment { terminated } => ok_if(terminated, COMMENT, TE::UnterminatedBlockComment),
+            TK::BlockComment { terminated } => {
+                ok_if(terminated, COMMENT, TE::UnterminatedBlockComment)
+            }
             TK::Whitespace => ok(WHITESPACE),
             TK::Ident => ok(if token_text == "_" {
                 UNDERSCORE
@@ -187,31 +201,21 @@ fn rustc_token_kind_to_parsed_token(
             TK::Caret => ok(CARET),
             TK::Percent => ok(PERCENT),
             TK::Unknown => ok(ERROR),
+        }
     };
 
-    return ParsedToken::new(
-        Token::new(syntax_kind, TextUnit::from_usize(token_text.len())),
-        error,
-    );
+    return ParsedToken {
+        token: Token { kind: syntax_kind, len: token_range.len() },
+        error: error
+            .map(|error| SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range)),
+    };
 
     type ParsedSyntaxKind = (SyntaxKind, Option<TokenizeError>);
 
-    const fn ok(syntax_kind: SyntaxKind) -> ParsedSyntaxKind {
-        (syntax_kind, None)
-    }
-    const fn ok_if(cond: bool, syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
-        if cond {
-            ok(syntax_kind)
-        } else {
-            err(syntax_kind, error)
-        }
-    }
-    const fn err(syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
-        (syntax_kind, Some(error))
-    }
-    const fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> ParsedSyntaxKind {
+    fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> ParsedSyntaxKind {
         use rustc_lexer::LiteralKind as LK;
+        use TokenizeError as TE;
+
         match *kind {
             LK::Int { empty_int, .. } => ok_if(!empty_int, INT_NUMBER, TE::EmptyInt),
             LK::Float { empty_exponent, .. } => {
@@ -237,27 +241,17 @@ fn rustc_token_kind_to_parsed_token(
             }
         }
     }
 
-}
-
-pub fn first_token(text: &str) -> Option<ParsedToken> {
-    // Checking for emptyness because of `rustc_lexer::first_token()` invariant (see its body)
-    if text.is_empty() {
-        None
-    } else {
-        let rustc_token = rustc_lexer::first_token(text);
-        Some(rustc_token_kind_to_parsed_token(&rustc_token.kind, &text[..rustc_token.len]))
-    }
-}
-
-// TODO: think what to do with this ad hoc function
-pub fn classify_literal(text: &str) -> Option<ParsedToken> {
-    let t = rustc_lexer::first_token(text);
-    if t.len != text.len() {
-        return None;
-    }
-    let kind = match t.kind {
-        rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind),
-        _ => return None,
-    };
-    Some(ParsedToken::new(Token::new(kind, TextUnit::from_usize(t.len))))
+    const fn ok(syntax_kind: SyntaxKind) -> ParsedSyntaxKind {
+        (syntax_kind, None)
+    }
+    const fn err(syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
+        (syntax_kind, Some(error))
+    }
+    fn ok_if(cond: bool, syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
+        if cond {
+            ok(syntax_kind)
+        } else {
+            err(syntax_kind, error)
+        }
+    }
 }
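
A hypothetical walk-through of the new eager API (illustrative input and assertions, assuming the items defined in this file are in scope):

    fn lexer_demo() {
        let parsed = tokenize("#!/usr/bin/env rust\n/* unterminated");

        // The shebang is handled first; its length shifts the offsets of
        // every token that follows it.
        assert_eq!(parsed.tokens[0].kind, SHEBANG);

        // The unterminated block comment still yields a COMMENT token, but
        // an `UnterminatedBlockComment` error is recorded as a `SyntaxError`
        // pointing at the comment's range, so `errors.len() <= tokens.len()`.
        assert_eq!(parsed.errors.len(), 1);
    }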

crates/ra_syntax/src/parsing/reparsing.rs

@@ -12,7 +12,7 @@ use ra_text_edit::AtomTextEdit;
 use crate::{
     algo,
     parsing::{
-        lexer::{tokenize, Token},
+        lexer::{single_token, tokenize, ParsedTokens, Token},
         text_token_source::TextTokenSource,
         text_tree_sink::TextTreeSink,
     },
@@ -41,36 +41,42 @@ fn reparse_token<'node>(
     root: &'node SyntaxNode,
     edit: &AtomTextEdit,
 ) -> Option<(GreenNode, TextRange)> {
-    let token = algo::find_covering_element(root, edit.delete).as_token()?.clone();
-    match token.kind() {
+    let prev_token = algo::find_covering_element(root, edit.delete).as_token()?.clone();
+    let prev_token_kind = prev_token.kind();
+    match prev_token_kind {
         WHITESPACE | COMMENT | IDENT | STRING | RAW_STRING => {
-            if token.kind() == WHITESPACE || token.kind() == COMMENT {
+            if prev_token_kind == WHITESPACE || prev_token_kind == COMMENT {
                 // removing a new line may extend the previous token
-                if token.text()[edit.delete - token.text_range().start()].contains('\n') {
+                let deleted_range = edit.delete - prev_token.text_range().start();
+                if prev_token.text()[deleted_range].contains('\n') {
                     return None;
                 }
             }
 
-            let text = get_text_after_edit(token.clone().into(), &edit);
-            let lex_tokens = tokenize(&text);
-            let lex_token = match lex_tokens[..] {
-                [lex_token] if lex_token.kind == token.kind() => lex_token,
-                _ => return None,
-            };
-
-            if lex_token.kind == IDENT && is_contextual_kw(&text) {
+            let mut new_text = get_text_after_edit(prev_token.clone().into(), &edit);
+            let new_token_kind = single_token(&new_text)?.token.kind;
+
+            if new_token_kind != prev_token_kind
+                || (new_token_kind == IDENT && is_contextual_kw(&new_text))
+            {
                 return None;
             }
 
-            if let Some(next_char) = root.text().char_at(token.text_range().end()) {
-                let tokens_with_next_char = tokenize(&format!("{}{}", text, next_char));
-                if tokens_with_next_char.len() == 1 {
+            // Check that edited token is not a part of the bigger token.
+            // E.g. if for source code `bruh"str"` the user removed `ruh`, then
+            // `b` no longer remains an identifier, but becomes a part of byte string literal
+            if let Some(next_char) = root.text().char_at(prev_token.text_range().end()) {
+                new_text.push(next_char);
+                let token_with_next_char = single_token(&new_text);
+                if token_with_next_char.is_some() {
                     return None;
                 }
+                new_text.pop();
             }
 
-            let new_token = GreenToken::new(rowan::SyntaxKind(token.kind().into()), text.into());
-            Some((token.replace_with(new_token), token.text_range()))
+            let new_token =
+                GreenToken::new(rowan::SyntaxKind(prev_token_kind.into()), new_text.into());
+            Some((prev_token.replace_with(new_token), prev_token.text_range()))
         }
         _ => None,
     }
@@ -82,12 +88,12 @@ fn reparse_block<'node>(
 ) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> {
     let (node, reparser) = find_reparsable_node(root, edit.delete)?;
     let text = get_text_after_edit(node.clone().into(), &edit);
-    let tokens = tokenize(&text);
+    let ParsedTokens { tokens, errors } = tokenize(&text);
     if !is_balanced(&tokens) {
         return None;
     }
     let mut token_source = TextTokenSource::new(&text, &tokens);
-    let mut tree_sink = TextTreeSink::new(&text, &tokens);
+    let mut tree_sink = TextTreeSink::new(&text, &tokens, errors);
     reparser.parse(&mut token_source, &mut tree_sink);
     let (green, new_errors) = tree_sink.finish();
     Some((node.replace_with(green), new_errors, node.text_range()))
@@ -96,6 +102,9 @@ fn reparse_block<'node>(
 fn get_text_after_edit(element: SyntaxElement, edit: &AtomTextEdit) -> String {
     let edit =
         AtomTextEdit::replace(edit.delete - element.text_range().start(), edit.insert.clone());
+
+    // Note: we could move this match to a method or even further: use enum_dispatch crate
+    // https://crates.io/crates/enum_dispatch
     let text = match element {
         NodeOrToken::Token(token) => token.text().to_string(),
         NodeOrToken::Node(node) => node.text().to_string(),
@@ -112,6 +121,9 @@ fn is_contextual_kw(text: &str) -> bool {
 fn find_reparsable_node(node: &SyntaxNode, range: TextRange) -> Option<(SyntaxNode, Reparser)> {
     let node = algo::find_covering_element(node, range);
+
+    // Note: we could move this match to a method or even further: use enum_dispatch crate
+    // https://crates.io/crates/enum_dispatch
     let mut ancestors = match node {
         NodeOrToken::Token(it) => it.parent().ancestors(),
         NodeOrToken::Node(it) => it.ancestors(),
@@ -181,6 +193,8 @@ mod tests {
         let fully_reparsed = SourceFile::parse(&after);
         let incrementally_reparsed: Parse<SourceFile> = {
             let f = SourceFile::parse(&before);
+            // FIXME: it seems this initialization statement is unnecessary (see edit in outer scope)
+            // Investigate whether it should really be removed.
             let edit = AtomTextEdit { delete: range, insert: replace_with.to_string() };
             let (green, new_errors, range) =
                 incremental_reparse(f.tree().syntax(), &edit, f.errors.to_vec()).unwrap();
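
The token-merge check added in `reparse_token` can be read as a standalone predicate (`would_merge` is a hypothetical name for illustration, assuming `single_token` from the lexer):

    fn would_merge(new_token_text: &str, next_char: char) -> bool {
        let mut with_next = new_token_text.to_string();
        with_next.push(next_char);
        // If the edited token text plus the next character still lex as ONE
        // token, the token would fuse with its neighbour, so the fast
        // single-token reparse must bail out and fall back to `reparse_block`.
        single_token(&with_next).is_some()
    }

E.g. `would_merge("b", '"')` is true, because `b"` starts a byte string literal — exactly the `bruh"str"` scenario described in the comment above.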

crates/ra_syntax/src/parsing/text_tree_sink.rs

@@ -92,14 +92,14 @@ impl<'a> TreeSink for TextTreeSink<'a> {
 }
 
 impl<'a> TextTreeSink<'a> {
-    pub(super) fn new(text: &'a str, tokens: &'a [Token]) -> TextTreeSink<'a> {
-        TextTreeSink {
+    pub(super) fn new(text: &'a str, tokens: &'a [Token], errors: Vec<SyntaxError>) -> Self {
+        Self {
             text,
             tokens,
             text_pos: 0.into(),
             token_pos: 0,
             state: State::PendingStart,
-            inner: SyntaxTreeBuilder::default(),
+            inner: SyntaxTreeBuilder::new(errors),
         }
     }

crates/ra_syntax/src/syntax_error.rs

@@ -84,6 +84,9 @@ pub enum SyntaxErrorKind {
     ParseError(ParseError),
     EscapeError(EscapeError),
     TokenizeError(TokenizeError),
+    // FIXME: the obvious pattern of this enum dictates that the following enum variants
+    // should be wrapped into something like `SemanticError(SemanticError)`
+    // or `ValidateError(ValidateError)` or `SemanticValidateError(...)`
     InvalidBlockAttr,
     InvalidMatchInnerAttr,
     InvalidTupleIndexFormat,
@@ -106,6 +109,7 @@ impl fmt::Display for SyntaxErrorKind {
             }
             ParseError(msg) => write!(f, "{}", msg.0),
             EscapeError(err) => write!(f, "{}", err),
+            TokenizeError(err) => write!(f, "{}", err),
             VisibilityNotAllowed => {
                 write!(f, "unnecessary visibility qualifier")
             }
@@ -116,6 +120,44 @@ impl fmt::Display for SyntaxErrorKind {
     }
 }
 
+impl fmt::Display for TokenizeError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let msg = match self {
+            TokenizeError::EmptyInt => "Missing digits after integer base prefix",
+            TokenizeError::EmptyExponent => "Missing digits after the exponent symbol",
+            TokenizeError::UnterminatedBlockComment => {
+                "Missing trailing `*/` symbols to terminate the block comment"
+            }
+            TokenizeError::UnterminatedChar => {
+                "Missing trailing `'` symbol to terminate the character literal"
+            }
+            TokenizeError::UnterminatedByte => {
+                "Missing trailing `'` symbol to terminate the byte literal"
+            }
+            TokenizeError::UnterminatedString => {
+                "Missing trailing `\"` symbol to terminate the string literal"
+            }
+            TokenizeError::UnterminatedByteString => {
+                "Missing trailing `\"` symbol to terminate the byte string literal"
+            }
+            TokenizeError::UnterminatedRawString => {
+                "Missing trailing `\"` with `#` symbols to terminate the raw string literal"
+            }
+            TokenizeError::UnterminatedRawByteString => {
+                "Missing trailing `\"` with `#` symbols to terminate the raw byte string literal"
+            }
+            TokenizeError::UnstartedRawString => {
+                "Missing `\"` symbol after `#` symbols to begin the raw string literal"
+            }
+            TokenizeError::UnstartedRawByteString => {
+                "Missing `\"` symbol after `#` symbols to begin the raw byte string literal"
+            }
+            TokenizeError::LifetimeStartsWithNumber => "Lifetime name cannot start with a number",
+        };
+        write!(f, "{}", msg)
+    }
+}
+
 impl fmt::Display for EscapeError {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let msg = match self {
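
With the new impl, tokenization errors render through `SyntaxErrorKind`'s `Display` like any other syntax error. An illustrative assertion (assuming the types above are in scope):

    fn tokenize_error_message_demo() {
        let kind = SyntaxErrorKind::TokenizeError(TokenizeError::EmptyInt);
        // `SyntaxErrorKind::fmt` delegates to `impl fmt::Display for TokenizeError`.
        assert_eq!(kind.to_string(), "Missing digits after integer base prefix");
    }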

crates/ra_syntax/src/syntax_node.rs

@@ -4,7 +4,7 @@
 //! `SyntaxNode`, and a basic traversal API (parent, children, siblings).
 //!
 //! The *real* implementation is in the (language-agnostic) `rowan` crate, this
-//! modules just wraps its API.
+//! module just wraps its API.
 
 use ra_parser::ParseError;
 use rowan::{GreenNodeBuilder, Language};
@@ -38,14 +38,15 @@ pub type SyntaxElementChildren = rowan::SyntaxElementChildren<RustLanguage>;
 pub use rowan::{Direction, NodeOrToken};
 
+#[derive(Default)]
 pub struct SyntaxTreeBuilder {
     errors: Vec<SyntaxError>,
     inner: GreenNodeBuilder<'static>,
 }
 
-impl Default for SyntaxTreeBuilder {
-    fn default() -> SyntaxTreeBuilder {
-        SyntaxTreeBuilder { errors: Vec::new(), inner: GreenNodeBuilder::new() }
+impl SyntaxTreeBuilder {
+    pub fn new(errors: Vec<SyntaxError>) -> Self {
+        Self { errors, inner: GreenNodeBuilder::default() }
     }
 }

crates/ra_syntax/src/tests.rs

@@ -10,7 +10,8 @@ use crate::{fuzz, SourceFile};
 #[test]
 fn lexer_tests() {
     dir_tests(&test_data_dir(), &["lexer"], |text, _| {
-        let tokens = crate::tokenize(text);
+        // FIXME: add tests for errors (their format is up to discussion)
+        let tokens = crate::tokenize(text).tokens;
         dump_tokens(&tokens, text)
     })
 }