Mirror of https://github.com/rust-lang/rust-analyzer.git, synced 2025-09-27 12:29:21 +00:00

Convert code to text-size

parent 27a7718880
commit b1d5817dd1

75 changed files with 438 additions and 456 deletions
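
The change is mechanical: the old TextUnit type (from the text_unit crate) becomes TextSize from the text-size crate, and the TextRange constructors get shorter names. A minimal sketch of the equivalences the hunks below apply, assuming the text-size API as used by this commit:

    use text_size::{TextRange, TextSize};

    fn equivalences(text: &str) {
        let len = TextSize::of(text);                // was TextUnit::of_str(text)
        let start = TextSize::from_usize(0);         // was TextUnit::from_usize(0)
        let r1 = TextRange::at(start, len);          // was TextRange::offset_len(start, len)
        let r2 = TextRange::new(start, start + len); // was TextRange::from_to(start, start + len)
        assert_eq!(r1, r2); // both ranges cover the whole input
    }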
crates/ra_syntax/src/parsing/lexer.rs

@@ -4,7 +4,7 @@
 use crate::{
     SyntaxError,
     SyntaxKind::{self, *},
-    TextRange, TextUnit, T,
+    TextRange, TextSize, T,
 };

 /// A token of Rust source.
@@ -13,7 +13,7 @@ pub struct Token {
     /// The kind of token.
     pub kind: SyntaxKind,
     /// The length of the token.
-    pub len: TextUnit,
+    pub len: TextSize,
 }

 /// Break a string up into its component tokens.
@@ -30,7 +30,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     let mut offset: usize = rustc_lexer::strip_shebang(text)
         .map(|shebang_len| {
-            tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
+            tokens.push(Token { kind: SHEBANG, len: TextSize::from_usize(shebang_len) });
             shebang_len
         })
         .unwrap_or(0);

@@ -38,8 +38,8 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     let text_without_shebang = &text[offset..];

     for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
-        let token_len = TextUnit::from_usize(rustc_token.len);
-        let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len);
+        let token_len = TextSize::from_usize(rustc_token.len);
+        let token_range = TextRange::at(TextSize::from_usize(offset), token_len);

         let (syntax_kind, err_message) =
             rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);

@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
     lex_first_token(text)
-        .filter(|(token, _)| token.len == TextUnit::of_str(text))
+        .filter(|(token, _)| token.len == TextSize::of(text))
         .map(|(token, error)| (token.kind, error))
 }

@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxEr
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
     lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text))
+        .filter(|(token, error)| !error.is_some() && token.len == TextSize::of(text))
         .map(|(token, _error)| token.kind)
 }

@@ -96,9 +96,9 @@ fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
     let rustc_token = rustc_lexer::first_token(text);
     let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);

-    let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) };
+    let token = Token { kind: syntax_kind, len: TextSize::from_usize(rustc_token.len) };
     let optional_error = err_message.map(|err_message| {
-        SyntaxError::new(err_message, TextRange::from_to(0.into(), TextUnit::of_str(text)))
+        SyntaxError::new(err_message, TextRange::new(0.into(), TextSize::of(text)))
     });

     Some((token, optional_error))
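The lexer hunks above build token ranges with TextRange::at and index the source string with them directly. A small usage sketch under the same assumptions (the token values are hypothetical, not from the diff):

    use text_size::{TextRange, TextSize};

    fn main() {
        let text = "fn main() {}";
        // The leading "fn" keyword token is 2 bytes long and starts at offset 0.
        let token_range = TextRange::at(TextSize::from_usize(0), TextSize::from_usize(2));
        // text-size provides an Index<TextRange> impl for str, so slicing works as before.
        assert_eq!(&text[token_range], "fn");
    }
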
crates/ra_syntax/src/parsing/reparsing.rs

@@ -19,7 +19,7 @@ use crate::{
     syntax_node::{GreenNode, GreenToken, NodeOrToken, SyntaxElement, SyntaxNode},
     SyntaxError,
     SyntaxKind::*,
-    TextRange, TextUnit, T,
+    TextRange, TextSize, T,
 };

 pub(crate) fn incremental_reparse(
@@ -176,7 +176,7 @@ fn merge_errors(
         if old_err_range.end() <= range_before_reparse.start() {
             res.push(old_err);
         } else if old_err_range.start() >= range_before_reparse.end() {
-            let inserted_len = TextUnit::of_str(&edit.insert);
+            let inserted_len = TextSize::of(&edit.insert);
             res.push(old_err.with_range((old_err_range + inserted_len) - edit.delete.len()));
             // Note: extra parens are intentional to prevent uint underflow, HWAB (here was a bug)
         }
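The "extra parens" note above is about unsigned arithmetic: TextSize has no negative values, so the old error range must be shifted up by the inserted length before the deleted length is subtracted, or an intermediate offset can underflow. A worked sketch with hypothetical lengths:

    use text_size::{TextRange, TextSize};

    fn main() {
        let old_err_range = TextRange::new(TextSize::from(5u32), TextSize::from(8u32));
        let inserted_len = TextSize::from(10u32); // stands in for TextSize::of(&edit.insert)
        let deleted_len = TextSize::from(7u32);   // stands in for edit.delete.len()

        // Adding first keeps every intermediate offset non-negative: (5..8 + 10) - 7 == 8..11.
        let shifted = (old_err_range + inserted_len) - deleted_len;
        assert_eq!(shifted, TextRange::new(TextSize::from(8u32), TextSize::from(11u32)));
        // Subtracting first would try to compute 5 - 7 and underflow.
    }
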
crates/ra_syntax/src/parsing/text_token_source.rs

@@ -3,7 +3,7 @@
 use ra_parser::Token as PToken;
 use ra_parser::TokenSource;

-use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextUnit};
+use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextSize};

 pub(crate) struct TextTokenSource<'t> {
     text: &'t str,
@@ -15,7 +15,7 @@ pub(crate) struct TextTokenSource<'t> {
     /// 0 7 10
     /// ```
     /// (token, start_offset): `[(struct, 0), (Foo, 7), (;, 10)]`
-    start_offsets: Vec<TextUnit>,
+    start_offsets: Vec<TextSize>,
     /// non-whitespace/comment tokens
     /// ```non-rust
     /// struct Foo {}
@@ -51,12 +51,12 @@ impl<'t> TokenSource for TextTokenSource<'t> {
         if pos >= self.tokens.len() {
             return false;
         }
-        let range = TextRange::offset_len(self.start_offsets[pos], self.tokens[pos].len);
+        let range = TextRange::at(self.start_offsets[pos], self.tokens[pos].len);
         self.text[range] == *kw
     }
 }

-fn mk_token(pos: usize, start_offsets: &[TextUnit], tokens: &[Token]) -> PToken {
+fn mk_token(pos: usize, start_offsets: &[TextSize], tokens: &[Token]) -> PToken {
     let kind = tokens.get(pos).map(|t| t.kind).unwrap_or(EOF);
     let is_jointed_to_next = if pos + 1 < start_offsets.len() {
         start_offsets[pos] + tokens[pos].len == start_offsets[pos + 1]
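mk_token's jointness test is plain TextSize arithmetic: a token is jointed to the next one when its start offset plus its length lands exactly on the next start offset. A small sketch with hypothetical offsets:

    use text_size::TextSize;

    fn main() {
        // ">> ;" lexed as two '>' tokens at offsets 0 and 1, then ';' at offset 3.
        let start_offsets = [TextSize::from(0u32), TextSize::from(1u32), TextSize::from(3u32)];
        let lens = [TextSize::from(1u32); 3];

        assert!(start_offsets[0] + lens[0] == start_offsets[1]); // jointed: ">>"
        assert!(start_offsets[1] + lens[1] != start_offsets[2]); // a whitespace gap breaks jointness
    }
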
crates/ra_syntax/src/parsing/text_tree_sink.rs

@@ -9,7 +9,7 @@ use crate::{
     syntax_node::GreenNode,
     SmolStr, SyntaxError,
     SyntaxKind::{self, *},
-    SyntaxTreeBuilder, TextRange, TextUnit,
+    SyntaxTreeBuilder, TextRange, TextSize,
 };

 /// Bridges the parser with our specific syntax tree representation.
@@ -18,7 +18,7 @@ use crate::{
 pub(crate) struct TextTreeSink<'a> {
     text: &'a str,
     tokens: &'a [Token],
-    text_pos: TextUnit,
+    text_pos: TextSize,
     token_pos: usize,
     state: State,
     inner: SyntaxTreeBuilder,
@@ -42,7 +42,7 @@ impl<'a> TreeSink for TextTreeSink<'a> {
         let len = self.tokens[self.token_pos..self.token_pos + n_tokens]
             .iter()
             .map(|it| it.len)
-            .sum::<TextUnit>();
+            .sum::<TextSize>();
         self.do_token(kind, len, n_tokens);
     }

@@ -62,12 +62,12 @@ impl<'a> TreeSink for TextTreeSink<'a> {
             self.tokens[self.token_pos..].iter().take_while(|it| it.kind.is_trivia()).count();
         let leading_trivias = &self.tokens[self.token_pos..self.token_pos + n_trivias];
         let mut trivia_end =
-            self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextUnit>();
+            self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextSize>();

         let n_attached_trivias = {
             let leading_trivias = leading_trivias.iter().rev().map(|it| {
                 let next_end = trivia_end - it.len;
-                let range = TextRange::from_to(next_end, trivia_end);
+                let range = TextRange::new(next_end, trivia_end);
                 trivia_end = next_end;
                 (it.kind, &self.text[range])
             });
@@ -132,8 +132,8 @@ impl<'a> TextTreeSink<'a> {
         }
     }

-    fn do_token(&mut self, kind: SyntaxKind, len: TextUnit, n_tokens: usize) {
-        let range = TextRange::offset_len(self.text_pos, len);
+    fn do_token(&mut self, kind: SyntaxKind, len: TextSize, n_tokens: usize) {
+        let range = TextRange::at(self.text_pos, len);
         let text: SmolStr = self.text[range].into();
         self.text_pos += len;
         self.token_pos += n_tokens;
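TextTreeSink folds token lengths straight into a TextSize via sum::<TextSize>(), which relies on text-size implementing iter::Sum for TextSize. A usage sketch:

    use text_size::TextSize;

    fn main() {
        let lens = [TextSize::of("struct"), TextSize::of(" "), TextSize::of("Foo")];
        let total = lens.iter().copied().sum::<TextSize>();
        assert_eq!(total, TextSize::of("struct Foo")); // 6 + 1 + 3 == 10
    }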