mirror of https://github.com/rust-lang/rust-analyzer.git
synced 2025-10-01 06:11:35 +00:00

switch to new rowan

parent dec9bde108
commit 9e213385c9

50 changed files with 1026 additions and 1227 deletions

@@ -1,8 +1,9 @@
 use ra_db::SourceDatabase;
 use ra_syntax::{
-    Direction, SyntaxNode, TextRange, TextUnit, AstNode,
-    algo::{find_covering_node, find_leaf_at_offset, LeafAtOffset},
-    SyntaxKind::*,
+    Direction, SyntaxNode, TextRange, TextUnit, AstNode, SyntaxElement,
+    algo::{find_covering_element, find_token_at_offset, TokenAtOffset},
+    SyntaxKind::*, SyntaxToken,
+    ast::Comment,
 };
 
 use crate::{FileRange, db::RootDatabase};

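The import swap above is the migration in miniature: cursor lookups now yield tokens (find_token_at_offset / TokenAtOffset replace find_leaf_at_offset / LeafAtOffset), and range lookups yield a SyntaxElement that is either a node or a token (find_covering_element replaces find_covering_node). A minimal sketch of the two new entry points, using only calls that appear in this diff; the helper names token_range_at and covering_range are illustrative, not part of ra_syntax:

    use ra_syntax::{
        algo::{find_covering_element, find_token_at_offset, TokenAtOffset},
        SyntaxElement, SyntaxNode, TextRange, TextUnit,
    };

    // Range of the token(s) under the cursor, in the new token-based API.
    fn token_range_at(root: &SyntaxNode, offset: TextUnit) -> Option<TextRange> {
        match find_token_at_offset(root, offset) {
            TokenAtOffset::None => None,
            TokenAtOffset::Single(token) => Some(token.range()),
            TokenAtOffset::Between(left, right) => {
                // The cursor touches two tokens; cover both.
                Some(TextRange::from_to(left.range().start(), right.range().end()))
            }
        }
    }

    // A range is covered either by a single token or by a node.
    fn covering_range(root: &SyntaxNode, range: TextRange) -> TextRange {
        match find_covering_element(root, range) {
            SyntaxElement::Token(token) => token.range(),
            SyntaxElement::Node(node) => node.range(),
        }
    }
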
@@ -32,53 +33,58 @@ fn try_extend_selection(root: &SyntaxNode, range: TextRange) -> Option<TextRange>
 
     if range.is_empty() {
         let offset = range.start();
-        let mut leaves = find_leaf_at_offset(root, offset);
+        let mut leaves = find_token_at_offset(root, offset);
         if leaves.clone().all(|it| it.kind() == WHITESPACE) {
             return Some(extend_ws(root, leaves.next()?, offset));
         }
         let leaf_range = match leaves {
-            LeafAtOffset::None => return None,
-            LeafAtOffset::Single(l) => {
+            TokenAtOffset::None => return None,
+            TokenAtOffset::Single(l) => {
                 if string_kinds.contains(&l.kind()) {
                     extend_single_word_in_comment_or_string(l, offset).unwrap_or_else(|| l.range())
                 } else {
                     l.range()
                 }
             }
-            LeafAtOffset::Between(l, r) => pick_best(l, r).range(),
+            TokenAtOffset::Between(l, r) => pick_best(l, r).range(),
         };
         return Some(leaf_range);
     };
-    let node = find_covering_node(root, range);
+    let node = match find_covering_element(root, range) {
+        SyntaxElement::Token(token) => {
+            if token.range() != range {
+                return Some(token.range());
+            }
+            if let Some(comment) = Comment::cast(token) {
+                if let Some(range) = extend_comments(comment) {
+                    return Some(range);
+                }
+            }
+            token.parent()
+        }
+        SyntaxElement::Node(node) => node,
+    };
+    if node.range() != range {
+        return Some(node.range());
+    }
 
     // Using shallowest node with same range allows us to traverse siblings.
     let node = node.ancestors().take_while(|n| n.range() == node.range()).last().unwrap();
 
-    if range == node.range() {
-        if string_kinds.contains(&node.kind()) {
-            if let Some(range) = extend_comments(node) {
-                return Some(range);
-            }
-        }
-
-        if node.parent().map(|n| list_kinds.contains(&n.kind())) == Some(true) {
-            if let Some(range) = extend_list_item(node) {
-                return Some(range);
-            }
+    if node.parent().map(|n| list_kinds.contains(&n.kind())) == Some(true) {
+        if let Some(range) = extend_list_item(node) {
+            return Some(range);
         }
     }
 
-    match node.ancestors().skip_while(|n| n.range() == range).next() {
-        None => None,
-        Some(parent) => Some(parent.range()),
-    }
+    node.parent().map(|it| it.range())
 }
 
 fn extend_single_word_in_comment_or_string(
-    leaf: &SyntaxNode,
+    leaf: SyntaxToken,
     offset: TextUnit,
 ) -> Option<TextRange> {
-    let text: &str = leaf.leaf_text()?;
+    let text: &str = leaf.text();
     let cursor_position: u32 = (offset - leaf.range().start()).into();
 
     let (before, after) = text.split_at(cursor_position as usize);

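Stripped of the comment, string, and list special cases, the new control flow above is: a selection smaller than its covering token grows to that token; a whole token climbs to its parent node; a selection smaller than its covering node grows to that node; a whole node grows to its parent. A condensed paraphrase of that skeleton follows (grow_once is an illustrative name; the real function is try_extend_selection exactly as shown in the hunk):

    use ra_syntax::{algo::find_covering_element, SyntaxElement, SyntaxNode, TextRange};

    fn grow_once(root: &SyntaxNode, range: TextRange) -> Option<TextRange> {
        let node = match find_covering_element(root, range) {
            SyntaxElement::Token(token) => {
                // The selection sits inside a single token: take the token first.
                if token.range() != range {
                    return Some(token.range());
                }
                // Already a whole token: continue from its parent node.
                token.parent()
            }
            SyntaxElement::Node(node) => node,
        };
        if node.range() != range {
            // Grow to the covering node.
            Some(node.range())
        } else {
            // Already a whole node: grow to its parent, if any.
            node.parent().map(|it| it.range())
        }
    }
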
@@ -101,14 +107,14 @@ fn extend_single_word_in_comment_or_string(
     }
 }
 
-fn extend_ws(root: &SyntaxNode, ws: &SyntaxNode, offset: TextUnit) -> TextRange {
-    let ws_text = ws.leaf_text().unwrap();
+fn extend_ws(root: &SyntaxNode, ws: SyntaxToken, offset: TextUnit) -> TextRange {
+    let ws_text = ws.text();
     let suffix = TextRange::from_to(offset, ws.range().end()) - ws.range().start();
     let prefix = TextRange::from_to(ws.range().start(), offset) - ws.range().start();
     let ws_suffix = &ws_text.as_str()[suffix];
     let ws_prefix = &ws_text.as_str()[prefix];
     if ws_text.contains('\n') && !ws_suffix.contains('\n') {
-        if let Some(node) = ws.next_sibling() {
+        if let Some(node) = ws.next_sibling_or_token() {
             let start = match ws_prefix.rfind('\n') {
                 Some(idx) => ws.range().start() + TextUnit::from((idx + 1) as u32),
                 None => node.range().start(),

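extend_ws now takes the whitespace as a SyntaxToken, so its text is available directly through text() instead of the old fallible leaf_text(), and whatever follows it comes from next_sibling_or_token(), which may be a node or a token. A small sketch of that access pattern; ws_summary is an illustrative helper, and the assumption that text() returns a string-like handle is taken from the hunk above rather than checked against the crate:

    use ra_syntax::{SyntaxElement, SyntaxKind, SyntaxToken};

    // A token always carries its text (no Option to unwrap), and its next
    // sibling element can be either a node or a token.
    fn ws_summary(ws: SyntaxToken) -> (bool, Option<SyntaxKind>) {
        let is_multi_line = ws.text().contains('\n');
        let next_kind = ws.next_sibling_or_token().map(|element| match element {
            SyntaxElement::Node(node) => node.kind(),
            SyntaxElement::Token(token) => token.kind(),
        });
        (is_multi_line, next_kind)
    }
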
@@ -124,9 +130,9 @@ fn extend_ws(root: &SyntaxNode, ws: &SyntaxNode, offset: TextUnit) -> TextRange
     ws.range()
 }
 
-fn pick_best<'a>(l: &'a SyntaxNode, r: &'a SyntaxNode) -> &'a SyntaxNode {
+fn pick_best<'a>(l: SyntaxToken<'a>, r: SyntaxToken<'a>) -> SyntaxToken<'a> {
     return if priority(r) > priority(l) { r } else { l };
-    fn priority(n: &SyntaxNode) -> usize {
+    fn priority(n: SyntaxToken) -> usize {
         match n.kind() {
             WHITESPACE => 0,
             IDENT | SELF_KW | SUPER_KW | CRATE_KW | LIFETIME => 2,

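pick_best now takes and returns SyntaxToken<'a> by value instead of &'a SyntaxNode; the hunks above pass the same token by value and keep using it afterwards, so a token here behaves as a cheap copyable handle into the tree. An illustrative resolver in the same shape (the priority table below is made up, not the one in this file):

    use ra_syntax::{SyntaxKind::*, SyntaxToken};

    fn prefer_meaningful<'a>(l: SyntaxToken<'a>, r: SyntaxToken<'a>) -> SyntaxToken<'a> {
        fn score(t: SyntaxToken) -> u8 {
            match t.kind() {
                WHITESPACE => 0, // never prefer whitespace
                IDENT => 2,      // identifiers make the best anchors
                _ => 1,
            }
        }
        if score(r) > score(l) { r } else { l }
    }
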
@@ -137,54 +143,60 @@ fn pick_best<'a>(l: &'a SyntaxNode, r: &'a SyntaxNode) -> &'a SyntaxNode {
 
 /// Extend list item selection to include nearby comma and whitespace.
 fn extend_list_item(node: &SyntaxNode) -> Option<TextRange> {
-    fn is_single_line_ws(node: &SyntaxNode) -> bool {
-        node.kind() == WHITESPACE && !node.leaf_text().unwrap().contains('\n')
+    fn is_single_line_ws(node: &SyntaxToken) -> bool {
+        node.kind() == WHITESPACE && !node.text().contains('\n')
     }
 
-    fn nearby_comma(node: &SyntaxNode, dir: Direction) -> Option<&SyntaxNode> {
-        node.siblings(dir)
+    fn nearby_comma(node: &SyntaxNode, dir: Direction) -> Option<SyntaxToken> {
+        node.siblings_with_tokens(dir)
             .skip(1)
-            .skip_while(|node| is_single_line_ws(node))
+            .skip_while(|node| match node {
+                SyntaxElement::Node(_) => false,
+                SyntaxElement::Token(it) => is_single_line_ws(it),
+            })
             .next()
+            .and_then(|it| it.as_token())
             .filter(|node| node.kind() == COMMA)
     }
 
     if let Some(comma_node) = nearby_comma(node, Direction::Prev) {
         return Some(TextRange::from_to(comma_node.range().start(), node.range().end()));
     }
 
     if let Some(comma_node) = nearby_comma(node, Direction::Next) {
         // Include any following whitespace when comma if after list item.
         let final_node = comma_node
-            .siblings(Direction::Next)
-            .skip(1)
-            .next()
+            .next_sibling_or_token()
+            .and_then(|it| it.as_token())
             .filter(|node| is_single_line_ws(node))
             .unwrap_or(comma_node);
 
         return Some(TextRange::from_to(node.range().start(), final_node.range().end()));
     }
 
-    return None;
+    None
 }
 
-fn extend_comments(node: &SyntaxNode) -> Option<TextRange> {
-    let prev = adj_comments(node, Direction::Prev);
-    let next = adj_comments(node, Direction::Next);
+fn extend_comments(comment: Comment) -> Option<TextRange> {
+    let prev = adj_comments(comment, Direction::Prev);
+    let next = adj_comments(comment, Direction::Next);
     if prev != next {
-        Some(TextRange::from_to(prev.range().start(), next.range().end()))
+        Some(TextRange::from_to(prev.syntax().range().start(), next.syntax().range().end()))
     } else {
         None
     }
 }
 
-fn adj_comments(node: &SyntaxNode, dir: Direction) -> &SyntaxNode {
-    let mut res = node;
-    for node in node.siblings(dir) {
-        match node.kind() {
-            COMMENT => res = node,
-            WHITESPACE if !node.leaf_text().unwrap().as_str().contains("\n\n") => (),
-            _ => break,
+fn adj_comments(comment: Comment, dir: Direction) -> Comment {
+    let mut res = comment;
+    for element in comment.syntax().siblings_with_tokens(dir) {
+        let token = match element.as_token() {
+            None => break,
+            Some(token) => token,
+        };
+        if let Some(c) = Comment::cast(token) {
+            res = c
+        } else if token.kind() != WHITESPACE || token.text().contains("\n\n") {
+            break;
         }
     }
     res

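The pattern that extend_list_item and adj_comments converge on above is: walk siblings_with_tokens(dir), which yields SyntaxElement items, narrow to tokens with as_token(), then narrow further with a kind check or Comment::cast(). A compact sketch of that pipeline; trailing_comma is an illustrative helper, and like the .skip(1) in the hunk it assumes the iterator starts at the element for node itself:

    use ra_syntax::{
        Direction, SyntaxNode, SyntaxToken,
        SyntaxKind::{COMMA, WHITESPACE},
    };

    // The first non-whitespace token after `node`, if it is a comma.
    fn trailing_comma(node: &SyntaxNode) -> Option<SyntaxToken> {
        node.siblings_with_tokens(Direction::Next)
            .skip(1) // skip the element for `node` itself
            .filter_map(|element| element.as_token()) // drop sibling nodes
            .find(|token| token.kind() != WHITESPACE)
            .filter(|token| token.kind() == COMMA)
    }
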
@@ -308,23 +320,13 @@ fn bar(){}
 /*
 foo
 _bar1<|>*/
-    "#,
+"#,
         &["_bar1", "/*\nfoo\n_bar1*/"],
     );
 
-    do_check(
-        r#"
-//!<|>foo_2 bar
-    "#,
-        &["foo_2", "//!foo_2 bar"],
-    );
+    do_check(r#"//!<|>foo_2 bar"#, &["foo_2", "//!foo_2 bar"]);
 
-    do_check(
-        r#"
-/<|>/foo bar
-    "#,
-        &["//foo bar"],
-    );
+    do_check(r#"/<|>/foo bar"#, &["//foo bar"]);
 }
 
 #[test]

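In these fixtures, <|> marks the cursor and the expected slice appears to list the successive selections produced by extending repeatedly from that point. do_check itself lives elsewhere in this file and is not part of the diff; the snippet below is a hypothetical extractor that only illustrates the marker convention, in plain Rust:

    // Split a fixture into (cursor offset, text with the marker removed).
    fn extract_cursor(fixture: &str) -> (usize, String) {
        let offset = fixture.find("<|>").expect("fixture must contain <|>");
        let text = format!("{}{}", &fixture[..offset], &fixture[offset + 3..]);
        (offset, text)
    }

    fn main() {
        let (offset, text) = extract_cursor("fn main() { foo<|>+bar;}");
        assert_eq!(offset, 15); // the cursor sits between `foo` and `+`
        assert_eq!(text, "fn main() { foo+bar;}");
    }
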
@@ -332,13 +334,13 @@ _bar1<|>*/
     do_check(
         r#"
 fn main() { foo<|>+bar;}
-    "#,
+"#,
         &["foo", "foo+bar"],
     );
     do_check(
         r#"
 fn main() { foo+<|>bar;}
-    "#,
+"#,
         &["bar", "foo+bar"],
     );
 }

@@ -355,11 +357,11 @@ fn main() { foo+<|>bar;}
     do_check(
         r#"
 impl S {
-fn foo() {
-// hel<|>lo world
-}
-    "#,
+    fn foo() {
+        // hel<|>lo world
+    }
+}
+"#,
         &["hello", "// hello world"],
     );
 }

@@ -371,7 +373,7 @@ impl S {
 fn bar(){}
 
 " fn f<|>oo() {"
-    "#,
+"#,
         &["foo", "\" fn foo() {\""],
     );
 }