Mirror of https://github.com/rust-lang/rust-analyzer.git (synced 2025-10-01 06:11:35 +00:00)
Use prev_token and next_token

commit 384e1ced88 (parent b7ab079211)
1 changed file with 45 additions and 42 deletions
@@ -11,7 +11,7 @@ use ra_syntax::{
 
 use crate::{db::RootDatabase, expand::descend_into_macros, FileId, FileRange};
 use hir::db::AstDatabase;
-use itertools::Itertools;
+use std::iter::successors;
 
 pub(crate) fn extend_selection(db: &RootDatabase, frange: FileRange) -> TextRange {
     let src = db.parse(frange.file_id).tree();
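This hunk swaps the only import that changes: the old implementation needed itertools' sorted_by, while the new one drives everything with std::iter::successors. As a quick standalone illustration of the successors semantics the rest of the diff relies on (plain integers here, not rust-analyzer types):

    use std::iter::successors;

    fn main() {
        // `successors` yields the seed, then keeps applying the closure until
        // it returns None; `.last()` is the furthest element produced.
        let furthest = successors(Some(1u32), |&n| {
            let doubled = n * 2;
            if doubled <= 100 { Some(doubled) } else { None }
        })
        .last();
        assert_eq!(furthest, Some(64)); // 1, 2, 4, 8, 16, 32, 64
    }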
@@ -110,46 +110,28 @@ fn extend_tokens_from_range(
     macro_call: ast::MacroCall,
     original_range: TextRange,
 ) -> Option<TextRange> {
-    // Find all non-whitespace tokens under MacroCall
-    let all_tokens: Vec<_> = macro_call
-        .syntax()
-        .descendants_with_tokens()
-        .filter_map(|n| {
-            let token = n.as_token()?;
-            if token.kind() == WHITESPACE {
-                None
-            } else {
-                Some(token.clone())
-            }
-        })
-        .sorted_by(|a, b| Ord::cmp(&a.text_range().start(), &b.text_range().start()))
-        .collect();
-
-    // Get all indices which is in original range
-    let indices: Vec<_> =
-        all_tokens
-            .iter()
-            .enumerate()
-            .filter_map(|(i, token)| {
-                if token.text_range().is_subrange(&original_range) {
-                    Some(i)
-                } else {
-                    None
-                }
-            })
-            .collect();
-
-    // The first and last token index in original_range
-    // Note that the indices is sorted
-    let first_idx = *indices.first()?;
-    let last_idx = *indices.last()?;
+    let src = find_covering_element(&macro_call.syntax(), original_range);
+    let (first_token, last_token) = match src {
+        NodeOrToken::Node(it) => (it.first_token()?, it.last_token()?),
+        NodeOrToken::Token(it) => (it.clone(), it),
+    };
+
+    let mut first_token = skip_whitespace(first_token, Direction::Next)?;
+    let mut last_token = skip_whitespace(last_token, Direction::Prev)?;
+
+    while !first_token.text_range().is_subrange(&original_range) {
+        first_token = skip_whitespace(first_token.next_token()?, Direction::Next)?;
+    }
+    while !last_token.text_range().is_subrange(&original_range) {
+        last_token = skip_whitespace(last_token.prev_token()?, Direction::Prev)?;
+    }
 
     // compute original mapped token range
     let expanded = {
-        let first_node = descend_into_macros(db, file_id, all_tokens[first_idx].clone());
+        let first_node = descend_into_macros(db, file_id, first_token.clone());
         let first_node = first_node.map(|it| it.text_range());
 
-        let last_node = descend_into_macros(db, file_id, all_tokens[last_idx].clone());
+        let last_node = descend_into_macros(db, file_id, last_token.clone());
         if last_node.file_id == file_id.into() || first_node.file_id != last_node.file_id {
             return None;
         }
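Instead of collecting and sorting every non-whitespace token under the macro call, the new code finds the covering element once, takes its first and last tokens, and walks each endpoint inward with next_token/prev_token until it lies inside original_range, skipping whitespace on the way. A minimal standalone sketch of that inward walk, using a toy token array in place of the real SyntaxToken API (Tok, Dir, step and skip_ws are hypothetical names):

    #[derive(Clone, Copy, PartialEq)]
    enum Kind { Whitespace, Ident }

    #[derive(Clone, Copy)]
    struct Tok { kind: Kind }

    #[derive(Clone, Copy)]
    enum Dir { Next, Prev }

    // Analogue of next_token/prev_token: move one slot, None at either end.
    fn step(tokens: &[Tok], idx: usize, dir: Dir) -> Option<usize> {
        match dir {
            Dir::Next => idx.checked_add(1).filter(|&i| i < tokens.len()),
            Dir::Prev => idx.checked_sub(1),
        }
    }

    // Analogue of skip_whitespace: advance in `dir` until a non-whitespace token.
    fn skip_ws(tokens: &[Tok], mut idx: usize, dir: Dir) -> Option<usize> {
        while tokens[idx].kind == Kind::Whitespace {
            idx = step(tokens, idx, dir)?;
        }
        Some(idx)
    }

    fn main() {
        let tokens = [
            Tok { kind: Kind::Whitespace },
            Tok { kind: Kind::Ident },
            Tok { kind: Kind::Whitespace },
            Tok { kind: Kind::Ident },
        ];
        // From the leading whitespace, walk forward to the first real token...
        assert_eq!(skip_ws(&tokens, 0, Dir::Next), Some(1));
        // ...and from the inner whitespace, walk backward.
        assert_eq!(skip_ws(&tokens, 2, Dir::Prev), Some(1));
    }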
@@ -160,20 +142,28 @@ fn extend_tokens_from_range(
     let src = db.parse_or_expand(expanded.file_id)?;
     let parent = shallowest_node(&find_covering_element(&src, expanded.value))?.parent()?;
 
-    let validate = |&idx: &usize| {
-        let token: &SyntaxToken = &all_tokens[idx];
+    let validate = |token: SyntaxToken| {
         let node = descend_into_macros(db, file_id, token.clone());
-        node.file_id == expanded.file_id
+        if node.file_id == expanded.file_id
             && node.value.text_range().is_subrange(&parent.text_range())
+        {
+            Some(token)
+        } else {
+            None
+        }
     };
 
     // Find the first and last text range under expanded parent
-    let first = (0..=first_idx).rev().take_while(validate).last()?;
-    let last = (last_idx..all_tokens.len()).take_while(validate).last()?;
-
-    let range = union_range(all_tokens[first].text_range(), all_tokens[last].text_range());
+    let first = successors(Some(first_token), |token| {
+        validate(skip_whitespace(token.prev_token()?, Direction::Prev)?)
+    })
+    .last()?;
+    let last = successors(Some(last_token), |token| {
+        validate(skip_whitespace(token.next_token()?, Direction::Next)?)
+    })
+    .last()?;
+
+    let range = union_range(first.text_range(), last.text_range());
     if original_range.is_subrange(&range) && original_range != range {
         Some(range)
     } else {
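With validate now returning Option<SyntaxToken> instead of bool, the endpoint extension becomes a successors chain: keep stepping past the current endpoint while the next non-whitespace token still maps into the same expanded parent, and take the last token that passed. A standalone sketch of that shape, with indices standing in for tokens and a hypothetical containment check in place of the real macro-mapping validate:

    use std::iter::successors;

    fn main() {
        let parent = 2..=8; // pretend tokens 2..=8 lie under the expanded parent
        let validate = |i: usize| if parent.contains(&i) { Some(i) } else { None };

        // Step left from the start token while validation succeeds;
        // `.last()` is the furthest token reached, like the new `first`.
        let first = successors(Some(5usize), |&i| validate(i.checked_sub(1)?)).last();
        assert_eq!(first, Some(2));

        // Step right symmetrically for `last`.
        let last = successors(Some(5usize), |&i| validate(i + 1)).last();
        assert_eq!(last, Some(8));
    }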
@@ -181,6 +171,19 @@ fn extend_tokens_from_range(
     }
 }
 
+fn skip_whitespace(
+    mut token: SyntaxToken,
+    direction: Direction,
+) -> Option<SyntaxToken> {
+    while token.kind() == WHITESPACE {
+        token = match direction {
+            Direction::Next => token.next_token()?,
+            Direction::Prev => token.prev_token()?,
+        }
+    }
+    Some(token)
+}
+
 fn union_range(range: TextRange, r: TextRange) -> TextRange {
     let start = range.start().min(r.start());
     let end = range.end().max(r.end());
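For reference, the union_range lines kept as context compute the smallest range covering both inputs; a trivial standalone equivalent over (start, end) pairs instead of TextRange:

    // Smallest range that covers both `a` and `b`.
    fn union_range(a: (u32, u32), b: (u32, u32)) -> (u32, u32) {
        (a.0.min(b.0), a.1.max(b.1))
    }

    fn main() {
        assert_eq!(union_range((3, 7), (5, 12)), (3, 12));
    }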