Mirror of https://github.com/rust-lang/rust-analyzer.git, synced 2025-08-22 11:24:24 +00:00
Merge #7218

7218: Fix typos r=Veykril a=regexident

Apart from the very last commit on this PR (which fixes a public type's name), all changes are non-breaking.

Co-authored-by: Vincent Esche <regexident@gmail.com>

Commit 607b9ea160

32 changed files with 114 additions and 118 deletions
@@ -88,8 +88,8 @@ pub fn least_common_ancestor(u: &SyntaxNode, v: &SyntaxNode) -> Option<SyntaxNod
     let keep = u_depth.min(v_depth);

     let u_candidates = u.ancestors().skip(u_depth - keep);
-    let v_canidates = v.ancestors().skip(v_depth - keep);
-    let (res, _) = u_candidates.zip(v_canidates).find(|(x, y)| x == y)?;
+    let v_candidates = v.ancestors().skip(v_depth - keep);
+    let (res, _) = u_candidates.zip(v_candidates).find(|(x, y)| x == y)?;
     Some(res)
 }

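
For readers skimming the hunk above: `least_common_ancestor` aligns both nodes to the same depth and then walks the two ancestor chains in lockstep until they meet. Below is a minimal, self-contained sketch of that depth-alignment idea using a toy parent-index tree rather than rust-analyzer's `SyntaxNode` API; all names in it are illustrative only.

```rust
/// Toy stand-in for an ancestor chain: `parent[i]` is the parent of node `i`,
/// and the root is its own parent. Illustrative only, not rust-analyzer's API.
fn ancestors(parent: &[usize], mut node: usize) -> Vec<usize> {
    let mut chain = vec![node];
    while parent[node] != node {
        node = parent[node];
        chain.push(node);
    }
    chain
}

/// Same idea as the hunk above: drop the surplus ancestors of the deeper node,
/// then zip the two chains and return the first common element.
fn least_common_ancestor(parent: &[usize], u: usize, v: usize) -> Option<usize> {
    let (u_anc, v_anc) = (ancestors(parent, u), ancestors(parent, v));
    let (u_depth, v_depth) = (u_anc.len(), v_anc.len());
    let keep = u_depth.min(v_depth);
    let u_candidates = u_anc.into_iter().skip(u_depth - keep);
    let v_candidates = v_anc.into_iter().skip(v_depth - keep);
    let (res, _) = u_candidates.zip(v_candidates).find(|(x, y)| x == y)?;
    Some(res)
}

fn main() {
    // Tree: 0 is the root, 1 and 2 are its children, 3 is a child of 1.
    let parent = [0, 0, 0, 1];
    assert_eq!(least_common_ancestor(&parent, 3, 2), Some(0));
    assert_eq!(least_common_ancestor(&parent, 3, 1), Some(1));
}
```
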
@@ -241,7 +241,7 @@ pub fn wildcard_pat() -> ast::WildcardPat {
     }
 }

-/// Creates a tuple of patterns from an interator of patterns.
+/// Creates a tuple of patterns from an iterator of patterns.
 ///
 /// Invariant: `pats` must be length > 1
 ///
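
The `length > 1` invariant in the doc comment above is easy to motivate with plain Rust pattern syntax: `(p)` is a parenthesized pattern rather than a one-element tuple pattern, so a helper that joins patterns with commas only yields a genuine tuple pattern for two or more elements. A quick, self-contained illustration, independent of `ast::make`:

```rust
fn main() {
    // Two or more elements: an unambiguous tuple pattern.
    let (a, b) = (1, 2);
    assert_eq!((a, b), (1, 2));

    // One element: `(x)` is just a parenthesized pattern; a one-element tuple
    // pattern needs a trailing comma, which is presumably why such a builder
    // forbids (or would have to special-case) the single-element form.
    let (x,) = (7,);
    assert_eq!(x, 7);
}
```
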
@@ -24,7 +24,7 @@ pub struct Token {
 /// Beware that it checks for shebang first and its length contributes to resulting
 /// tokens offsets.
 pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
-    // non-empty string is a precondtion of `rustc_lexer::strip_shebang()`.
+    // non-empty string is a precondition of `rustc_lexer::strip_shebang()`.
     if text.is_empty() {
         return Default::default();
     }
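
The "Beware" note above is about bookkeeping: `tokenize` reports token lengths, and because a leading shebang is consumed first, its length is included when those lengths are turned into offsets. Here is a self-contained sketch of that lengths-to-offsets accounting, using a local stand-in for `Token` (the real struct holds a kind and a length, but its exact field types are not shown in the hunk):

```rust
/// Local stand-in for a length-carrying token, not the crate's `Token` type.
struct Token {
    kind: &'static str,
    len: usize,
}

fn main() {
    // Pretend tokenization of "#!/bin/x\nfn" produced these tokens. The shebang
    // is handled first, yet its length still shifts every later offset.
    let tokens = [
        Token { kind: "SHEBANG", len: 8 },
        Token { kind: "WHITESPACE", len: 1 },
        Token { kind: "FN_KW", len: 2 },
    ];
    let mut offset = 0;
    for t in &tokens {
        println!("{:>10} at {}..{}", t.kind, offset, offset + t.len);
        offset += t.len;
    }
}
```
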
@@ -76,7 +76,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxEr
 }

 /// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and
-/// returns `None` if any tokenization error occured.
+/// returns `None` if any tokenization error occurred.
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
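
The doc comment above describes a common wrapper pattern: keep one lexer entry point that reports `(kind, optional error)` and derive a stricter variant that yields the kind only when the input lexed cleanly. A hedged, self-contained sketch of that shape, with local `Kind`/`LexError` stand-ins rather than the crate's `SyntaxKind`/`SyntaxError`:

```rust
#[derive(Debug, PartialEq)]
enum Kind {
    Ident,
    Int,
}

#[derive(Debug)]
struct LexError(&'static str);

/// Toy analogue of `lex_single_syntax_kind`: classify the text and report a
/// recoverable error alongside the kind when something looks off.
fn lex_single_kind(text: &str) -> Option<(Kind, Option<LexError>)> {
    if text.is_empty() {
        None
    } else if text.chars().all(|c| c.is_ascii_digit()) {
        Some((Kind::Int, None))
    } else if text.chars().all(|c| c.is_ascii_alphabetic()) {
        Some((Kind::Ident, None))
    } else {
        Some((Kind::Ident, Some(LexError("unexpected character"))))
    }
}

/// Toy analogue of `lex_single_valid_syntax_kind`: same lexer, but any error
/// collapses the result to `None`.
fn lex_single_valid_kind(text: &str) -> Option<Kind> {
    match lex_single_kind(text)? {
        (kind, None) => Some(kind),
        (_, Some(_)) => None,
    }
}

fn main() {
    assert_eq!(lex_single_valid_kind("42"), Some(Kind::Int));
    assert_eq!(lex_single_valid_kind("a$b"), None);
}
```
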
@@ -96,7 +96,7 @@ pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
-    // non-empty string is a precondtion of `rustc_lexer::first_token()`.
+    // non-empty string is a precondition of `rustc_lexer::first_token()`.
    if text.is_empty() {
         return None;
     }
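
Both corrected comments in this file point at the same guard pattern: the low-level `rustc_lexer` entry points assume non-empty input, so the higher-level wrappers check for the empty string up front and return a neutral value instead of forwarding the call. A small sketch of that pattern with a hypothetical panicking primitive (not the real `rustc_lexer` API):

```rust
/// Hypothetical low-level primitive that, like `rustc_lexer::first_token()`,
/// requires non-empty input and is not meant to be called otherwise.
fn first_byte_unchecked(text: &str) -> u8 {
    assert!(!text.is_empty(), "precondition: text must be non-empty");
    text.as_bytes()[0]
}

/// Safe wrapper in the spirit of `lex_first_token`: enforce the precondition
/// at the boundary and turn the empty case into `None`.
fn first_byte(text: &str) -> Option<u8> {
    if text.is_empty() {
        return None;
    }
    Some(first_byte_unchecked(text))
}

fn main() {
    assert_eq!(first_byte("fn"), Some(b'f'));
    assert_eq!(first_byte(""), None);
}
```
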
@@ -117,7 +117,7 @@ fn rustc_token_kind_to_syntax_kind(
     token_text: &str,
 ) -> (SyntaxKind, Option<&'static str>) {
     // A note on an intended tradeoff:
-    // We drop some useful infromation here (see patterns with double dots `..`)
+    // We drop some useful information here (see patterns with double dots `..`)
     // Storing that info in `SyntaxKind` is not possible due to its layout requirements of
     // being `u16` that come from `rowan::SyntaxKind`.

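
The note above refers to the `..` rest patterns used when matching `rustc_lexer`'s token kinds: those variants carry extra fields, but the destination `SyntaxKind` is constrained to be a bare `u16` (the representation `rowan::SyntaxKind` imposes), so the extra fields are deliberately dropped. A hedged, self-contained sketch of that kind of lossy conversion, using toy enums rather than the real `rustc_lexer`/`rowan` types:

```rust
/// Toy analogue of a rich lexer token kind: some variants carry extra data.
enum RichKind {
    Ident,
    Literal { terminated: bool, suffix_len: usize },
}

/// Toy analogue of a flat, `u16`-backed syntax kind: just a tag, no payload.
#[repr(u16)]
#[derive(Debug, PartialEq)]
enum FlatKind {
    Ident = 0,
    Literal = 1,
}

/// The lossy step: the `..` rest pattern drops `terminated`/`suffix_len`
/// because there is nowhere to store them in a plain `u16` tag.
fn to_flat(kind: RichKind) -> FlatKind {
    match kind {
        RichKind::Ident => FlatKind::Ident,
        RichKind::Literal { .. } => FlatKind::Literal,
    }
}

fn main() {
    let rich = RichKind::Literal { terminated: false, suffix_len: 0 };
    // The flat kind remembers *that* this was a literal, but not whether it was
    // terminated; that detail has to be reported out-of-band (e.g. as an error).
    assert_eq!(to_flat(rich), FlatKind::Literal);
}
```
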
@@ -173,7 +173,7 @@ pub(crate) fn validate_block_structure(root: &SyntaxNode) {
         assert_eq!(
             node.parent(),
             pair.parent(),
-            "\nunpaired curleys:\n{}\n{:#?}\n",
+            "\nunpaired curlys:\n{}\n{:#?}\n",
             root.text(),
             root,
         );