Mirror of https://github.com/rust-lang/rust-analyzer.git, synced 2025-08-20 10:30:34 +00:00.
Merge #7218

7218: Fix typos r=Veykril a=regexident

Apart from the very last commit on this PR (which fixes a public type's name), all changes are non-breaking.

Co-authored-by: Vincent Esche <regexident@gmail.com>

Commit 607b9ea160: 32 changed files with 114 additions and 118 deletions.
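The "public type's name" fix mentioned above appears to be the rename visible in the mbe hunks below, where the RepetitionEmtpyTokenTree variant of the public ParseError enum becomes RepetitionEmptyTokenTree. A hypothetical sketch of what a downstream consumer of the mbe crate would have to touch; the surrounding match is invented for illustration:

    // Hypothetical downstream code; only the variant name comes from this PR.
    fn describe(err: &mbe::ParseError) -> String {
        match err {
            mbe::ParseError::Expected(what) => format!("expected {}", what),
            // Before this PR this variant was spelled RepetitionEmtpyTokenTree.
            mbe::ParseError::RepetitionEmptyTokenTree => {
                "repetition could match an empty token tree".to_string()
            }
        }
    }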
@@ -1,6 +1,6 @@
 //! Utility module for converting between hir_def ids and code_model wrappers.
 //!
-//! It's unclear if we need this long-term, but it's definitelly useful while we
+//! It's unclear if we need this long-term, but it's definitely useful while we
 //! are splitting the hir.
 
 use hir_def::{

@@ -581,7 +581,7 @@ impl ExprCollector<'_> {
         match res.value {
             Some((mark, expansion)) => {
                 // FIXME: Statements are too complicated to recover from error for now.
-                // It is because we don't have any hygenine for local variable expansion right now.
+                // It is because we don't have any hygiene for local variable expansion right now.
                 if T::can_cast(syntax::SyntaxKind::MACRO_STMTS) && res.err.is_some() {
                     self.expander.exit(self.db, mark);
                     collector(self, None);

@@ -959,7 +959,7 @@ impl ExprCollector<'_> {
 
     fn collect_tuple_pat(&mut self, args: AstChildren<ast::Pat>) -> (Vec<PatId>, Option<usize>) {
         // Find the location of the `..`, if there is one. Note that we do not
-        // consider the possiblity of there being multiple `..` here.
+        // consider the possibility of there being multiple `..` here.
         let ellipsis = args.clone().position(|p| matches!(p, ast::Pat::RestPat(_)));
         // We want to skip the `..` pattern here, since we account for it above.
         let args = args
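The comment fixed in the hunk above documents a two-step treatment of `..` in tuple patterns: record its index, then drop it from the collected sub-patterns. A self-contained sketch of the same idea over plain strings (none of these names are rust-analyzer APIs):

    // Locate a single `..` among tuple sub-patterns and drop it, mirroring
    // the position-then-skip logic of collect_tuple_pat.
    fn split_tuple_pat(pats: &[&str]) -> (Vec<String>, Option<usize>) {
        // Like the code above, ignore the possibility of multiple `..`.
        let ellipsis = pats.iter().position(|p| *p == "..");
        let kept = pats.iter().filter(|p| **p != "..").map(|p| p.to_string()).collect();
        (kept, ellipsis)
    }

    fn main() {
        // (a, .., z): sub-patterns ["a", "z"], with the ellipsis at index 1.
        assert_eq!(split_tuple_pat(&["a", "..", "z"]), (vec!["a".into(), "z".into()], Some(1)));
    }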
@@ -1,6 +1,6 @@
 //! This module describes hir-level representation of expressions.
 //!
-//! This representaion is:
+//! This representation is:
 //!
 //! 1. Identity-based. Each expression has an `id`, so we can distinguish
 //!    between different `1` in `1 + 1`.

@@ -267,7 +267,7 @@ impl DefCollector<'_> {
 
         // Resolve all indeterminate resolved imports again
        // As some of the macros will expand newly import shadowing partial resolved imports
-        // FIXME: We maybe could skip this, if we handle the Indetermine imports in `resolve_imports`
+        // FIXME: We maybe could skip this, if we handle the indeterminate imports in `resolve_imports`
        // correctly
        let partial_resolved = self.resolved_imports.iter().filter_map(|directive| {
            if let PartialResolvedImport::Indeterminate(_) = directive.status {
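The comments touched above describe a fixpoint step in name resolution: once macros expand, imports that only resolved partially (Indeterminate) are fed through resolution again. A hedged, generic sketch of that retry loop, with all types invented for illustration:

    #[derive(Clone, Copy, PartialEq)]
    enum Status { Indeterminate, Resolved }

    struct Directive { status: Status }

    // Keep re-resolving while a pass makes progress, so imports that were
    // indeterminate before an expansion get another chance afterwards.
    fn resolve_until_fixpoint(directives: &mut [Directive], resolve_one: impl Fn(&mut Directive) -> bool) {
        loop {
            let mut changed = false;
            for d in directives.iter_mut().filter(|d| d.status == Status::Indeterminate) {
                changed |= resolve_one(d); // true if the status advanced
            }
            if !changed {
                break;
            }
        }
    }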
@@ -402,7 +402,7 @@ impl DefCollector<'_> {
 
     /// Define a proc macro
     ///
-    /// A proc macro is similar to normal macro scope, but it would not visiable in legacy textual scoped.
+    /// A proc macro is similar to normal macro scope, but it would not visible in legacy textual scoped.
     /// And unconditionally exported.
     fn define_proc_macro(&mut self, name: Name, macro_: MacroDefId) {
         self.update(

@@ -592,7 +592,7 @@ impl DefCollector<'_> {
             // XXX: urgh, so this works by accident! Here, we look at
             // the enum data, and, in theory, this might require us to
             // look back at the crate_def_map, creating a cycle. For
-            // example, `enum E { crate::some_macro!(); }`. Luckely, the
+            // example, `enum E { crate::some_macro!(); }`. Luckily, the
             // only kind of macro that is allowed inside enum is a
             // `cfg_macro`, and we don't need to run name resolution for
             // it, but this is sheer luck!

@@ -655,7 +655,7 @@ impl DefCollector<'_> {
         &mut self,
         module_id: LocalModuleId,
         resolutions: &[(Option<Name>, PerNs)],
-        // All resolutions are imported with this visibility; the visibilies in
+        // All resolutions are imported with this visibility; the visibilities in
         // the `PerNs` values are ignored and overwritten
         vis: Visibility,
         import_type: ImportType,
@@ -27,7 +27,7 @@ use crate::{
 
 #[derive(Debug, Clone, Default)]
 pub struct Resolver {
-    // FIXME: all usages generally call `.rev`, so maybe reverse once in consturciton?
+    // FIXME: all usages generally call `.rev`, so maybe reverse once in construction?
     scopes: Vec<Scope>,
 }
 

@@ -40,7 +40,7 @@ impl TokenExpander {
             // FIXME switch these to ExpandResult as well
             TokenExpander::BuiltinDerive(it) => it.expand(db, id, tt).into(),
             TokenExpander::ProcMacro(_) => {
-                // We store the result in salsa db to prevent non-determinisc behavior in
+                // We store the result in salsa db to prevent non-deterministic behavior in
                 // some proc-macro implementation
                 // See #4315 for details
                 db.expand_proc_macro(id.into()).into()
@@ -379,7 +379,7 @@ pub fn record_literal_missing_fields(
     id: ExprId,
     expr: &Expr,
 ) -> Option<(VariantId, Vec<LocalFieldId>, /*exhaustive*/ bool)> {
-    let (fields, exhausitve) = match expr {
+    let (fields, exhaustive) = match expr {
         Expr::RecordLit { path: _, fields, spread } => (fields, spread.is_none()),
         _ => return None,
     };

@@ -400,7 +400,7 @@ pub fn record_literal_missing_fields(
     if missed_fields.is_empty() {
         return None;
     }
-    Some((variant_def, missed_fields, exhausitve))
+    Some((variant_def, missed_fields, exhaustive))
 }
 
 pub fn record_pattern_missing_fields(
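The variable renamed above is computed as `spread.is_none()`: a record literal is only checked for missing fields exhaustively when it has no `..` spread. In ordinary source terms:

    #[derive(Default)]
    struct Config { host: String, port: u16, verbose: bool }

    fn main() {
        // Exhaustive literal: every field must be named, so a missing one is an error.
        let a = Config { host: "localhost".into(), port: 8080, verbose: false };
        // Non-exhaustive literal: `..` fills the rest, which is why the flag
        // above is `spread.is_none()`.
        let b = Config { port: 9090, ..Default::default() };
        let _ = (a, b);
    }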
@@ -14,7 +14,7 @@
 //! The algorithm implemented here is a modified version of the one described in
 //! <http://moscova.inria.fr/~maranget/papers/warn/index.html>.
 //! However, to save future implementors from reading the original paper, we
-//! summarise the algorithm here to hopefully save time and be a little clearer
+//! summarize the algorithm here to hopefully save time and be a little clearer
 //! (without being so rigorous).
 //!
 //! The core of the algorithm revolves about a "usefulness" check. In particular, we

@@ -132,7 +132,7 @@
 //! The algorithm is inductive (on the number of columns: i.e., components of tuple patterns).
 //! That means we're going to check the components from left-to-right, so the algorithm
 //! operates principally on the first component of the matrix and new pattern-stack `p`.
-//! This algorithm is realised in the `is_useful` function.
+//! This algorithm is realized in the `is_useful` function.
 //!
 //! Base case (`n = 0`, i.e., an empty tuple pattern):
 //! - If `P` already contains an empty pattern (i.e., if the number of patterns `m > 0`), then
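The doc comment corrected above is summarizing Maranget-style usefulness checking. A hedged skeleton of the inductive shape it describes; `Pat` and the specialization step are elided, and this is not the crate's actual is_useful:

    // `matrix` is P (m rows of n patterns each); `p` is the new pattern-stack.
    fn is_useful<Pat>(matrix: &[Vec<Pat>], p: &[Pat]) -> bool {
        if p.is_empty() {
            // Base case (n = 0): the empty pattern-stack is useful only when
            // the matrix has no rows (m = 0); any existing row already covers it.
            return matrix.is_empty();
        }
        // Inductive step: specialize the matrix and `p` on the first column
        // (constructor or wildcard) and recurse; omitted in this sketch.
        unimplemented!()
    }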
@@ -491,16 +491,16 @@ impl Ty {
     fn from_hir_path_inner(
         ctx: &TyLoweringContext<'_>,
         segment: PathSegment<'_>,
-        typable: TyDefId,
+        typeable: TyDefId,
         infer_args: bool,
     ) -> Ty {
-        let generic_def = match typable {
+        let generic_def = match typeable {
             TyDefId::BuiltinType(_) => None,
             TyDefId::AdtId(it) => Some(it.into()),
             TyDefId::TypeAliasId(it) => Some(it.into()),
         };
         let substs = substs_from_path_segment(ctx, segment, generic_def, infer_args);
-        ctx.db.ty(typable).subst(&substs)
+        ctx.db.ty(typeable).subst(&substs)
     }
 
     /// Collect generic arguments from a path into a `Substs`. See also
@@ -39,7 +39,7 @@ pub(crate) fn rewrite_links(db: &RootDatabase, markdown: &str, definition: &Defi
         if target.contains("://") {
             (target.to_string(), title.to_string())
         } else {
-            // Two posibilities:
+            // Two possibilities:
             // * path-based links: `../../module/struct.MyStruct.html`
             // * module-based links (AKA intra-doc links): `super::super::module::MyStruct`
             if let Some(rewritten) = rewrite_intra_doc_link(db, *definition, target, title) {

@@ -442,7 +442,7 @@ fn get_symbol_fragment(db: &dyn HirDatabase, field_or_assoc: &FieldOrAssocItem)
         function.as_assoc_item(db).map(|assoc| assoc.container(db)),
         Some(AssocItemContainer::Trait(..))
     );
-    // This distinction may get more complicated when specialisation is available.
+    // This distinction may get more complicated when specialization is available.
     // Rustdoc makes this decision based on whether a method 'has defaultness'.
     // Currently this is only the case for provided trait methods.
     if is_trait_method && !function.has_body(db) {
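The comment above ties rustdoc's anchor choice to whether a trait method 'has defaultness', i.e. whether it is a provided method, which is what the `is_trait_method && !function.has_body(db)` test distinguishes. In source terms:

    trait Greeter {
        // Required method: no body, so the hunk's `!has_body` branch applies.
        fn name(&self) -> String;

        // Provided (default) method: has a body, the 'has defaultness' case.
        fn greet(&self) -> String {
            format!("hello, {}", self.name())
        }
    }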
@@ -1953,16 +1953,16 @@ struct S {
 /// Test cases:
 /// case 1. bare URL: https://www.example.com/
 /// case 2. inline URL with title: [example](https://www.example.com/)
-/// case 3. code refrence: [`Result`]
-/// case 4. code refrence but miss footnote: [`String`]
+/// case 3. code reference: [`Result`]
+/// case 4. code reference but miss footnote: [`String`]
 /// case 5. autolink: <http://www.example.com/>
 /// case 6. email address: <test@example.com>
-/// case 7. refrence: [example][example]
+/// case 7. reference: [example][example]
 /// case 8. collapsed link: [example][]
 /// case 9. shortcut link: [example]
 /// case 10. inline without URL: [example]()
-/// case 11. refrence: [foo][foo]
-/// case 12. refrence: [foo][bar]
+/// case 11. reference: [foo][foo]
+/// case 12. reference: [foo][bar]
 /// case 13. collapsed link: [foo][]
 /// case 14. shortcut link: [foo]
 /// case 15. inline without URL: [foo]()

@@ -1989,16 +1989,16 @@ pub fn fo$0o() {}
 Test cases:
 case 1. bare URL: https://www.example.com/
 case 2. inline URL with title: [example](https://www.example.com/)
-case 3. code refrence: `Result`
-case 4. code refrence but miss footnote: `String`
+case 3. code reference: `Result`
+case 4. code reference but miss footnote: `String`
 case 5. autolink: http://www.example.com/
 case 6. email address: test@example.com
-case 7. refrence: example
+case 7. reference: example
 case 8. collapsed link: example
 case 9. shortcut link: example
 case 10. inline without URL: example
-case 11. refrence: foo
-case 12. refrence: foo
+case 11. reference: foo
+case 12. reference: foo
 case 13. collapsed link: foo
 case 14. shortcut link: foo
 case 15. inline without URL: foo
@@ -3,7 +3,7 @@
 //! or `ast::NameRef`. If it's a `ast::NameRef`, at the classification step we
 //! try to resolve the direct tree parent of this element, otherwise we
 //! already have a definition and just need to get its HIR together with
-//! some information that is needed for futher steps of searching.
+//! some information that is needed for further steps of searching.
 //! After that, we collect files that might contain references and look
 //! for text occurrences of the identifier. If there's an `ast::NameRef`
 //! at the index that the match starts at and its tree parent is
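The module docs fixed above sketch a two-phase reference search: a cheap text scan for the identifier first, then semantic classification of every candidate. A generic sketch of that shape (all names invented):

    // Phase 1: candidate byte offsets of `ident` via plain text search.
    fn candidate_offsets(text: &str, ident: &str) -> Vec<usize> {
        text.match_indices(ident).map(|(i, _)| i).collect()
    }

    // Phase 2 stand-in: the real thing parses at each offset and checks that
    // the token is an ast::NameRef resolving to the searched definition.
    fn is_real_reference(_text: &str, _offset: usize) -> bool {
        true // placeholder for semantic classification
    }

    fn find_references(text: &str, ident: &str) -> Vec<usize> {
        candidate_offsets(text, ident)
            .into_iter()
            .filter(|&off| is_real_reference(text, off))
            .collect()
    }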
@@ -945,7 +945,7 @@ use crate::foo$0::FooContent;
 //- /lib.rs
 mod fo$0o;
 //- /foo/mod.rs
-// emtpy
+// empty
 "#,
             expect![[r#"
                 RangeInfo {

@@ -995,7 +995,7 @@ mod fo$0o;
 mod outer { mod fo$0o; }
 
 //- /outer/foo.rs
-// emtpy
+// empty
 "#,
             expect![[r#"
                 RangeInfo {
@@ -1,4 +1,4 @@
-//! This module contains an import search funcionality that is provided to the assists module.
+//! This module contains an import search functionality that is provided to the assists module.
 //! Later, this should be moved away to a separate crate that is accessible from the assists module.
 
 use hir::{import_map, AsAssocItem, Crate, MacroDef, ModuleDef, Semantics};

@@ -24,7 +24,7 @@ use crate::{
 #[derive(Debug, PartialEq, Eq)]
 pub enum ParseError {
     Expected(String),
-    RepetitionEmtpyTokenTree,
+    RepetitionEmptyTokenTree,
 }
 
 #[derive(Debug, PartialEq, Eq, Clone)]

@@ -270,7 +270,7 @@ fn validate(pattern: &MetaTemplate) -> Result<(), ParseError> {
                 }
                 false
             }) {
-                return Err(ParseError::RepetitionEmtpyTokenTree);
+                return Err(ParseError::RepetitionEmptyTokenTree);
             }
         }
         validate(subtree)?
@@ -378,7 +378,7 @@ pub(super) fn match_repeat(
     src: &mut TtIter,
 ) -> Result<(), ExpandError> {
     // Dirty hack to make macro-expansion terminate.
-    // This should be replaced by a propper macro-by-example implementation
+    // This should be replaced by a proper macro-by-example implementation
     let mut limit = 65536;
     let mut counter = 0;
 

@@ -67,7 +67,7 @@ struct NestingState {
     /// because there is no variable in use by the current repetition
     hit: bool,
     /// `at_end` is currently necessary to tell `expand_repeat` if it should stop
-    /// because there is no more value avaible for the current repetition
+    /// because there is no more value available for the current repetition
     at_end: bool,
 }
 

@@ -179,11 +179,7 @@ fn expand_repeat(
 
         counter += 1;
         if counter == limit {
-            log::warn!(
-                "expand_tt excced in repeat pattern exceed limit => {:#?}\n{:#?}",
-                template,
-                ctx
-            );
+            log::warn!("expand_tt in repeat pattern exceed limit => {:#?}\n{:#?}", template, ctx);
             break;
         }
 
@@ -149,7 +149,7 @@ impl TokenMap {
     }
 
     fn remove_delim(&mut self, idx: usize) {
-        // FIXME: This could be accidently quadratic
+        // FIXME: This could be accidentally quadratic
         self.entries.remove(idx);
     }
 }
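The FIXME above calls repeated Vec::remove "accidentally quadratic" because each call shifts the whole tail of the vector, so deleting k entries one at a time costs O(k * n). A standalone illustration of the difference, with `retain` as the usual linear-time alternative:

    fn main() {
        // One-at-a-time removal: each call is O(n), so this loop is O(k * n).
        let mut one_by_one: Vec<u32> = (0..10).collect();
        for &idx in &[8usize, 5, 2] { // descending, so the indices stay valid
            one_by_one.remove(idx);
        }

        // Single O(n) pass that keeps only the wanted elements.
        let mut retained: Vec<u32> = (0..10).collect();
        retained.retain(|&x| x != 2 && x != 5 && x != 8);

        assert_eq!(one_by_one, retained);
    }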
@@ -476,14 +476,14 @@ impl Convertor {
 
 #[derive(Debug)]
 enum SynToken {
-    Ordiniary(SyntaxToken),
+    Ordinary(SyntaxToken),
     Punch(SyntaxToken, TextSize),
 }
 
 impl SynToken {
     fn token(&self) -> &SyntaxToken {
         match self {
-            SynToken::Ordiniary(it) => it,
+            SynToken::Ordinary(it) => it,
             SynToken::Punch(it, _) => it,
         }
     }

@@ -495,7 +495,7 @@ impl SrcToken for SynToken {
     }
     fn to_char(&self) -> Option<char> {
         match self {
-            SynToken::Ordiniary(_) => None,
+            SynToken::Ordinary(_) => None,
             SynToken::Punch(it, i) => it.text().chars().nth((*i).into()),
         }
     }

@@ -535,7 +535,7 @@ impl TokenConvertor for Convertor {
         } else {
             self.punct_offset = None;
             let range = curr.text_range();
-            (SynToken::Ordiniary(curr), range)
+            (SynToken::Ordinary(curr), range)
         };
 
         Some(token)

@@ -557,7 +557,7 @@ impl TokenConvertor for Convertor {
         let token = if curr.kind().is_punct() {
             SynToken::Punch(curr, 0.into())
         } else {
-            SynToken::Ordiniary(curr)
+            SynToken::Ordinary(curr)
         };
         Some(token)
     }
@@ -1967,7 +1967,7 @@ fn test_no_space_after_semi_colon() {
 #[test]
 fn test_rustc_issue_57597() {
     fn test_error(fixture: &str) {
-        assert_eq!(parse_macro_error(fixture), ParseError::RepetitionEmtpyTokenTree);
+        assert_eq!(parse_macro_error(fixture), ParseError::RepetitionEmptyTokenTree);
     }
 
     test_error("macro_rules! foo { ($($($i:ident)?)+) => {}; }");

@@ -46,7 +46,7 @@ fn use_tree(p: &mut Parser, top_level: bool) {
         // test use_tree_list
         // use {crate::path::from::root, or::path::from::crate_name}; // Rust 2018 (with a crate named `or`)
         // use {path::from::root}; // Rust 2015
-        // use ::{some::arbritrary::path}; // Rust 2015
+        // use ::{some::arbitrary::path}; // Rust 2015
         // use ::{{{root::export}}}; // Nonsensical but perfectly legal nesting
         T!['{'] => {
             use_tree_list(p);
@@ -79,7 +79,7 @@ impl Message for Response {}
 fn read_json(inp: &mut impl BufRead) -> io::Result<Option<String>> {
     let mut buf = String::new();
     inp.read_line(&mut buf)?;
-    buf.pop(); // Remove traling '\n'
+    buf.pop(); // Remove trailing '\n'
     Ok(match buf.len() {
         0 => None,
         _ => Some(buf),

@@ -251,7 +251,7 @@ impl<S> DecodeMut<'_, '_, S> for String {
     }
 }
 
-/// Simplied version of panic payloads, ignoring
+/// Simplified version of panic payloads, ignoring
 /// types other than `&'static str` and `String`.
 #[derive(Debug)]
 pub enum PanicMessage {

@@ -4,7 +4,7 @@
 //! The lib-proc-macro server backend is `TokenStream`-agnostic, such that
 //! we could provide any TokenStream implementation.
 //! The original idea from fedochet is using proc-macro2 as backend,
-//! we use tt instead for better intergation with RA.
+//! we use tt instead for better integration with RA.
 //!
 //! FIXME: No span and source file information is implemented yet
 
@@ -130,7 +130,7 @@ pub(crate) fn apply_document_changes(
 }
 
 /// Checks that the edits inside the completion and the additional edits do not overlap.
-/// LSP explicitly forbits the additional edits to overlap both with the main edit and themselves.
+/// LSP explicitly forbids the additional edits to overlap both with the main edit and themselves.
 pub(crate) fn all_edits_are_disjoint(
     completion: &lsp_types::CompletionItem,
     additional_edits: &[lsp_types::TextEdit],
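The doc comment fixed above states the invariant this helper enforces: the completion's main edit and all additional edits must be pairwise non-overlapping. A hedged sketch of such a disjointness check over half-open offset ranges, standing in for the lsp_types-based version:

    // Ranges are half-open [start, end) byte offsets in place of LSP positions.
    fn all_disjoint(mut ranges: Vec<(usize, usize)>) -> bool {
        ranges.sort_by_key(|r| r.0);
        // After sorting by start, an overlap can only occur between neighbours.
        ranges.windows(2).all(|w| w[0].1 <= w[1].0)
    }

    fn main() {
        assert!(all_disjoint(vec![(10, 12), (0, 3), (5, 9)]));
        assert!(!all_disjoint(vec![(0, 4), (3, 6)])); // offset 3 is in both edits
    }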
@@ -290,7 +290,7 @@ mod tests {
             Some(vec![disjoint_edit.clone(), joint_edit.clone()]);
         assert!(
             !all_edits_are_disjoint(&completion_with_joint_edits, &[]),
-            "Completion with disjoint edits fails the validaton even with empty extra edits"
+            "Completion with disjoint edits fails the validation even with empty extra edits"
         );
 
         completion_with_joint_edits.text_edit =

@@ -298,7 +298,7 @@ mod tests {
         completion_with_joint_edits.additional_text_edits = Some(vec![joint_edit.clone()]);
         assert!(
             !all_edits_are_disjoint(&completion_with_joint_edits, &[]),
-            "Completion with disjoint edits fails the validaton even with empty extra edits"
+            "Completion with disjoint edits fails the validation even with empty extra edits"
         );
 
         completion_with_joint_edits.text_edit =

@@ -310,7 +310,7 @@ mod tests {
         completion_with_joint_edits.additional_text_edits = None;
         assert!(
             !all_edits_are_disjoint(&completion_with_joint_edits, &[]),
-            "Completion with disjoint edits fails the validaton even with empty extra edits"
+            "Completion with disjoint edits fails the validation even with empty extra edits"
        );
 
         completion_with_joint_edits.text_edit =

@@ -322,7 +322,7 @@ mod tests {
         completion_with_joint_edits.additional_text_edits = Some(vec![joint_edit]);
         assert!(
             !all_edits_are_disjoint(&completion_with_joint_edits, &[]),
-            "Completion with disjoint edits fails the validaton even with empty extra edits"
+            "Completion with disjoint edits fails the validation even with empty extra edits"
        );
     }
 

@@ -106,7 +106,7 @@ mod tests {
 
     #[test]
     fn test_format_docs_preserves_newlines() {
-        let comment = "this\nis\nultiline";
+        let comment = "this\nis\nmultiline";
         assert_eq!(format_docs(comment), comment);
     }
 
@@ -88,8 +88,8 @@ pub fn least_common_ancestor(u: &SyntaxNode, v: &SyntaxNode) -> Option<SyntaxNod
     let keep = u_depth.min(v_depth);
 
     let u_candidates = u.ancestors().skip(u_depth - keep);
-    let v_canidates = v.ancestors().skip(v_depth - keep);
-    let (res, _) = u_candidates.zip(v_canidates).find(|(x, y)| x == y)?;
+    let v_candidates = v.ancestors().skip(v_depth - keep);
+    let (res, _) = u_candidates.zip(v_candidates).find(|(x, y)| x == y)?;
     Some(res)
 }
 
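The identifiers fixed above belong to a depth-alignment LCA: skip ancestors of the deeper node until both chains are the same length, then zip the chains and return the first shared node. The same idea on a toy parent-pointer tree:

    // parent[i] is the parent of node i; the root is its own parent.
    fn ancestors(parent: &[usize], mut n: usize) -> Vec<usize> {
        let mut chain = vec![n];
        while parent[n] != n {
            n = parent[n];
            chain.push(n);
        }
        chain
    }

    fn lca(parent: &[usize], u: usize, v: usize) -> Option<usize> {
        let a = ancestors(parent, u);
        let b = ancestors(parent, v);
        let keep = a.len().min(b.len());
        let (a_skip, b_skip) = (a.len() - keep, b.len() - keep);
        // Mirrors the hunk: equalize depths, then find the first common node.
        let (res, _) = a.into_iter().skip(a_skip).zip(b.into_iter().skip(b_skip)).find(|(x, y)| x == y)?;
        Some(res)
    }

    fn main() {
        let parent = [0, 0, 0, 1]; // 0 is the root; 3 is a child of 1.
        assert_eq!(lca(&parent, 3, 2), Some(0));
        assert_eq!(lca(&parent, 3, 1), Some(1));
    }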
@@ -241,7 +241,7 @@ pub fn wildcard_pat() -> ast::WildcardPat {
     }
 }
 
-/// Creates a tuple of patterns from an interator of patterns.
+/// Creates a tuple of patterns from an iterator of patterns.
 ///
 /// Invariant: `pats` must be length > 1
 ///
@@ -24,7 +24,7 @@ pub struct Token {
 /// Beware that it checks for shebang first and its length contributes to resulting
 /// tokens offsets.
 pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
-    // non-empty string is a precondtion of `rustc_lexer::strip_shebang()`.
+    // non-empty string is a precondition of `rustc_lexer::strip_shebang()`.
     if text.is_empty() {
         return Default::default();
     }

@@ -76,7 +76,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxEr
 }
 
 /// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and
-/// returns `None` if any tokenization error occured.
+/// returns `None` if any tokenization error occurred.
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {

@@ -96,7 +96,7 @@ pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
-    // non-empty string is a precondtion of `rustc_lexer::first_token()`.
+    // non-empty string is a precondition of `rustc_lexer::first_token()`.
     if text.is_empty() {
         return None;
     }

@@ -117,7 +117,7 @@ fn rustc_token_kind_to_syntax_kind(
     token_text: &str,
 ) -> (SyntaxKind, Option<&'static str>) {
     // A note on an intended tradeoff:
-    // We drop some useful infromation here (see patterns with double dots `..`)
+    // We drop some useful information here (see patterns with double dots `..`)
     // Storing that info in `SyntaxKind` is not possible due to its layout requirements of
     // being `u16` that come from `rowan::SyntaxKind`.
 

@@ -173,7 +173,7 @@ pub(crate) fn validate_block_structure(root: &SyntaxNode) {
                 assert_eq!(
                     node.parent(),
                     pair.parent(),
-                    "\nunpaired curleys:\n{}\n{:#?}\n",
+                    "\nunpaired curlys:\n{}\n{:#?}\n",
                     root.text(),
                     root,
                 );
@@ -1,4 +1,4 @@
-SOURCE_FILE@0..249
+SOURCE_FILE@0..248
   USE@0..58
     USE_KW@0..3 "use"
    WHITESPACE@3..4 " "

@@ -75,62 +75,62 @@ SOURCE_FILE@0..249
         R_CURLY@119..120 "}"
     SEMICOLON@120..121 ";"
   WHITESPACE@121..122 " "
-  USE@122..166
+  USE@122..165
     COMMENT@122..134 "// Rust 2015"
     WHITESPACE@134..135 "\n"
     USE_KW@135..138 "use"
     WHITESPACE@138..139 " "
-    USE_TREE@139..165
+    USE_TREE@139..164
       COLON2@139..141 "::"
-      USE_TREE_LIST@141..165
+      USE_TREE_LIST@141..164
         L_CURLY@141..142 "{"
-        USE_TREE@142..164
-          PATH@142..164
-            PATH@142..158
+        USE_TREE@142..163
+          PATH@142..163
+            PATH@142..157
               PATH@142..146
                 PATH_SEGMENT@142..146
                   NAME_REF@142..146
                     IDENT@142..146 "some"
               COLON2@146..148 "::"
-              PATH_SEGMENT@148..158
-                NAME_REF@148..158
-                  IDENT@148..158 "arbritrary"
-            COLON2@158..160 "::"
-            PATH_SEGMENT@160..164
-              NAME_REF@160..164
-                IDENT@160..164 "path"
-        R_CURLY@164..165 "}"
-    SEMICOLON@165..166 ";"
-  WHITESPACE@166..167 " "
+              PATH_SEGMENT@148..157
+                NAME_REF@148..157
+                  IDENT@148..157 "arbitrary"
+            COLON2@157..159 "::"
+            PATH_SEGMENT@159..163
+              NAME_REF@159..163
+                IDENT@159..163 "path"
+        R_CURLY@163..164 "}"
+    SEMICOLON@164..165 ";"
+  WHITESPACE@165..166 " "
-  USE@167..205
-    COMMENT@167..179 "// Rust 2015"
-    WHITESPACE@179..180 "\n"
-    USE_KW@180..183 "use"
-    WHITESPACE@183..184 " "
-    USE_TREE@184..204
-      COLON2@184..186 "::"
-      USE_TREE_LIST@186..204
-        L_CURLY@186..187 "{"
-        USE_TREE@187..203
-          USE_TREE_LIST@187..203
-            L_CURLY@187..188 "{"
-            USE_TREE@188..202
-              USE_TREE_LIST@188..202
-                L_CURLY@188..189 "{"
-                USE_TREE@189..201
-                  PATH@189..201
-                    PATH@189..193
-                      PATH_SEGMENT@189..193
-                        NAME_REF@189..193
-                          IDENT@189..193 "root"
-                    COLON2@193..195 "::"
-                    PATH_SEGMENT@195..201
-                      NAME_REF@195..201
-                        IDENT@195..201 "export"
-                R_CURLY@201..202 "}"
-            R_CURLY@202..203 "}"
-        R_CURLY@203..204 "}"
-    SEMICOLON@204..205 ";"
-  WHITESPACE@205..206 " "
-  COMMENT@206..248 "// Nonsensical but pe ..."
-  WHITESPACE@248..249 "\n"
+  USE@166..204
+    COMMENT@166..178 "// Rust 2015"
+    WHITESPACE@178..179 "\n"
+    USE_KW@179..182 "use"
+    WHITESPACE@182..183 " "
+    USE_TREE@183..203
+      COLON2@183..185 "::"
+      USE_TREE_LIST@185..203
+        L_CURLY@185..186 "{"
+        USE_TREE@186..202
+          USE_TREE_LIST@186..202
+            L_CURLY@186..187 "{"
+            USE_TREE@187..201
+              USE_TREE_LIST@187..201
+                L_CURLY@187..188 "{"
+                USE_TREE@188..200
+                  PATH@188..200
+                    PATH@188..192
+                      PATH_SEGMENT@188..192
+                        NAME_REF@188..192
+                          IDENT@188..192 "root"
+                    COLON2@192..194 "::"
+                    PATH_SEGMENT@194..200
+                      NAME_REF@194..200
+                        IDENT@194..200 "export"
+                R_CURLY@200..201 "}"
+            R_CURLY@201..202 "}"
+        R_CURLY@202..203 "}"
+    SEMICOLON@203..204 ";"
+  WHITESPACE@204..205 " "
+  COMMENT@205..247 "// Nonsensical but pe ..."
+  WHITESPACE@247..248 "\n"
@@ -1,4 +1,4 @@
 use {crate::path::from::root, or::path::from::crate_name}; // Rust 2018 (with a crate named `or`)
 use {path::from::root}; // Rust 2015
-use ::{some::arbritrary::path}; // Rust 2015
+use ::{some::arbitrary::path}; // Rust 2015
 use ::{{{root::export}}}; // Nonsensical but perfectly legal nesting
@@ -63,7 +63,7 @@ pub fn extract_offset(text: &str) -> (TextSize, String) {
     }
 }
 
-/// Returns the offset of the first occurence of `$0` marker and the copy of `text`
+/// Returns the offset of the first occurrence of `$0` marker and the copy of `text`
 /// without the marker.
 fn try_extract_offset(text: &str) -> Option<(TextSize, String)> {
     let cursor_pos = text.find(CURSOR_MARKER)?;
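The doc comment fixed in this last hunk describes the fixture helper's contract: find the first `$0` marker, return its offset, and return the text with the marker removed. A minimal re-implementation of that contract, using usize instead of the crate's TextSize and assuming CURSOR_MARKER is "$0":

    const CURSOR_MARKER: &str = "$0";

    fn try_extract_offset(text: &str) -> Option<(usize, String)> {
        let cursor_pos = text.find(CURSOR_MARKER)?;
        // Copy the text around the marker, dropping the marker itself.
        let mut without_marker = String::with_capacity(text.len() - CURSOR_MARKER.len());
        without_marker.push_str(&text[..cursor_pos]);
        without_marker.push_str(&text[cursor_pos + CURSOR_MARKER.len()..]);
        Some((cursor_pos, without_marker))
    }

    fn main() {
        assert_eq!(try_extract_offset("fn ma$0in() {}"), Some((5, "fn main() {}".to_string())));
    }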