mirror of https://github.com/rust-lang/rust-analyzer.git (synced 2025-11-01 04:18:20 +00:00)

Merge from rust-lang/rust

commit f373437c22

10 changed files with 50 additions and 39 deletions
@@ -11,8 +11,8 @@
 use std::ops;
 
 use rustc_literal_escaper::{
-    EscapeError, Mode, unescape_byte, unescape_byte_str, unescape_c_str, unescape_char,
-    unescape_str,
+    unescape_byte, unescape_byte_str, unescape_c_str, unescape_char, unescape_str, EscapeError,
+    Mode,
 };
 
 use crate::{
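This hunk only reorders the names imported from rustc_literal_escaper; nothing is added or removed. For context, a minimal sketch of how these re-exports are typically used follows. The unescape_char signature is an assumption based on the rustc unescape code this crate was extracted from, and the helper name is made up; none of this code is part of the diff.

use rustc_literal_escaper::{EscapeError, Mode, unescape_char};

// Hedged sketch, not part of the diff: unescape the body of a char literal.
// Assumes unescape_char(&str) -> Result<char, EscapeError>.
fn char_from_literal_body(body: &str) -> Result<char, EscapeError> {
    unescape_char(body)
}

fn main() {
    // The two-character source text "\n" unescapes to a newline character.
    assert_eq!(char_from_literal_body("\\n").ok(), Some('\n'));
    // Mode tags which kind of literal is being unescaped when the lexer
    // reports escape errors; it is only re-exported in the import above.
    let _ = Mode::Char;
}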
@@ -44,7 +44,9 @@ impl<'a> LexedStr<'a> {
 
         // Re-create the tokenizer from scratch every token because `GuardedStrPrefix` is one token in the lexer
         // but we want to split it to two in edition <2024.
-        while let Some(token) = rustc_lexer::tokenize(&text[conv.offset..]).next() {
+        while let Some(token) =
+            rustc_lexer::tokenize(&text[conv.offset..], rustc_lexer::FrontmatterAllowed::No).next()
+        {
             let token_text = &text[conv.offset..][..token.len as usize];
 
             conv.extend_token(&token.kind, token_text);
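This is the substantive change in the commit: rustc_lexer::tokenize now takes a second argument saying whether a leading frontmatter block may be lexed, and the LexedStr lexer opts out with FrontmatterAllowed::No. A minimal standalone sketch of the new call shape; the input string and the printing are illustrative only, not from the diff.

use rustc_lexer::{tokenize, FrontmatterAllowed};

fn main() {
    let text = "let x = 42;";
    // tokenize now takes (&str, FrontmatterAllowed) and yields tokens with a
    // kind and a byte length; rust-analyzer re-creates the iterator once per
    // token (see the comment in the hunk above) and always passes
    // FrontmatterAllowed::No.
    for token in tokenize(text, FrontmatterAllowed::No) {
        println!("{:?} spans {} bytes", token.kind, token.len);
    }
}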
@@ -58,7 +60,7 @@ impl<'a> LexedStr<'a> {
             return None;
         }
 
-        let token = rustc_lexer::tokenize(text).next()?;
+        let token = rustc_lexer::tokenize(text, rustc_lexer::FrontmatterAllowed::No).next()?;
         if token.len as usize != text.len() {
            return None;
         }
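The same signature change applied to the single-token path. The surrounding check simply lexes the input and accepts it only when the first token spans the entire string. A self-contained sketch of that predicate, with a hypothetical helper name not taken from the diff:

use rustc_lexer::{tokenize, FrontmatterAllowed};

// Hypothetical helper mirroring the check above: `text` lexes to exactly one
// token iff the first token's length equals the length of the whole input.
fn lexes_as_single_token(text: &str) -> bool {
    if text.is_empty() {
        return false;
    }
    match tokenize(text, FrontmatterAllowed::No).next() {
        Some(token) => token.len as usize == text.len(),
        None => false,
    }
}

fn main() {
    assert!(lexes_as_single_token("ident"));
    assert!(!lexes_as_single_token("two tokens"));
}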