mirror of https://github.com/rust-lang/rust-analyzer.git
	feat(lexer): Allow including frontmatter with 'tokenize'
parent 3a5e13a8b3
commit f4d9018a48

3 changed files with 8 additions and 6 deletions
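This commit adapts rust-analyzer's `LexedStr` conversion to the updated `rustc_lexer::tokenize` signature, which now takes a `rustc_lexer::FrontmatterAllowed` argument alongside the input text (frontmatter being the `---`-delimited metadata block that cargo-script style files may carry at the top of a source file). A minimal sketch of the new call shape, assuming the updated rustc_lexer API; the `dump_tokens` helper is hypothetical and not part of this change:

    // Hypothetical helper, not from this commit: walks a string with the
    // two-argument `tokenize` and prints each token's kind and text.
    fn dump_tokens(text: &str) {
        let mut offset = 0usize;
        // `FrontmatterAllowed::No` keeps the previous lexing behaviour; `Yes`
        // would let a leading `---` frontmatter block come back as one token
        // instead of being lexed as ordinary punctuation.
        for token in rustc_lexer::tokenize(text, rustc_lexer::FrontmatterAllowed::No) {
            let token_text = &text[offset..offset + token.len as usize];
            println!("{:?}: {token_text:?}", token.kind);
            offset += token.len as usize;
        }
    }

Both call sites in the diff below pass `FrontmatterAllowed::No`, so lexing behaviour in rust-analyzer should stay the same; the edit only satisfies the new parameter.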
				
			
@@ -11,8 +11,8 @@
 use std::ops;
 
 use rustc_literal_escaper::{
-    EscapeError, Mode, unescape_byte, unescape_byte_str, unescape_c_str, unescape_char,
-    unescape_str,
+    unescape_byte, unescape_byte_str, unescape_c_str, unescape_char, unescape_str, EscapeError,
+    Mode,
 };
 
 use crate::{
@@ -44,7 +44,9 @@ impl<'a> LexedStr<'a> {
 
         // Re-create the tokenizer from scratch every token because `GuardedStrPrefix` is one token in the lexer
         // but we want to split it to two in edition <2024.
-        while let Some(token) = rustc_lexer::tokenize(&text[conv.offset..]).next() {
+        while let Some(token) =
+            rustc_lexer::tokenize(&text[conv.offset..], rustc_lexer::FrontmatterAllowed::No).next()
+        {
             let token_text = &text[conv.offset..][..token.len as usize];
 
             conv.extend_token(&token.kind, token_text);
@@ -58,7 +60,7 @@ impl<'a> LexedStr<'a> {
             return None;
         }
 
-        let token = rustc_lexer::tokenize(text).next()?;
+        let token = rustc_lexer::tokenize(text, rustc_lexer::FrontmatterAllowed::No).next()?;
         if token.len as usize != text.len() {
             return None;
         }
Ed Page