Flatten rustpython_parser interface

Jeong YunWon 2023-02-22 17:14:20 +09:00
parent 8580e4ebb5
commit cb8c6fb78d
7 changed files with 168 additions and 177 deletions
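For downstream crates the change is mostly a matter of shorter import paths: items that used to live in the public parser, mode, and token modules are now re-exported from the crate root, as the lib.rs hunk below shows. A minimal before/after sketch of consumer code (the main wrapper is illustrative, not part of the commit):

// Before: items were reached through their defining modules.
// use rustpython_parser::parser::{parse, parse_program};
// use rustpython_parser::mode::Mode;

// After: the same items come straight from the crate root.
use rustpython_parser::{parse, parse_program, Mode};

fn main() {
    // parse_program returns a Suite (a Vec of statements) on success.
    let program = parse_program("print('hello')", "<embedded>").unwrap();
    assert_eq!(program.len(), 1);

    // parse takes an explicit Mode, e.g. for a single expression.
    let expr = parse("1 + 2", Mode::Expression, "<embedded>");
    assert!(expr.is_ok());
}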

python.lalrpop

@@ -7,10 +7,9 @@ use crate::{
     ast,
     lexer::{LexicalError, LexicalErrorType},
     function::{ArgumentList, parse_args, parse_params, validate_arguments},
-    lexer,
     context::set_context,
     string::parse_strings,
-    token::StringKind,
+    token::{self, StringKind},
 };
 use num_bigint::BigInt;
@@ -1937,106 +1936,106 @@ extern {
     type Location = ast::Location;
     type Error = LexicalError;
 
-    enum lexer::Tok {
-        Indent => lexer::Tok::Indent,
-        Dedent => lexer::Tok::Dedent,
-        StartModule => lexer::Tok::StartModule,
-        StartInteractive => lexer::Tok::StartInteractive,
-        StartExpression => lexer::Tok::StartExpression,
-        "+" => lexer::Tok::Plus,
-        "-" => lexer::Tok::Minus,
-        "~" => lexer::Tok::Tilde,
-        ":" => lexer::Tok::Colon,
-        "." => lexer::Tok::Dot,
-        "..." => lexer::Tok::Ellipsis,
-        "," => lexer::Tok::Comma,
-        "*" => lexer::Tok::Star,
-        "**" => lexer::Tok::DoubleStar,
-        "&" => lexer::Tok::Amper,
-        "@" => lexer::Tok::At,
-        "%" => lexer::Tok::Percent,
-        "//" => lexer::Tok::DoubleSlash,
-        "^" => lexer::Tok::CircumFlex,
-        "|" => lexer::Tok::Vbar,
-        "<<" => lexer::Tok::LeftShift,
-        ">>" => lexer::Tok::RightShift,
-        "/" => lexer::Tok::Slash,
-        "(" => lexer::Tok::Lpar,
-        ")" => lexer::Tok::Rpar,
-        "[" => lexer::Tok::Lsqb,
-        "]" => lexer::Tok::Rsqb,
-        "{" => lexer::Tok::Lbrace,
-        "}" => lexer::Tok::Rbrace,
-        "=" => lexer::Tok::Equal,
-        "+=" => lexer::Tok::PlusEqual,
-        "-=" => lexer::Tok::MinusEqual,
-        "*=" => lexer::Tok::StarEqual,
-        "@=" => lexer::Tok::AtEqual,
-        "/=" => lexer::Tok::SlashEqual,
-        "%=" => lexer::Tok::PercentEqual,
-        "&=" => lexer::Tok::AmperEqual,
-        "|=" => lexer::Tok::VbarEqual,
-        "^=" => lexer::Tok::CircumflexEqual,
-        "<<=" => lexer::Tok::LeftShiftEqual,
-        ">>=" => lexer::Tok::RightShiftEqual,
-        "**=" => lexer::Tok::DoubleStarEqual,
-        "//=" => lexer::Tok::DoubleSlashEqual,
-        ":=" => lexer::Tok::ColonEqual,
-        "==" => lexer::Tok::EqEqual,
-        "!=" => lexer::Tok::NotEqual,
-        "<" => lexer::Tok::Less,
-        "<=" => lexer::Tok::LessEqual,
-        ">" => lexer::Tok::Greater,
-        ">=" => lexer::Tok::GreaterEqual,
-        "->" => lexer::Tok::Rarrow,
-        "and" => lexer::Tok::And,
-        "as" => lexer::Tok::As,
-        "assert" => lexer::Tok::Assert,
-        "async" => lexer::Tok::Async,
-        "await" => lexer::Tok::Await,
-        "break" => lexer::Tok::Break,
-        "class" => lexer::Tok::Class,
-        "continue" => lexer::Tok::Continue,
-        "def" => lexer::Tok::Def,
-        "del" => lexer::Tok::Del,
-        "elif" => lexer::Tok::Elif,
-        "else" => lexer::Tok::Else,
-        "except" => lexer::Tok::Except,
-        "finally" => lexer::Tok::Finally,
-        "for" => lexer::Tok::For,
-        "from" => lexer::Tok::From,
-        "global" => lexer::Tok::Global,
-        "if" => lexer::Tok::If,
-        "import" => lexer::Tok::Import,
-        "in" => lexer::Tok::In,
-        "is" => lexer::Tok::Is,
-        "lambda" => lexer::Tok::Lambda,
-        "nonlocal" => lexer::Tok::Nonlocal,
-        "not" => lexer::Tok::Not,
-        "or" => lexer::Tok::Or,
-        "pass" => lexer::Tok::Pass,
-        "raise" => lexer::Tok::Raise,
-        "return" => lexer::Tok::Return,
-        "try" => lexer::Tok::Try,
-        "while" => lexer::Tok::While,
-        "match" => lexer::Tok::Match,
-        "case" => lexer::Tok::Case,
-        "with" => lexer::Tok::With,
-        "yield" => lexer::Tok::Yield,
-        "True" => lexer::Tok::True,
-        "False" => lexer::Tok::False,
-        "None" => lexer::Tok::None,
-        int => lexer::Tok::Int { value: <BigInt> },
-        float => lexer::Tok::Float { value: <f64> },
-        complex => lexer::Tok::Complex { real: <f64>, imag: <f64> },
-        string => lexer::Tok::String {
+    enum token::Tok {
+        Indent => token::Tok::Indent,
+        Dedent => token::Tok::Dedent,
+        StartModule => token::Tok::StartModule,
+        StartInteractive => token::Tok::StartInteractive,
+        StartExpression => token::Tok::StartExpression,
+        "+" => token::Tok::Plus,
+        "-" => token::Tok::Minus,
+        "~" => token::Tok::Tilde,
+        ":" => token::Tok::Colon,
+        "." => token::Tok::Dot,
+        "..." => token::Tok::Ellipsis,
+        "," => token::Tok::Comma,
+        "*" => token::Tok::Star,
+        "**" => token::Tok::DoubleStar,
+        "&" => token::Tok::Amper,
+        "@" => token::Tok::At,
+        "%" => token::Tok::Percent,
+        "//" => token::Tok::DoubleSlash,
+        "^" => token::Tok::CircumFlex,
+        "|" => token::Tok::Vbar,
+        "<<" => token::Tok::LeftShift,
+        ">>" => token::Tok::RightShift,
+        "/" => token::Tok::Slash,
+        "(" => token::Tok::Lpar,
+        ")" => token::Tok::Rpar,
+        "[" => token::Tok::Lsqb,
+        "]" => token::Tok::Rsqb,
+        "{" => token::Tok::Lbrace,
+        "}" => token::Tok::Rbrace,
+        "=" => token::Tok::Equal,
+        "+=" => token::Tok::PlusEqual,
+        "-=" => token::Tok::MinusEqual,
+        "*=" => token::Tok::StarEqual,
+        "@=" => token::Tok::AtEqual,
+        "/=" => token::Tok::SlashEqual,
+        "%=" => token::Tok::PercentEqual,
+        "&=" => token::Tok::AmperEqual,
+        "|=" => token::Tok::VbarEqual,
+        "^=" => token::Tok::CircumflexEqual,
+        "<<=" => token::Tok::LeftShiftEqual,
+        ">>=" => token::Tok::RightShiftEqual,
+        "**=" => token::Tok::DoubleStarEqual,
+        "//=" => token::Tok::DoubleSlashEqual,
+        ":=" => token::Tok::ColonEqual,
+        "==" => token::Tok::EqEqual,
+        "!=" => token::Tok::NotEqual,
+        "<" => token::Tok::Less,
+        "<=" => token::Tok::LessEqual,
+        ">" => token::Tok::Greater,
+        ">=" => token::Tok::GreaterEqual,
+        "->" => token::Tok::Rarrow,
+        "and" => token::Tok::And,
+        "as" => token::Tok::As,
+        "assert" => token::Tok::Assert,
+        "async" => token::Tok::Async,
+        "await" => token::Tok::Await,
+        "break" => token::Tok::Break,
+        "class" => token::Tok::Class,
+        "continue" => token::Tok::Continue,
+        "def" => token::Tok::Def,
+        "del" => token::Tok::Del,
+        "elif" => token::Tok::Elif,
+        "else" => token::Tok::Else,
+        "except" => token::Tok::Except,
+        "finally" => token::Tok::Finally,
+        "for" => token::Tok::For,
+        "from" => token::Tok::From,
+        "global" => token::Tok::Global,
+        "if" => token::Tok::If,
+        "import" => token::Tok::Import,
+        "in" => token::Tok::In,
+        "is" => token::Tok::Is,
+        "lambda" => token::Tok::Lambda,
+        "nonlocal" => token::Tok::Nonlocal,
+        "not" => token::Tok::Not,
+        "or" => token::Tok::Or,
+        "pass" => token::Tok::Pass,
+        "raise" => token::Tok::Raise,
+        "return" => token::Tok::Return,
+        "try" => token::Tok::Try,
+        "while" => token::Tok::While,
+        "match" => token::Tok::Match,
+        "case" => token::Tok::Case,
+        "with" => token::Tok::With,
+        "yield" => token::Tok::Yield,
+        "True" => token::Tok::True,
+        "False" => token::Tok::False,
+        "None" => token::Tok::None,
+        int => token::Tok::Int { value: <BigInt> },
+        float => token::Tok::Float { value: <f64> },
+        complex => token::Tok::Complex { real: <f64>, imag: <f64> },
+        string => token::Tok::String {
             value: <String>,
             kind: <StringKind>,
             triple_quoted: <bool>
         },
-        name => lexer::Tok::Name { name: <String> },
-        "\n" => lexer::Tok::Newline,
-        ";" => lexer::Tok::Semi,
-        "#" => lexer::Tok::Comment(_),
+        name => token::Tok::Name { name: <String> },
+        "\n" => token::Tok::Newline,
+        ";" => token::Tok::Semi,
+        "#" => token::Tok::Comment(_),
     }
 }

function.rs

@@ -1,7 +1,9 @@
 // Contains functions that perform validation and parsing of arguments and parameters.
 // Checks apply both to functions and to lambdas.
-use crate::ast;
-use crate::lexer::{LexicalError, LexicalErrorType};
+use crate::{
+    ast,
+    lexer::{LexicalError, LexicalErrorType},
+};
 use rustc_hash::FxHashSet;
 
 pub(crate) struct ArgumentList {

lexer.rs

@@ -12,9 +12,7 @@
 //! # Example
 //!
 //! ```
-//! use rustpython_parser::lexer::{lex, Tok};
-//! use rustpython_parser::mode::Mode;
-//! use rustpython_parser::token::StringKind;
+//! use rustpython_parser::{lexer::lex, Tok, Mode, StringKind};
 //!
 //! let source = "x = 'RustPython'";
 //! let tokens = lex(source, Mode::Module)
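Pieced together, the updated doc example reads roughly as below; the collect step is an assumed continuation, since this hunk only shows the import and the first lex line:

use rustpython_parser::{lexer::lex, Mode, Tok};

fn main() {
    let source = "x = 'RustPython'";
    // Each lexer item is a Result over a (start, token, end) triple; keep just the token.
    let tokens: Vec<Tok> = lex(source, Mode::Module)
        .map(|tok| tok.expect("Failed to lex").1)
        .collect();
    println!("{tokens:?}");
}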
@@ -33,19 +31,16 @@
 //! ```
 //!
 //! [Lexical analysis]: https://docs.python.org/3/reference/lexical_analysis.html
-pub use super::token::{StringKind, Tok};
-use crate::ast::Location;
-use crate::mode::Mode;
-use crate::soft_keywords::SoftKeywordTransformer;
-use crate::string::FStringErrorType;
+use crate::{
+    ast::Location,
+    mode::Mode,
+    soft_keywords::SoftKeywordTransformer,
+    string::FStringErrorType,
+    token::{StringKind, Tok},
+};
 use num_bigint::BigInt;
-use num_traits::identities::Zero;
-use num_traits::Num;
-use std::char;
-use std::cmp::Ordering;
-use std::ops::Index;
-use std::slice::SliceIndex;
-use std::str::FromStr;
+use num_traits::{Num, Zero};
+use std::{char, cmp::Ordering, ops::Index, slice::SliceIndex, str::FromStr};
 use unic_emoji_char::is_emoji_presentation;
 use unic_ucd_ident::{is_xid_continue, is_xid_start};
@@ -200,8 +195,7 @@ pub type LexResult = Result<Spanned, LexicalError>;
 /// # Examples
 ///
 /// ```
-/// use rustpython_parser::mode::Mode;
-/// use rustpython_parser::lexer::{lex};
+/// use rustpython_parser::{Mode, lexer::lex};
 ///
 /// let source = "def hello(): return 'world'";
 /// let lexer = lex(source, Mode::Module);
@@ -1320,8 +1314,7 @@ impl std::fmt::Display for LexicalErrorType {
 #[cfg(test)]
 mod tests {
-    use super::{lex, StringKind, Tok};
-    use crate::mode::Mode;
+    use super::*;
     use num_bigint::BigInt;
 
     const WINDOWS_EOL: &str = "\r\n";

lib.rs

@@ -54,20 +54,18 @@
 //!
 //! The functionality of this crate is split into several modules:
 //!
-//! - [token]: This module contains the definition of the tokens that are generated by the lexer.
+//! - token: This module contains the definition of the tokens that are generated by the lexer.
 //! - [lexer]: This module contains the lexer and is responsible for generating the tokens.
-//! - [parser]: This module contains an interface to the parser and is responsible for generating the AST.
+//! - parser: This module contains an interface to the parser and is responsible for generating the AST.
 //! - Functions and strings have special parsing requirements that are handled in additional files.
-//! - [mode]: This module contains the definition of the different modes that the parser can be in.
-//! - [error]: This module contains the definition of the errors that can be returned by the parser.
+//! - mode: This module contains the definition of the different modes that the parser can be in.
 //!
 //! # Examples
 //!
 //! For example, to get a stream of tokens from a given string, one could do this:
 //!
 //! ```
-//! use rustpython_parser::mode::Mode;
-//! use rustpython_parser::lexer::lex;
+//! use rustpython_parser::{lexer::lex, Mode};
 //!
 //! let python_source = r#"
 //! def is_odd(i):
@@ -80,9 +78,7 @@
 //! These tokens can be directly fed into the parser to generate an AST:
 //!
 //! ```
-//! use rustpython_parser::lexer::lex;
-//! use rustpython_parser::mode::Mode;
-//! use rustpython_parser::parser::parse_tokens;
+//! use rustpython_parser::{lexer::lex, Mode, parse_tokens};
 //!
 //! let python_source = r#"
 //! def is_odd(i):
@@ -98,7 +94,7 @@
 //! mode or tokenizing the source beforehand:
 //!
 //! ```
-//! use rustpython_parser::parser::parse_program;
+//! use rustpython_parser::parse_program;
 //!
 //! let python_source = r#"
 //! def is_odd(i):
@@ -111,11 +107,7 @@
 //!
 //! [lexical analysis]: https://en.wikipedia.org/wiki/Lexical_analysis
 //! [parsing]: https://en.wikipedia.org/wiki/Parsing
-//! [token]: crate::token
 //! [lexer]: crate::lexer
-//! [parser]: crate::parser
-//! [mode]: crate::mode
-//! [error]: crate::error
 
 #![doc(html_logo_url = "https://raw.githubusercontent.com/RustPython/RustPython/main/logo.png")]
 #![doc(html_root_url = "https://docs.rs/rustpython-parser/")]
@@ -125,12 +117,21 @@ extern crate log;
 pub use rustpython_ast as ast;
 
 mod function;
+// Skip flattening lexer to distinguish from full parser
 pub mod lexer;
-pub mod mode;
-pub mod parser;
+mod mode;
+mod parser;
 mod string;
 #[rustfmt::skip]
 mod python;
 mod context;
 mod soft_keywords;
-pub mod token;
+mod token;
+
+pub use mode::Mode;
+pub use parser::{
+    parse, parse_expression, parse_expression_located, parse_located, parse_program, parse_tokens,
+    ParseError, ParseErrorType,
+};
+pub use string::FStringErrorType;
+pub use token::{StringKind, Tok};
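These re-exports, plus the still-public lexer module, cover the tokenize-then-parse flow without any parser:: or mode:: paths; a small sketch (the helper function below is hypothetical, not part of the crate):

use rustpython_parser::{lexer::lex, parse_tokens, Mode, ParseError};

// Hypothetical helper: lex first, then feed the token stream to the parser,
// using only crate-root re-exports and the public lexer module.
fn parse_expr(source: &str) -> Result<rustpython_parser::ast::Mod, ParseError> {
    parse_tokens(lex(source, Mode::Expression), Mode::Expression, "<embedded>")
}

fn main() {
    assert!(parse_expr("1 + 2").is_ok());
}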

parser.rs

@@ -12,10 +12,13 @@
 //! [Abstract Syntax Tree]: https://en.wikipedia.org/wiki/Abstract_syntax_tree
 //! [`Mode`]: crate::mode
 
-use crate::lexer::{LexResult, LexicalError, LexicalErrorType, Tok};
-pub use crate::mode::Mode;
-use crate::{ast, lexer, python};
-use ast::Location;
+use crate::{
+    ast::{self, Location},
+    lexer::{self, LexResult, LexicalError, LexicalErrorType},
+    mode::Mode,
+    python,
+    token::Tok,
+};
 use itertools::Itertools;
 use std::iter;
@@ -31,7 +34,7 @@ pub(super) use lalrpop_util::ParseError as LalrpopError;
 /// For example, parsing a simple function definition and a call to that function:
 ///
 /// ```
-/// use rustpython_parser::parser;
+/// use rustpython_parser as parser;
 /// let source = r#"
 /// def foo():
 /// return 42
@@ -59,7 +62,7 @@ pub fn parse_program(source: &str, source_path: &str) -> Result<ast::Suite, Pars
 ///
 /// ```
 /// extern crate num_bigint;
-/// use rustpython_parser::{parser, ast};
+/// use rustpython_parser as parser;
 /// let expr = parser::parse_expression("1 + 2", "<embedded>");
 ///
 /// assert!(expr.is_ok());
@@ -80,8 +83,7 @@ pub fn parse_expression(source: &str, path: &str) -> Result<ast::Expr, ParseErro
 /// somewhat silly, location:
 ///
 /// ```
-/// use rustpython_parser::parser::parse_expression_located;
-/// use rustpython_parser::ast::Location;
+/// use rustpython_parser::{ast::Location, parse_expression_located};
 ///
 /// let expr = parse_expression_located("1 + 2", "<embedded>", Location::new(5, 20));
 /// assert!(expr.is_ok());
@@ -108,8 +110,7 @@ pub fn parse_expression_located(
 /// parsing:
 ///
 /// ```
-/// use rustpython_parser::mode::Mode;
-/// use rustpython_parser::parser::parse;
+/// use rustpython_parser::{Mode, parse};
 ///
 /// let expr = parse("1 + 2", Mode::Expression, "<embedded>");
 /// assert!(expr.is_ok());
@@ -118,8 +119,7 @@ pub fn parse_expression_located(
 /// Alternatively, we can parse a full Python program consisting of multiple lines:
 ///
 /// ```
-/// use rustpython_parser::mode::Mode;
-/// use rustpython_parser::parser::parse;
+/// use rustpython_parser::{Mode, parse};
 ///
 /// let source = r#"
 /// class Greeter:
@@ -142,9 +142,7 @@ pub fn parse(source: &str, mode: Mode, source_path: &str) -> Result<ast::Mod, Pa
 /// # Example
 ///
 /// ```
-/// use rustpython_parser::ast::Location;
-/// use rustpython_parser::mode::Mode;
-/// use rustpython_parser::parser::parse_located;
+/// use rustpython_parser::{ast::Location, Mode, parse_located};
 ///
 /// let source = r#"
 /// def fib(i):
@@ -178,9 +176,7 @@ pub fn parse_located(
 /// them using the [`lexer::lex`] function:
 ///
 /// ```
-/// use rustpython_parser::lexer::lex;
-/// use rustpython_parser::mode::Mode;
-/// use rustpython_parser::parser::parse_tokens;
+/// use rustpython_parser::{lexer::lex, Mode, parse_tokens};
 ///
 /// let expr = parse_tokens(lex("1 + 2", Mode::Expression), Mode::Expression, "<embedded>");
 /// assert!(expr.is_ok());
@@ -200,9 +196,7 @@ pub fn parse_tokens(
 }
 
 /// Represents represent errors that occur during parsing and are
-/// returned by the `parse_*` functions in the [parser] module.
-///
-/// [parser]: crate::parser
+/// returned by the `parse_*` functions.
 pub type ParseError = rustpython_compiler_core::BaseError<ParseErrorType>;
 
 /// Represents the different types of errors that can occur during parsing.
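Since ParseError and ParseErrorType are now crate-root re-exports as well, error handling needs no parser:: path either; a brief sketch:

use rustpython_parser::{parse_program, ParseError};

fn main() {
    // A syntactically broken program surfaces as a ParseError.
    let result: Result<_, ParseError> = parse_program("def f(:\n", "<embedded>");
    assert!(result.is_err());
}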

soft_keywords.rs

@@ -1,8 +1,6 @@
+use crate::{lexer::LexResult, mode::Mode, token::Tok};
 use itertools::{Itertools, MultiPeek};
-use crate::lexer::{LexResult, Tok};
-pub use crate::mode::Mode;
 
 /// An [`Iterator`] that transforms a token stream to accommodate soft keywords (namely, `match`
 /// and `case`).
 ///
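SoftKeywordTransformer itself stays private; its effect is only visible through parsing, where `match` and `case` remain usable as ordinary names. A small sketch, assuming soft-keyword handling behaves as the doc comment describes:

use rustpython_parser::parse_program;

fn main() {
    // `match` as a plain identifier still parses...
    assert!(parse_program("match = 42\n", "<embedded>").is_ok());
    // ...and so does an actual match statement.
    assert!(parse_program("match x:\n    case 1:\n        pass\n", "<embedded>").is_ok());
}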

string.rs

@@ -3,7 +3,6 @@
 // The lexer doesn't do any special handling of f-strings, it just treats them as
 // regular strings. Since the parser has no definition of f-string formats (Pending PEP 701)
 // we have to do the parsing here, manually.
-use self::FStringErrorType::*;
 use crate::{
     ast::{Constant, ConversionFlag, Expr, ExprKind, Location},
     lexer::{LexicalError, LexicalErrorType},
@@ -11,13 +10,12 @@ use crate::{
     token::{StringKind, Tok},
 };
 use itertools::Itertools;
-use std::{iter, str};
 
 // unicode_name2 does not expose `MAX_NAME_LENGTH`, so we replicate that constant here, fix #3798
 const MAX_UNICODE_NAME: usize = 88;
 
 struct StringParser<'a> {
-    chars: iter::Peekable<str::Chars<'a>>,
+    chars: std::iter::Peekable<std::str::Chars<'a>>,
     kind: StringKind,
     start: Location,
     end: Location,
@@ -177,6 +175,8 @@ impl<'a> StringParser<'a> {
     }
 
     fn parse_formatted_value(&mut self, nested: u8) -> Result<Vec<Expr>, LexicalError> {
+        use FStringErrorType::*;
+
         let mut expression = String::new();
         let mut spec = None;
         let mut delims = Vec::new();
@@ -402,6 +402,8 @@ impl<'a> StringParser<'a> {
     }
 
     fn parse_fstring(&mut self, nested: u8) -> Result<Vec<Expr>, LexicalError> {
+        use FStringErrorType::*;
+
         if nested >= 2 {
             return Err(FStringError::new(ExpressionNestedTooDeeply, self.get_pos()).into());
         }
@@ -653,7 +655,7 @@ pub(crate) fn parse_strings(
 
 // TODO: consolidate these with ParseError
 /// An error that occurred during parsing of an f-string.
 #[derive(Debug, PartialEq)]
-pub struct FStringError {
+struct FStringError {
     /// The type of error that occurred.
     pub error: FStringErrorType,
     /// The location of the error.
@@ -708,28 +710,29 @@ pub enum FStringErrorType {
 
 impl std::fmt::Display for FStringErrorType {
     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        use FStringErrorType::*;
         match self {
-            FStringErrorType::UnclosedLbrace => write!(f, "expecting '}}'"),
-            FStringErrorType::UnopenedRbrace => write!(f, "Unopened '}}'"),
-            FStringErrorType::ExpectedRbrace => write!(f, "Expected '}}' after conversion flag."),
-            FStringErrorType::InvalidExpression(error) => {
+            UnclosedLbrace => write!(f, "expecting '}}'"),
+            UnopenedRbrace => write!(f, "Unopened '}}'"),
+            ExpectedRbrace => write!(f, "Expected '}}' after conversion flag."),
+            InvalidExpression(error) => {
                 write!(f, "{error}")
             }
-            FStringErrorType::InvalidConversionFlag => write!(f, "invalid conversion character"),
-            FStringErrorType::EmptyExpression => write!(f, "empty expression not allowed"),
-            FStringErrorType::MismatchedDelimiter(first, second) => write!(
+            InvalidConversionFlag => write!(f, "invalid conversion character"),
+            EmptyExpression => write!(f, "empty expression not allowed"),
+            MismatchedDelimiter(first, second) => write!(
                 f,
                 "closing parenthesis '{second}' does not match opening parenthesis '{first}'"
             ),
-            FStringErrorType::SingleRbrace => write!(f, "single '}}' is not allowed"),
-            FStringErrorType::Unmatched(delim) => write!(f, "unmatched '{delim}'"),
-            FStringErrorType::ExpressionNestedTooDeeply => {
+            SingleRbrace => write!(f, "single '}}' is not allowed"),
+            Unmatched(delim) => write!(f, "unmatched '{delim}'"),
+            ExpressionNestedTooDeeply => {
                 write!(f, "expressions nested too deeply")
             }
-            FStringErrorType::UnterminatedString => {
+            UnterminatedString => {
                 write!(f, "unterminated string")
            }
-            FStringErrorType::ExpressionCannotInclude(c) => {
+            ExpressionCannotInclude(c) => {
                 if *c == '\\' {
                     write!(f, "f-string expression part cannot include a backslash")
                 } else {
@@ -832,6 +835,7 @@ mod tests {
 
     #[test]
     fn test_parse_invalid_fstring() {
+        use FStringErrorType::*;
         assert_eq!(parse_fstring_error("{5!a"), UnclosedLbrace);
        assert_eq!(parse_fstring_error("{5!a1}"), UnclosedLbrace);
        assert_eq!(parse_fstring_error("{5!"), UnclosedLbrace);
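End to end, an invalid f-string reaches callers as an ordinary parse error whose message comes from the Display impl above; a small sketch:

use rustpython_parser::parse_program;

fn main() {
    // `{5!a` is missing its closing brace, the UnclosedLbrace case tested above.
    assert!(parse_program("x = f'{5!a'\n", "<embedded>").is_err());
}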