Redshift: UNLOAD

This commit is contained in:
Yoav Cohen 2025-08-23 09:10:05 +03:00
parent 376f47e3d1
commit 3b2bb4530e
4 changed files with 411 additions and 18 deletions

View file

@ -4291,15 +4291,24 @@ pub enum Statement {
/// ```
/// Note: this is a MySQL-specific statement. See <https://dev.mysql.com/doc/refman/8.0/en/lock-tables.html>
UnlockTables,
/// Unloads the result of a query to file
///
/// [Athena](https://docs.aws.amazon.com/athena/latest/ug/unload.html):
/// ```sql
/// UNLOAD(statement) TO <destination> [ WITH options ]
/// ```
/// See Redshift <https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html> and
/// Athena <https://docs.aws.amazon.com/athena/latest/ug/unload.html>
///
/// [Redshift](https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html):
/// ```sql
/// UNLOAD('statement') TO <destination> [ OPTIONS ]
/// ```
Unload {
query: Box<Query>,
query: Option<Box<Query>>,
query_text: Option<String>,
to: Ident,
auth: Option<IamRoleKind>,
with: Vec<SqlOption>,
options: Vec<CopyLegacyOption>,
},
/// ```sql
/// OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]]
@ -6277,13 +6286,31 @@ impl fmt::Display for Statement {
Statement::UnlockTables => {
write!(f, "UNLOCK TABLES")
}
Statement::Unload { query, to, with } => {
write!(f, "UNLOAD({query}) TO {to}")?;
Statement::Unload {
query,
query_text,
to,
auth,
with,
options,
} => {
write!(f, "UNLOAD(")?;
if let Some(query) = query {
write!(f, "{query}")?;
}
if let Some(query_text) = query_text {
write!(f, "'{query_text}'")?;
}
write!(f, ") TO {to}")?;
if let Some(auth) = auth {
write!(f, " IAM_ROLE {auth}")?;
}
if !with.is_empty() {
write!(f, " WITH ({})", display_comma_separated(with))?;
}
if !options.is_empty() {
write!(f, " {}", display_separated(options, " "))?;
}
Ok(())
}
Statement::OptimizeTable {
@ -8784,10 +8811,18 @@ pub enum CopyLegacyOption {
AcceptAnyDate,
/// ACCEPTINVCHARS
AcceptInvChars(Option<String>),
/// ADDQUOTES
AddQuotes,
/// ALLOWOVERWRITE
AllowOverwrite,
/// BINARY
Binary,
/// BLANKSASNULL
BlankAsNull,
/// BZIP2
Bzip2,
/// CLEANPATH
CleanPath,
/// CSV ...
Csv(Vec<CopyLegacyCsvOption>),
/// DATEFORMAT \[ AS \] {'dateformat_string' | 'auto' }
@ -8796,16 +8831,46 @@ pub enum CopyLegacyOption {
Delimiter(char),
/// EMPTYASNULL
EmptyAsNull,
/// ENCRYPTED \[ AUTO \]
Encrypted { auto: bool },
/// ESCAPE
Escape,
/// EXTENSION 'extension-name'
Extension(String),
/// FIXEDWIDTH \[ AS \] 'fixedwidth-spec'
FixedWidth(String),
/// GZIP
Gzip,
/// HEADER
Header,
/// IAM_ROLE { DEFAULT | 'arn:aws:iam::123456789:role/role1' }
IamRole(IamRoleKind),
/// IGNOREHEADER \[ AS \] number_rows
IgnoreHeader(u64),
/// JSON
Json,
/// MANIFEST \[ VERBOSE \]
Manifest { verbose: bool },
/// MAXFILESIZE \[ AS \] max-size \[ MB | GB \]
MaxFileSize(FileSize),
/// NULL \[ AS \] 'null_string'
Null(String),
/// PARALLEL
Parallel(Option<bool>),
/// PARQUET
Parquet,
/// PARTITION BY ( column_name [, ... ] ) \[ INCLUDE \]
PartitionBy(PartitionBy),
/// REGION \[ AS \] 'aws-region'
Region(String),
/// ROWGROUPSIZE \[ AS \] size \[ MB | GB \]
RowGroupSize(FileSize),
/// TIMEFORMAT \[ AS \] {'timeformat_string' | 'auto' | 'epochsecs' | 'epochmillisecs' }
TimeFormat(Option<String>),
/// TRUNCATECOLUMNS
TruncateColumns,
/// ZSTD
Zstd,
}
impl fmt::Display for CopyLegacyOption {
@ -8820,8 +8885,12 @@ impl fmt::Display for CopyLegacyOption {
}
Ok(())
}
AddQuotes => write!(f, "ADDQUOTES"),
AllowOverwrite => write!(f, "ALLOWOVERWRITE"),
Binary => write!(f, "BINARY"),
BlankAsNull => write!(f, "BLANKSASNULL"),
Bzip2 => write!(f, "BZIP2"),
CleanPath => write!(f, "CLEANPATH"),
Csv(opts) => {
write!(f, "CSV")?;
if !opts.is_empty() {
@ -8838,9 +8907,37 @@ impl fmt::Display for CopyLegacyOption {
}
Delimiter(char) => write!(f, "DELIMITER '{char}'"),
EmptyAsNull => write!(f, "EMPTYASNULL"),
Encrypted { auto } => write!(f, "ENCRYPTED{}", if *auto { " AUTO" } else { "" }),
Escape => write!(f, "ESCAPE"),
Extension(ext) => write!(f, "EXTENSION '{}'", value::escape_single_quote_string(ext)),
FixedWidth(spec) => write!(
f,
"FIXEDWIDTH '{}'",
value::escape_single_quote_string(spec)
),
Gzip => write!(f, "GZIP"),
Header => write!(f, "HEADER"),
IamRole(role) => write!(f, "IAM_ROLE {role}"),
IgnoreHeader(num_rows) => write!(f, "IGNOREHEADER {num_rows}"),
Json => write!(f, "JSON"),
Manifest { verbose } => write!(f, "MANIFEST{}", if *verbose { " VERBOSE" } else { "" }),
MaxFileSize(file_size) => write!(f, "MAXFILESIZE {file_size}"),
Null(string) => write!(f, "NULL '{}'", value::escape_single_quote_string(string)),
Parallel(enabled) => {
write!(
f,
"PARALLEL{}",
match enabled {
Some(true) => " TRUE",
Some(false) => " FALSE",
_ => "",
}
)
}
Parquet => write!(f, "PARQUET"),
PartitionBy(p) => write!(f, "{p}"),
Region(region) => write!(f, "REGION '{}'", value::escape_single_quote_string(region)),
RowGroupSize(file_size) => write!(f, "ROWGROUPSIZE {file_size}"),
TimeFormat(fmt) => {
write!(f, "TIMEFORMAT")?;
if let Some(fmt) = fmt {
@ -8849,10 +8946,73 @@ impl fmt::Display for CopyLegacyOption {
Ok(())
}
TruncateColumns => write!(f, "TRUNCATECOLUMNS"),
Zstd => write!(f, "ZSTD"),
}
}
}
/// A file size with an optional unit, as used by the Redshift `UNLOAD`
/// options `MAXFILESIZE` and `ROWGROUPSIZE`.
///
/// ```sql
/// SIZE \[ MB | GB \]
/// ```
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct FileSize {
    /// The numeric size value
    pub size: Value,
    /// The size unit (`MB` or `GB`); `None` when the statement omitted it
    pub unit: Option<FileSizeUnit>,
}
impl fmt::Display for FileSize {
    /// Renders the size, followed by the unit (space-separated) when present,
    /// e.g. `10 MB` or just `10`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match &self.unit {
            Some(unit) => write!(f, "{} {unit}", self.size),
            None => write!(f, "{}", self.size),
        }
    }
}
/// The unit of a [`FileSize`]: megabytes or gigabytes.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum FileSizeUnit {
    /// Megabytes (`MB`)
    MB,
    /// Gigabytes (`GB`)
    GB,
}

impl fmt::Display for FileSizeUnit {
    /// Renders the SQL keyword for the unit (`MB` or `GB`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let keyword = match self {
            FileSizeUnit::MB => "MB",
            FileSizeUnit::GB => "GB",
        };
        f.write_str(keyword)
    }
}
/// Specifies the partition keys for the unload operation
///
/// ```sql
/// PARTITION BY ( column_name [, ... ] ) [ INCLUDE ]
/// ```
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct PartitionBy {
    /// The partition key columns
    pub columns: Vec<Ident>,
    /// Whether the optional `INCLUDE` keyword was specified
    pub include: bool,
}
impl fmt::Display for PartitionBy {
    /// Renders `PARTITION BY (col1, col2, ...)`, appending ` INCLUDE` when
    /// the flag is set.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "PARTITION BY ({})", display_comma_separated(&self.columns))?;
        if self.include {
            f.write_str(" INCLUDE")?;
        }
        Ok(())
    }
}
/// An `IAM_ROLE` option in the AWS ecosystem
///
/// [Redshift COPY](https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html#copy-iam-role)

View file

@ -82,6 +82,7 @@ define_keywords!(
ACCOUNT,
ACTION,
ADD,
ADDQUOTES,
ADMIN,
AFTER,
AGAINST,
@ -92,6 +93,7 @@ define_keywords!(
ALIAS,
ALL,
ALLOCATE,
ALLOWOVERWRITE,
ALTER,
ALWAYS,
ANALYZE,
@ -159,6 +161,7 @@ define_keywords!(
BYPASSRLS,
BYTEA,
BYTES,
BZIP2,
CACHE,
CALL,
CALLED,
@ -190,6 +193,7 @@ define_keywords!(
CHECK,
CHECKSUM,
CIRCLE,
CLEANPATH,
CLEAR,
CLOB,
CLONE,
@ -322,6 +326,7 @@ define_keywords!(
ENABLE,
ENABLE_SCHEMA_EVOLUTION,
ENCODING,
ENCRYPTED,
ENCRYPTION,
END,
END_EXEC = "END-EXEC",
@ -380,6 +385,7 @@ define_keywords!(
FIRST,
FIRST_VALUE,
FIXEDSTRING,
FIXEDWIDTH,
FLATTEN,
FLOAT,
FLOAT32,
@ -411,6 +417,7 @@ define_keywords!(
FUNCTIONS,
FUSION,
FUTURE,
GB,
GENERAL,
GENERATE,
GENERATED,
@ -426,6 +433,7 @@ define_keywords!(
GROUP,
GROUPING,
GROUPS,
GZIP,
HASH,
HAVING,
HEADER,
@ -550,6 +558,7 @@ define_keywords!(
MANAGE,
MANAGED,
MANAGEDLOCATION,
MANIFEST,
MAP,
MASKING,
MATCH,
@ -560,9 +569,11 @@ define_keywords!(
MATERIALIZE,
MATERIALIZED,
MAX,
MAXFILESIZE,
MAXVALUE,
MAX_DATA_EXTENSION_TIME_IN_DAYS,
MAX_ROWS,
MB,
MEASURES,
MEDIUMBLOB,
MEDIUMINT,
@ -761,6 +772,7 @@ define_keywords!(
REFRESH_MODE,
REGCLASS,
REGEXP,
REGION,
REGR_AVGX,
REGR_AVGY,
REGR_COUNT,
@ -813,6 +825,7 @@ define_keywords!(
ROLLUP,
ROOT,
ROW,
ROWGROUPSIZE,
ROWID,
ROWS,
ROW_FORMAT,
@ -1061,7 +1074,8 @@ define_keywords!(
YEAR,
YEARS,
ZONE,
ZORDER
ZORDER,
ZSTD
);
/// These keywords can't be used as a table alias, so that `FROM table_name alias`

View file

@ -630,7 +630,10 @@ impl<'a> Parser<'a> {
Keyword::NOTIFY if self.dialect.supports_listen_notify() => self.parse_notify(),
// `PRAGMA` is sqlite specific https://www.sqlite.org/pragma.html
Keyword::PRAGMA => self.parse_pragma(),
Keyword::UNLOAD => self.parse_unload(),
Keyword::UNLOAD => {
self.prev_token();
self.parse_unload()
}
Keyword::RENAME => self.parse_rename(),
// `INSTALL` is duckdb specific https://duckdb.org/docs/extensions/overview
Keyword::INSTALL if dialect_of!(self is DuckDbDialect | GenericDialect) => {
@ -9610,17 +9613,36 @@ impl<'a> Parser<'a> {
let ret = match self.parse_one_of_keywords(&[
Keyword::ACCEPTANYDATE,
Keyword::ACCEPTINVCHARS,
Keyword::ADDQUOTES,
Keyword::ALLOWOVERWRITE,
Keyword::BINARY,
Keyword::BLANKSASNULL,
Keyword::BZIP2,
Keyword::CLEANPATH,
Keyword::CSV,
Keyword::DATEFORMAT,
Keyword::DELIMITER,
Keyword::EMPTYASNULL,
Keyword::ENCRYPTED,
Keyword::ESCAPE,
Keyword::EXTENSION,
Keyword::FIXEDWIDTH,
Keyword::GZIP,
Keyword::HEADER,
Keyword::IAM_ROLE,
Keyword::IGNOREHEADER,
Keyword::JSON,
Keyword::MANIFEST,
Keyword::MAXFILESIZE,
Keyword::NULL,
Keyword::PARALLEL,
Keyword::PARQUET,
Keyword::PARTITION,
Keyword::REGION,
Keyword::ROWGROUPSIZE,
Keyword::TIMEFORMAT,
Keyword::TRUNCATECOLUMNS,
Keyword::ZSTD,
]) {
Some(Keyword::ACCEPTANYDATE) => CopyLegacyOption::AcceptAnyDate,
Some(Keyword::ACCEPTINVCHARS) => {
@ -9632,8 +9654,12 @@ impl<'a> Parser<'a> {
};
CopyLegacyOption::AcceptInvChars(ch)
}
Some(Keyword::ADDQUOTES) => CopyLegacyOption::AddQuotes,
Some(Keyword::ALLOWOVERWRITE) => CopyLegacyOption::AllowOverwrite,
Some(Keyword::BINARY) => CopyLegacyOption::Binary,
Some(Keyword::BLANKSASNULL) => CopyLegacyOption::BlankAsNull,
Some(Keyword::BZIP2) => CopyLegacyOption::Bzip2,
Some(Keyword::CLEANPATH) => CopyLegacyOption::CleanPath,
Some(Keyword::CSV) => CopyLegacyOption::Csv({
let mut opts = vec![];
while let Some(opt) =
@ -9657,16 +9683,81 @@ impl<'a> Parser<'a> {
CopyLegacyOption::Delimiter(self.parse_literal_char()?)
}
Some(Keyword::EMPTYASNULL) => CopyLegacyOption::EmptyAsNull,
Some(Keyword::ENCRYPTED) => {
let auto = self.parse_keyword(Keyword::AUTO);
CopyLegacyOption::Encrypted { auto }
}
Some(Keyword::ESCAPE) => CopyLegacyOption::Escape,
Some(Keyword::EXTENSION) => {
let ext = self.parse_literal_string()?;
CopyLegacyOption::Extension(ext)
}
Some(Keyword::FIXEDWIDTH) => {
let spec = self.parse_literal_string()?;
CopyLegacyOption::FixedWidth(spec)
}
Some(Keyword::GZIP) => CopyLegacyOption::Gzip,
Some(Keyword::HEADER) => CopyLegacyOption::Header,
Some(Keyword::IAM_ROLE) => CopyLegacyOption::IamRole(self.parse_iam_role_kind()?),
Some(Keyword::IGNOREHEADER) => {
let _ = self.parse_keyword(Keyword::AS);
let num_rows = self.parse_literal_uint()?;
CopyLegacyOption::IgnoreHeader(num_rows)
}
Some(Keyword::JSON) => CopyLegacyOption::Json,
Some(Keyword::MANIFEST) => {
let verbose = self.parse_keyword(Keyword::VERBOSE);
CopyLegacyOption::Manifest { verbose }
}
Some(Keyword::MAXFILESIZE) => {
let _ = self.parse_keyword(Keyword::AS);
let size = self.parse_number_value()?.value;
let unit = match self.parse_one_of_keywords(&[Keyword::MB, Keyword::GB]) {
Some(Keyword::MB) => Some(FileSizeUnit::MB),
Some(Keyword::GB) => Some(FileSizeUnit::GB),
_ => None,
};
CopyLegacyOption::MaxFileSize(FileSize { size, unit })
}
Some(Keyword::NULL) => {
let _ = self.parse_keyword(Keyword::AS);
CopyLegacyOption::Null(self.parse_literal_string()?)
}
Some(Keyword::PARALLEL) => {
let enabled = match self.parse_one_of_keywords(&[
Keyword::TRUE,
Keyword::FALSE,
Keyword::ON,
Keyword::OFF,
]) {
Some(Keyword::TRUE) | Some(Keyword::ON) => Some(true),
Some(Keyword::FALSE) | Some(Keyword::OFF) => Some(false),
_ => None,
};
CopyLegacyOption::Parallel(enabled)
}
Some(Keyword::PARQUET) => CopyLegacyOption::Parquet,
Some(Keyword::PARTITION) => {
self.expect_keyword(Keyword::BY)?;
let columns = self.parse_parenthesized_column_list(IsOptional::Mandatory, false)?;
let include = self.parse_keyword(Keyword::INCLUDE);
CopyLegacyOption::PartitionBy(PartitionBy { columns, include })
}
Some(Keyword::REGION) => {
let _ = self.parse_keyword(Keyword::AS);
let region = self.parse_literal_string()?;
CopyLegacyOption::Region(region)
}
Some(Keyword::ROWGROUPSIZE) => {
let _ = self.parse_keyword(Keyword::AS);
let size = self.parse_number_value()?.value;
let unit = match self.parse_one_of_keywords(&[Keyword::MB, Keyword::GB]) {
Some(Keyword::MB) => Some(FileSizeUnit::MB),
Some(Keyword::GB) => Some(FileSizeUnit::GB),
_ => None,
};
CopyLegacyOption::RowGroupSize(FileSize { size, unit })
}
Some(Keyword::TIMEFORMAT) => {
let _ = self.parse_keyword(Keyword::AS);
let fmt = if matches!(self.peek_token().token, Token::SingleQuotedString(_)) {
@ -9677,6 +9768,7 @@ impl<'a> Parser<'a> {
CopyLegacyOption::TimeFormat(fmt)
}
Some(Keyword::TRUNCATECOLUMNS) => CopyLegacyOption::TruncateColumns,
Some(Keyword::ZSTD) => CopyLegacyOption::Zstd,
_ => self.expected("option", self.peek_token())?,
};
Ok(ret)
@ -16477,19 +16569,35 @@ impl<'a> Parser<'a> {
}
/// Parse an `UNLOAD` statement, including the leading `UNLOAD` keyword.
///
/// Athena form (bare query):
/// `UNLOAD (statement) TO <destination> [ WITH options ]`
///
/// Redshift form (quoted query text, optional auth, legacy COPY options):
/// `UNLOAD ('statement') TO <destination> [ IAM_ROLE ... ] [ OPTIONS ]`
pub fn parse_unload(&mut self) -> Result<Statement, ParserError> {
    // The matched token is not needed; use `expect_keyword_is` for
    // consistency with the `TO` keyword handling below.
    self.expect_keyword_is(Keyword::UNLOAD)?;
    self.expect_token(&Token::LParen)?;
    // Redshift passes the query as a single-quoted string literal; Athena
    // passes it as a bare (unquoted) query. Exactly one of the pair is Some.
    let (query, query_text) = if matches!(self.peek_token().token, Token::SingleQuotedString(_))
    {
        (None, Some(self.parse_literal_string()?))
    } else {
        (Some(self.parse_query()?), None)
    };
    self.expect_token(&Token::RParen)?;
    self.expect_keyword_is(Keyword::TO)?;
    let to = self.parse_identifier()?;
    // Optional Redshift authorization clause: IAM_ROLE { DEFAULT | '<arn>' }
    let auth = if self.parse_keyword(Keyword::IAM_ROLE) {
        Some(self.parse_iam_role_kind()?)
    } else {
        None
    };
    // Athena-style `WITH (...)` options; empty when the clause is absent.
    let with = self.parse_options(Keyword::WITH)?;
    // Redshift-style trailing options: keep consuming legacy COPY options
    // until the next tokens no longer parse as one.
    let mut options = vec![];
    while let Some(opt) = self.maybe_parse(|parser| parser.parse_copy_legacy_option())? {
        options.push(opt);
    }
    Ok(Statement::Unload {
        query,
        query_text,
        to,
        auth,
        with,
        options,
    })
}

View file

@ -11887,7 +11887,7 @@ fn parse_unload() {
assert_eq!(
unload,
Statement::Unload {
query: Box::new(Query {
query: Some(Box::new(Query {
body: Box::new(SetExpr::Select(Box::new(Select {
select_token: AttachedToken::empty(),
distinct: None,
@ -11924,7 +11924,7 @@ fn parse_unload() {
settings: None,
format_clause: None,
pipe_operators: vec![],
}),
})),
to: Ident {
value: "s3://...".to_string(),
quote_style: Some('\''),
@ -11939,9 +11939,120 @@ fn parse_unload() {
value: Expr::Value(
(Value::SingleQuotedString("AVRO".to_string())).with_empty_span()
)
}]
}],
query_text: None,
auth: None,
options: vec![],
}
);
one_statement_parses_to(
concat!(
"UNLOAD('SELECT 1') ",
"TO 's3://...' ",
"IAM_ROLE 'arn:aws:iam::123456789:role/role1' ",
"FORMAT AS CSV ",
"FORMAT AS PARQUET ",
"FORMAT AS JSON ",
"MAXFILESIZE AS 10 MB ",
"ROWGROUPSIZE AS 10 MB ",
"PARALLEL ON ",
"PARALLEL OFF ",
"REGION AS 'us-east-1'"
),
concat!(
"UNLOAD('SELECT 1') ",
"TO 's3://...' ",
"IAM_ROLE 'arn:aws:iam::123456789:role/role1' ",
"CSV ",
"PARQUET ",
"JSON ",
"MAXFILESIZE 10 MB ",
"ROWGROUPSIZE 10 MB ",
"PARALLEL TRUE ",
"PARALLEL FALSE ",
"REGION 'us-east-1'"
),
);
verified_stmt(concat!(
"UNLOAD('SELECT 1') ",
"TO 's3://...' ",
"IAM_ROLE 'arn:aws:iam::123456789:role/role1' ",
"PARTITION BY (c1, c2, c3)",
));
verified_stmt(concat!(
"UNLOAD('SELECT 1') ",
"TO 's3://...' ",
"IAM_ROLE 'arn:aws:iam::123456789:role/role1' ",
"PARTITION BY (c1, c2, c3) INCLUDE",
));
verified_stmt(concat!(
"UNLOAD('SELECT 1') ",
"TO 's3://...' ",
"IAM_ROLE 'arn:aws:iam::123456789:role/role1' ",
"PARTITION BY (c1, c2, c3) INCLUDE ",
"MANIFEST"
));
verified_stmt(concat!(
"UNLOAD('SELECT 1') ",
"TO 's3://...' ",
"IAM_ROLE 'arn:aws:iam::123456789:role/role1' ",
"PARTITION BY (c1, c2, c3) INCLUDE ",
"MANIFEST VERBOSE"
));
verified_stmt(concat!(
"UNLOAD('SELECT 1') ",
"TO 's3://...' ",
"IAM_ROLE 'arn:aws:iam::123456789:role/role1' ",
"PARTITION BY (c1, c2, c3) INCLUDE ",
"MANIFEST VERBOSE ",
"HEADER ",
"FIXEDWIDTH 'col1:1,col2:2' ",
"ENCRYPTED"
));
verified_stmt(concat!(
"UNLOAD('SELECT 1') ",
"TO 's3://...' ",
"IAM_ROLE 'arn:aws:iam::123456789:role/role1' ",
"PARTITION BY (c1, c2, c3) INCLUDE ",
"MANIFEST VERBOSE ",
"HEADER ",
"FIXEDWIDTH 'col1:1,col2:2' ",
"ENCRYPTED AUTO"
));
verified_stmt(concat!(
"UNLOAD('SELECT 1') ",
"TO 's3://...' ",
"IAM_ROLE 'arn:aws:iam::123456789:role/role1' ",
"PARTITION BY (c1, c2, c3) INCLUDE ",
"MANIFEST VERBOSE ",
"HEADER ",
"FIXEDWIDTH 'col1:1,col2:2' ",
"ENCRYPTED AUTO ",
"BZIP2 ",
"GZIP ",
"ZSTD ",
"ADDQUOTES ",
"NULL 'nil' ",
"ESCAPE ",
"ALLOWOVERWRITE ",
"CLEANPATH ",
"PARALLEL ",
"PARALLEL TRUE ",
"PARALLEL FALSE ",
"MAXFILESIZE 10 ",
"MAXFILESIZE 10 MB ",
"MAXFILESIZE 10 GB ",
"ROWGROUPSIZE 10 ",
"ROWGROUPSIZE 10 MB ",
"ROWGROUPSIZE 10 GB ",
"REGION 'us-east-1' ",
"EXTENSION 'ext1'"
));
}
#[test]
@ -16978,7 +17089,7 @@ fn test_parse_semantic_view_table_factor() {
for sql in invalid_sqls {
let result = dialects.parse_sql_statements(sql);
assert!(result.is_err(), "Expected error for invalid SQL: {}", sql);
assert!(result.is_err(), "Expected error for invalid SQL: {sql}");
}
let ast_sql = r#"SELECT * FROM SEMANTIC_VIEW(