Mirror of https://github.com/apache/datafusion-sqlparser-rs.git, synced 2025-08-23 23:44:07 +00:00
Rebase and code review comments

commit 23dd7c28f1 (parent c45d605578)
5 changed files with 71 additions and 518 deletions
@@ -30,13 +30,7 @@ use sqlparser_derive::{Visit, VisitMut};
 use crate::ast::value::escape_single_quote_string;
 use crate::ast::{
-    display_comma_separated, display_separated, ArgMode, CommentDef, CreateFunctionBody,
-    CreateFunctionUsing, CreateTableLikeKind, CreateTableOptions, DataType, Expr, FileFormat,
-    FunctionBehavior, FunctionCalledOnNull, FunctionDeterminismSpecifier, FunctionParallel,
-    HiveDistributionStyle, HiveFormat, HiveIOFormat, HiveRowFormat, Ident, MySQLColumnPosition,
-    ObjectName, OnCommit, OneOrManyWithParens, OperateFunctionArg, OrderByExpr, ProjectionSelect,
-    Query, RowAccessPolicy, SequenceOptions, Spanned, SqlOption, StorageSerializationPolicy, Tag,
-    Value, ValueWithSpan, WrappedCollection,
+    display_comma_separated, display_separated, ArgMode, CommentDef, CreateFunctionBody, CreateFunctionUsing, CreateTableLikeKind, CreateTableOptions, DataType, Expr, FileFormat, FunctionBehavior, FunctionCalledOnNull, FunctionDeterminismSpecifier, FunctionParallel, HiveDistributionStyle, HiveFormat, HiveIOFormat, HiveRowFormat, Ident, InitializeKind, MySQLColumnPosition, ObjectName, OnCommit, OneOrManyWithParens, OperateFunctionArg, OrderByExpr, ProjectionSelect, Query, RefreshModeKind, RowAccessPolicy, SequenceOptions, Spanned, SqlOption, StorageSerializationPolicy, TableVersion, Tag, Value, ValueWithSpan, WrappedCollection
 };
 use crate::display_utils::{DisplayCommaSeparated, Indent, NewLine, SpaceOrNewline};
 use crate::keywords::Keyword;
@@ -2428,6 +2422,7 @@ pub struct CreateTable {
     pub or_replace: bool,
     pub temporary: bool,
     pub external: bool,
+    pub dynamic: bool,
     pub global: Option<bool>,
     pub if_not_exists: bool,
     pub transient: bool,
@@ -2448,6 +2443,7 @@ pub struct CreateTable {
     pub without_rowid: bool,
     pub like: Option<CreateTableLikeKind>,
     pub clone: Option<ObjectName>,
+    pub version: Option<TableVersion>,
     // For Hive dialect, the table comment is after the column definitions without `=`,
     // so the `comment` field is optional and different than the comment field in the general options list.
     // [Hive](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-CreateTable)
@@ -2525,6 +2521,21 @@ pub struct CreateTable {
     /// Snowflake "STORAGE_SERIALIZATION_POLICY" clause for Iceberg tables
     /// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
     pub storage_serialization_policy: Option<StorageSerializationPolicy>,
+    /// Snowflake "TARGET_LAG" clause for dynamic tables
+    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
+    pub target_lag: Option<String>,
+    /// Snowflake "WAREHOUSE" clause for dynamic tables
+    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
+    pub warehouse: Option<Ident>,
+    /// Snowflake "REFRESH_MODE" clause for dynamic tables
+    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
+    pub refresh_mode: Option<RefreshModeKind>,
+    /// Snowflake "INITIALIZE" clause for dynamic tables
+    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
+    pub initialize: Option<InitializeKind>,
+    /// Snowflake "REQUIRE USER" clause for dynamic tables
+    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
+    pub require_user: bool,
 }

 impl fmt::Display for CreateTable {
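For orientation (not part of this diff): the five new fields map one-to-one onto the Snowflake dynamic-table clauses exercised by the test added at the end of this commit. Below is a minimal sketch of reading them back after parsing, assuming the crate is consumed as `sqlparser` and that `Statement::CreateTable` wraps this struct, as in current releases.

use sqlparser::ast::Statement;
use sqlparser::dialect::SnowflakeDialect;
use sqlparser::parser::Parser;

fn main() {
    // SQL taken from the test added in this commit.
    let sql = concat!(
        "CREATE DYNAMIC TABLE my_dynamic_table",
        " TARGET_LAG='DOWNSTREAM'",
        " WAREHOUSE=mywh",
        " REFRESH_MODE=FULL",
        " INITIALIZE=ON_SCHEDULE",
        " REQUIRE USER",
        " AS SELECT product_id, product_name FROM staging_table"
    );
    let stmts = Parser::parse_sql(&SnowflakeDialect {}, sql).expect("parse");
    if let Statement::CreateTable(t) = &stmts[0] {
        // The new fields carry the dynamic-table clauses.
        assert!(t.dynamic && t.require_user);
        assert_eq!(t.target_lag.as_deref(), Some("DOWNSTREAM"));
        assert_eq!(t.warehouse.as_ref().map(|w| w.value.as_str()), Some("mywh"));
    }
}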
@@ -2538,7 +2549,7 @@ impl fmt::Display for CreateTable {
         // `CREATE TABLE t (a INT) AS SELECT a from t2`
         write!(
             f,
-            "CREATE {or_replace}{external}{global}{temporary}{transient}{volatile}{iceberg}TABLE {if_not_exists}{name}",
+            "CREATE {or_replace}{external}{global}{temporary}{transient}{volatile}{dynamic}{iceberg}TABLE {if_not_exists}{name}",
             or_replace = if self.or_replace { "OR REPLACE " } else { "" },
             external = if self.external { "EXTERNAL " } else { "" },
             global = self.global
@@ -2556,6 +2567,7 @@ impl fmt::Display for CreateTable {
             volatile = if self.volatile { "VOLATILE " } else { "" },
             // Only for Snowflake
             iceberg = if self.iceberg { "ICEBERG " } else { "" },
+            dynamic = if self.dynamic { "DYNAMIC " } else { "" },
             name = self.name,
         )?;
         if let Some(on_cluster) = &self.on_cluster {
@@ -2598,6 +2610,10 @@ impl fmt::Display for CreateTable {
             write!(f, " CLONE {c}")?;
         }

+        if let Some(version) = &self.version {
+            write!(f, " {version}")?;
+        }
+
         match &self.hive_distribution {
             HiveDistributionStyle::PARTITIONED { columns } => {
                 write!(f, " PARTITIONED BY ({})", display_comma_separated(columns))?;
@@ -2700,27 +2716,27 @@ impl fmt::Display for CreateTable {
             write!(f, " {options}")?;
         }
         if let Some(external_volume) = self.external_volume.as_ref() {
-            write!(f, " EXTERNAL_VOLUME = '{external_volume}'")?;
+            write!(f, " EXTERNAL_VOLUME='{external_volume}'")?;
         }

         if let Some(catalog) = self.catalog.as_ref() {
-            write!(f, " CATALOG = '{catalog}'")?;
+            write!(f, " CATALOG='{catalog}'")?;
         }

         if self.iceberg {
             if let Some(base_location) = self.base_location.as_ref() {
-                write!(f, " BASE_LOCATION = '{base_location}'")?;
+                write!(f, " BASE_LOCATION='{base_location}'")?;
             }
         }

         if let Some(catalog_sync) = self.catalog_sync.as_ref() {
-            write!(f, " CATALOG_SYNC = '{catalog_sync}'")?;
+            write!(f, " CATALOG_SYNC='{catalog_sync}'")?;
         }

         if let Some(storage_serialization_policy) = self.storage_serialization_policy.as_ref() {
             write!(
                 f,
-                " STORAGE_SERIALIZATION_POLICY = {storage_serialization_policy}"
+                " STORAGE_SERIALIZATION_POLICY={storage_serialization_policy}"
             )?;
         }

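Aside (not part of this diff): dropping the spaces around `=` only changes serialization; the tokenizer still accepts either form on input. A rough sketch, with a hypothetical table definition modeled on the existing Snowflake Iceberg tests (the exact clause set the parser requires is an assumption here):

use sqlparser::dialect::SnowflakeDialect;
use sqlparser::parser::Parser;

fn main() {
    // Input may still use spaces around `=`...
    let sql = "CREATE ICEBERG TABLE t (a INT) \
               EXTERNAL_VOLUME = 'vol' CATALOG = 'SNOWFLAKE' BASE_LOCATION = 'some/path'";
    let stmt = &Parser::parse_sql(&SnowflakeDialect {}, sql).expect("parse")[0];
    // ...while Display now renders the options without them, e.g.
    // `... EXTERNAL_VOLUME='vol' CATALOG='SNOWFLAKE' BASE_LOCATION='some/path'`.
    println!("{stmt}");
}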
@@ -2774,6 +2790,26 @@ impl fmt::Display for CreateTable {
             write!(f, " WITH TAG ({})", display_comma_separated(tag.as_slice()))?;
         }

+        if let Some(target_lag) = &self.target_lag {
+            write!(f, " TARGET_LAG='{target_lag}'")?;
+        }
+
+        if let Some(warehouse) = &self.warehouse {
+            write!(f, " WAREHOUSE={warehouse}")?;
+        }
+
+        if let Some(refresh_mode) = &self.refresh_mode {
+            write!(f, " REFRESH_MODE={refresh_mode}")?;
+        }
+
+        if let Some(initialize) = &self.initialize {
+            write!(f, " INITIALIZE={initialize}")?;
+        }
+
+        if self.require_user {
+            write!(f, " REQUIRE USER")?;
+        }
+
         if self.on_commit.is_some() {
             let on_commit = match self.on_commit {
                 Some(OnCommit::DeleteRows) => "ON COMMIT DELETE ROWS",
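A usage note (not part of this diff): these blocks emit the new clauses in a fixed order, which is what the round-trip assertions in the new test rely on. A minimal sketch of the same check outside the test harness, assuming the crate is used as `sqlparser`:

use sqlparser::dialect::SnowflakeDialect;
use sqlparser::parser::Parser;

fn main() {
    // Canonical form produced by the Display impl above (same SQL the new test verifies).
    let sql = concat!(
        "CREATE DYNAMIC TABLE my_dynamic_table",
        " TARGET_LAG='DOWNSTREAM'",
        " WAREHOUSE=mywh",
        " REFRESH_MODE=INCREMENTAL",
        " INITIALIZE=ON_SCHEDULE",
        " REQUIRE USER",
        " AS SELECT product_id, product_name FROM staging_table"
    );
    let stmts = Parser::parse_sql(&SnowflakeDialect {}, sql).expect("parse");
    // `verified_stmt` in the test suite asserts exactly this equality.
    assert_eq!(stmts[0].to_string(), sql);
}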
src/ast/dml.rs (499 lines changed)
@@ -37,505 +37,6 @@ use super::{
     Setting, SqliteOnConflict, TableObject, TableWithJoins,
 };

-/// Index column type.
-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
-pub struct IndexColumn {
-    pub column: OrderByExpr,
-    pub operator_class: Option<Ident>,
-}
-
-impl Display for IndexColumn {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self.column)?;
-        if let Some(operator_class) = &self.operator_class {
-            write!(f, " {operator_class}")?;
-        }
-        Ok(())
-    }
-}
-
-/// CREATE INDEX statement.
-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
-pub struct CreateIndex {
-    /// index name
-    pub name: Option<ObjectName>,
-    #[cfg_attr(feature = "visitor", visit(with = "visit_relation"))]
-    pub table_name: ObjectName,
-    pub using: Option<IndexType>,
-    pub columns: Vec<IndexColumn>,
-    pub unique: bool,
-    pub concurrently: bool,
-    pub if_not_exists: bool,
-    pub include: Vec<Ident>,
-    pub nulls_distinct: Option<bool>,
-    /// WITH clause: <https://www.postgresql.org/docs/current/sql-createindex.html>
-    pub with: Vec<Expr>,
-    pub predicate: Option<Expr>,
-}
-
-impl Display for CreateIndex {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            f,
-            "CREATE {unique}INDEX {concurrently}{if_not_exists}",
-            unique = if self.unique { "UNIQUE " } else { "" },
-            concurrently = if self.concurrently {
-                "CONCURRENTLY "
-            } else {
-                ""
-            },
-            if_not_exists = if self.if_not_exists {
-                "IF NOT EXISTS "
-            } else {
-                ""
-            },
-        )?;
-        if let Some(value) = &self.name {
-            write!(f, "{value} ")?;
-        }
-        write!(f, "ON {}", self.table_name)?;
-        if let Some(value) = &self.using {
-            write!(f, " USING {value} ")?;
-        }
-        write!(f, "({})", display_separated(&self.columns, ","))?;
-        if !self.include.is_empty() {
-            write!(f, " INCLUDE ({})", display_separated(&self.include, ","))?;
-        }
-        if let Some(value) = self.nulls_distinct {
-            if value {
-                write!(f, " NULLS DISTINCT")?;
-            } else {
-                write!(f, " NULLS NOT DISTINCT")?;
-            }
-        }
-        if !self.with.is_empty() {
-            write!(f, " WITH ({})", display_comma_separated(&self.with))?;
-        }
-        if let Some(predicate) = &self.predicate {
-            write!(f, " WHERE {predicate}")?;
-        }
-        Ok(())
-    }
-}
-
-/// CREATE TABLE statement.
-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
-pub struct CreateTable {
-    pub or_replace: bool,
-    pub temporary: bool,
-    pub external: bool,
-    pub dynamic: bool,
-    pub global: Option<bool>,
-    pub if_not_exists: bool,
-    pub transient: bool,
-    pub volatile: bool,
-    pub iceberg: bool,
-    /// Table name
-    #[cfg_attr(feature = "visitor", visit(with = "visit_relation"))]
-    pub name: ObjectName,
-    /// Optional schema
-    pub columns: Vec<ColumnDef>,
-    pub constraints: Vec<TableConstraint>,
-    pub hive_distribution: HiveDistributionStyle,
-    pub hive_formats: Option<HiveFormat>,
-    pub table_options: CreateTableOptions,
-    pub file_format: Option<FileFormat>,
-    pub location: Option<String>,
-    pub query: Option<Box<Query>>,
-    pub without_rowid: bool,
-    pub like: Option<ObjectName>,
-    pub clone: Option<ObjectName>,
-    pub version: Option<TableVersion>,
-    // For Hive dialect, the table comment is after the column definitions without `=`,
-    // so the `comment` field is optional and different than the comment field in the general options list.
-    // [Hive](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-CreateTable)
-    pub comment: Option<CommentDef>,
-    pub on_commit: Option<OnCommit>,
-    /// ClickHouse "ON CLUSTER" clause:
-    /// <https://clickhouse.com/docs/en/sql-reference/distributed-ddl/>
-    pub on_cluster: Option<Ident>,
-    /// ClickHouse "PRIMARY KEY " clause.
-    /// <https://clickhouse.com/docs/en/sql-reference/statements/create/table/>
-    pub primary_key: Option<Box<Expr>>,
-    /// ClickHouse "ORDER BY " clause. Note that omitted ORDER BY is different
-    /// than empty (represented as ()), the latter meaning "no sorting".
-    /// <https://clickhouse.com/docs/en/sql-reference/statements/create/table/>
-    pub order_by: Option<OneOrManyWithParens<Expr>>,
-    /// BigQuery: A partition expression for the table.
-    /// <https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#partition_expression>
-    pub partition_by: Option<Box<Expr>>,
-    /// BigQuery: Table clustering column list.
-    /// <https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#table_option_list>
-    /// Snowflake: Table clustering list which contains base column, expressions on base columns.
-    /// <https://docs.snowflake.com/en/user-guide/tables-clustering-keys#defining-a-clustering-key-for-a-table>
-    pub cluster_by: Option<WrappedCollection<Vec<Expr>>>,
-    /// Hive: Table clustering column list.
-    /// <https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-CreateTable>
-    pub clustered_by: Option<ClusteredBy>,
-    /// Postgres `INHERITs` clause, which contains the list of tables from which
-    /// the new table inherits.
-    /// <https://www.postgresql.org/docs/current/ddl-inherit.html>
-    /// <https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-INHERITS>
-    pub inherits: Option<Vec<ObjectName>>,
-    /// SQLite "STRICT" clause.
-    /// if the "STRICT" table-option keyword is added to the end, after the closing ")",
-    /// then strict typing rules apply to that table.
-    pub strict: bool,
-    /// Snowflake "COPY GRANTS" clause
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
-    pub copy_grants: bool,
-    /// Snowflake "ENABLE_SCHEMA_EVOLUTION" clause
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
-    pub enable_schema_evolution: Option<bool>,
-    /// Snowflake "CHANGE_TRACKING" clause
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
-    pub change_tracking: Option<bool>,
-    /// Snowflake "DATA_RETENTION_TIME_IN_DAYS" clause
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
-    pub data_retention_time_in_days: Option<u64>,
-    /// Snowflake "MAX_DATA_EXTENSION_TIME_IN_DAYS" clause
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
-    pub max_data_extension_time_in_days: Option<u64>,
-    /// Snowflake "DEFAULT_DDL_COLLATION" clause
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
-    pub default_ddl_collation: Option<String>,
-    /// Snowflake "WITH AGGREGATION POLICY" clause
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
-    pub with_aggregation_policy: Option<ObjectName>,
-    /// Snowflake "WITH ROW ACCESS POLICY" clause
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
-    pub with_row_access_policy: Option<RowAccessPolicy>,
-    /// Snowflake "WITH TAG" clause
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
-    pub with_tags: Option<Vec<Tag>>,
-    /// Snowflake "EXTERNAL_VOLUME" clause for Iceberg tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
-    pub external_volume: Option<String>,
-    /// Snowflake "BASE_LOCATION" clause for Iceberg tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
-    pub base_location: Option<String>,
-    /// Snowflake "CATALOG" clause for Iceberg tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
-    pub catalog: Option<String>,
-    /// Snowflake "CATALOG_SYNC" clause for Iceberg tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
-    pub catalog_sync: Option<String>,
-    /// Snowflake "STORAGE_SERIALIZATION_POLICY" clause for Iceberg tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
-    pub storage_serialization_policy: Option<StorageSerializationPolicy>,
-    /// Snowflake "TARGET_LAG" clause for dynamic tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
-    pub target_lag: Option<String>,
-    /// Snowflake "WAREHOUSE" clause for dynamic tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
-    pub warehouse: Option<Ident>,
-    /// Snowflake "REFRESH_MODE" clause for dynamic tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
-    pub refresh_mode: Option<RefreshModeKind>,
-    /// Snowflake "INITIALIZE" clause for dynamic tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
-    pub initialize: Option<InitializeKind>,
-    /// Snowflake "REQUIRE USER" clause for dynamic tables
-    /// <https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table>
-    pub require_user: bool,
-}
-
-impl Display for CreateTable {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        // We want to allow the following options
-        // Empty column list, allowed by PostgreSQL:
-        // `CREATE TABLE t ()`
-        // No columns provided for CREATE TABLE AS:
-        // `CREATE TABLE t AS SELECT a from t2`
-        // Columns provided for CREATE TABLE AS:
-        // `CREATE TABLE t (a INT) AS SELECT a from t2`
-        write!(
-            f,
-            "CREATE {or_replace}{external}{global}{temporary}{transient}{volatile}{dynamic}{iceberg}TABLE {if_not_exists}{name}",
-            or_replace = if self.or_replace { "OR REPLACE " } else { "" },
-            external = if self.external { "EXTERNAL " } else { "" },
-            global = self.global
-                .map(|global| {
-                    if global {
-                        "GLOBAL "
-                    } else {
-                        "LOCAL "
-                    }
-                })
-                .unwrap_or(""),
-            if_not_exists = if self.if_not_exists { "IF NOT EXISTS " } else { "" },
-            temporary = if self.temporary { "TEMPORARY " } else { "" },
-            transient = if self.transient { "TRANSIENT " } else { "" },
-            volatile = if self.volatile { "VOLATILE " } else { "" },
-            // Only for Snowflake
-            iceberg = if self.iceberg { "ICEBERG " } else { "" },
-            dynamic = if self.dynamic { "DYNAMIC " } else { "" },
-            name = self.name,
-        )?;
-        if let Some(on_cluster) = &self.on_cluster {
-            write!(f, " ON CLUSTER {on_cluster}")?;
-        }
-        if !self.columns.is_empty() || !self.constraints.is_empty() {
-            f.write_str(" (")?;
-            NewLine.fmt(f)?;
-            Indent(DisplayCommaSeparated(&self.columns)).fmt(f)?;
-            if !self.columns.is_empty() && !self.constraints.is_empty() {
-                f.write_str(",")?;
-                SpaceOrNewline.fmt(f)?;
-            }
-            Indent(DisplayCommaSeparated(&self.constraints)).fmt(f)?;
-            NewLine.fmt(f)?;
-            f.write_str(")")?;
-        } else if self.query.is_none() && self.like.is_none() && self.clone.is_none() {
-            // PostgreSQL allows `CREATE TABLE t ();`, but requires empty parens
-            f.write_str(" ()")?;
-        }
-
-        // Hive table comment should be after column definitions, please refer to:
-        // [Hive](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-CreateTable)
-        if let Some(comment) = &self.comment {
-            write!(f, " COMMENT '{comment}'")?;
-        }
-
-        // Only for SQLite
-        if self.without_rowid {
-            write!(f, " WITHOUT ROWID")?;
-        }
-
-        // Only for Hive
-        if let Some(l) = &self.like {
-            write!(f, " LIKE {l}")?;
-        }
-
-        if let Some(c) = &self.clone {
-            write!(f, " CLONE {c}")?;
-        }
-
-        if let Some(version) = &self.version {
-            write!(f, " {version}")?;
-        }
-
-        match &self.hive_distribution {
-            HiveDistributionStyle::PARTITIONED { columns } => {
-                write!(f, " PARTITIONED BY ({})", display_comma_separated(columns))?;
-            }
-            HiveDistributionStyle::SKEWED {
-                columns,
-                on,
-                stored_as_directories,
-            } => {
-                write!(
-                    f,
-                    " SKEWED BY ({})) ON ({})",
-                    display_comma_separated(columns),
-                    display_comma_separated(on)
-                )?;
-                if *stored_as_directories {
-                    write!(f, " STORED AS DIRECTORIES")?;
-                }
-            }
-            _ => (),
-        }
-
-        if let Some(clustered_by) = &self.clustered_by {
-            write!(f, " {clustered_by}")?;
-        }
-
-        if let Some(HiveFormat {
-            row_format,
-            serde_properties,
-            storage,
-            location,
-        }) = &self.hive_formats
-        {
-            match row_format {
-                Some(HiveRowFormat::SERDE { class }) => write!(f, " ROW FORMAT SERDE '{class}'")?,
-                Some(HiveRowFormat::DELIMITED { delimiters }) => {
-                    write!(f, " ROW FORMAT DELIMITED")?;
-                    if !delimiters.is_empty() {
-                        write!(f, " {}", display_separated(delimiters, " "))?;
-                    }
-                }
-                None => (),
-            }
-            match storage {
-                Some(HiveIOFormat::IOF {
-                    input_format,
-                    output_format,
-                }) => write!(
-                    f,
-                    " STORED AS INPUTFORMAT {input_format} OUTPUTFORMAT {output_format}"
-                )?,
-                Some(HiveIOFormat::FileFormat { format }) if !self.external => {
-                    write!(f, " STORED AS {format}")?
-                }
-                _ => (),
-            }
-            if let Some(serde_properties) = serde_properties.as_ref() {
-                write!(
-                    f,
-                    " WITH SERDEPROPERTIES ({})",
-                    display_comma_separated(serde_properties)
-                )?;
-            }
-            if !self.external {
-                if let Some(loc) = location {
-                    write!(f, " LOCATION '{loc}'")?;
-                }
-            }
-        }
-        if self.external {
-            if let Some(file_format) = self.file_format {
-                write!(f, " STORED AS {file_format}")?;
-            }
-            write!(f, " LOCATION '{}'", self.location.as_ref().unwrap())?;
-        }
-
-        match &self.table_options {
-            options @ CreateTableOptions::With(_)
-            | options @ CreateTableOptions::Plain(_)
-            | options @ CreateTableOptions::TableProperties(_) => write!(f, " {options}")?,
-            _ => (),
-        }
-
-        if let Some(primary_key) = &self.primary_key {
-            write!(f, " PRIMARY KEY {primary_key}")?;
-        }
-        if let Some(order_by) = &self.order_by {
-            write!(f, " ORDER BY {order_by}")?;
-        }
-        if let Some(inherits) = &self.inherits {
-            write!(f, " INHERITS ({})", display_comma_separated(inherits))?;
-        }
-        if let Some(partition_by) = self.partition_by.as_ref() {
-            write!(f, " PARTITION BY {partition_by}")?;
-        }
-        if let Some(cluster_by) = self.cluster_by.as_ref() {
-            write!(f, " CLUSTER BY {cluster_by}")?;
-        }
-        if let options @ CreateTableOptions::Options(_) = &self.table_options {
-            write!(f, " {options}")?;
-        }
-        if let Some(external_volume) = self.external_volume.as_ref() {
-            write!(f, " EXTERNAL_VOLUME='{external_volume}'")?;
-        }
-
-        if let Some(catalog) = self.catalog.as_ref() {
-            write!(f, " CATALOG='{catalog}'")?;
-        }
-
-        if self.iceberg {
-            if let Some(base_location) = self.base_location.as_ref() {
-                write!(f, " BASE_LOCATION='{base_location}'")?;
-            }
-        }
-
-        if let Some(catalog_sync) = self.catalog_sync.as_ref() {
-            write!(f, " CATALOG_SYNC='{catalog_sync}'")?;
-        }
-
-        if let Some(storage_serialization_policy) = self.storage_serialization_policy.as_ref() {
-            write!(
-                f,
-                " STORAGE_SERIALIZATION_POLICY={storage_serialization_policy}"
-            )?;
-        }
-
-        if self.copy_grants {
-            write!(f, " COPY GRANTS")?;
-        }
-
-        if let Some(is_enabled) = self.enable_schema_evolution {
-            write!(
-                f,
-                " ENABLE_SCHEMA_EVOLUTION={}",
-                if is_enabled { "TRUE" } else { "FALSE" }
-            )?;
-        }
-
-        if let Some(is_enabled) = self.change_tracking {
-            write!(
-                f,
-                " CHANGE_TRACKING={}",
-                if is_enabled { "TRUE" } else { "FALSE" }
-            )?;
-        }
-
-        if let Some(data_retention_time_in_days) = self.data_retention_time_in_days {
-            write!(
-                f,
-                " DATA_RETENTION_TIME_IN_DAYS={data_retention_time_in_days}",
-            )?;
-        }
-
-        if let Some(max_data_extension_time_in_days) = self.max_data_extension_time_in_days {
-            write!(
-                f,
-                " MAX_DATA_EXTENSION_TIME_IN_DAYS={max_data_extension_time_in_days}",
-            )?;
-        }
-
-        if let Some(default_ddl_collation) = &self.default_ddl_collation {
-            write!(f, " DEFAULT_DDL_COLLATION='{default_ddl_collation}'",)?;
-        }
-
-        if let Some(with_aggregation_policy) = &self.with_aggregation_policy {
-            write!(f, " WITH AGGREGATION POLICY {with_aggregation_policy}",)?;
-        }
-
-        if let Some(row_access_policy) = &self.with_row_access_policy {
-            write!(f, " {row_access_policy}",)?;
-        }
-
-        if let Some(tag) = &self.with_tags {
-            write!(f, " WITH TAG ({})", display_comma_separated(tag.as_slice()))?;
-        }
-
-        if let Some(target_lag) = &self.target_lag {
-            write!(f, " TARGET_LAG='{target_lag}'")?;
-        }
-
-        if let Some(warehouse) = &self.warehouse {
-            write!(f, " WAREHOUSE={warehouse}")?;
-        }
-
-        if let Some(refresh_mode) = &self.refresh_mode {
-            write!(f, " REFRESH_MODE={refresh_mode}")?;
-        }
-
-        if let Some(initialize) = &self.initialize {
-            write!(f, " INITIALIZE={initialize}")?;
-        }
-
-        if self.require_user {
-            write!(f, " REQUIRE USER")?;
-        }
-
-        if self.on_commit.is_some() {
-            let on_commit = match self.on_commit {
-                Some(OnCommit::DeleteRows) => "ON COMMIT DELETE ROWS",
-                Some(OnCommit::PreserveRows) => "ON COMMIT PRESERVE ROWS",
-                Some(OnCommit::Drop) => "ON COMMIT DROP",
-                None => "",
-            };
-            write!(f, " {on_commit}")?;
-        }
-        if self.strict {
-            write!(f, " STRICT")?;
-        }
-        if let Some(query) = &self.query {
-            write!(f, " AS {query}")?;
-        }
-        Ok(())
-    }
-}
-
 /// INSERT statement.
 #[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
@@ -28,7 +28,7 @@ use crate::ast::{
     ClusteredBy, ColumnDef, CommentDef, CreateTable, CreateTableLikeKind, CreateTableOptions, Expr,
     FileFormat, HiveDistributionStyle, HiveFormat, Ident, ObjectName, OnCommit,
     OneOrManyWithParens, Query, RowAccessPolicy, Statement, StorageSerializationPolicy,
-    TableConstraint, Tag, WrappedCollection,
+    TableConstraint, Tag, WrappedCollection, InitializeKind, RefreshModeKind, TableVersion,
 };

 use crate::parser::ParserError;
@@ -26,11 +26,7 @@ use crate::ast::helpers::stmt_data_loading::{
     FileStagingCommand, StageLoadSelectItem, StageLoadSelectItemKind, StageParamsObject,
 };
 use crate::ast::{
-    CatalogSyncNamespaceMode, ColumnOption, ColumnPolicy, ColumnPolicyProperty, ContactEntry,
-    CopyIntoSnowflakeKind, CreateTableLikeKind, DollarQuotedString, Ident, IdentityParameters,
-    IdentityProperty, IdentityPropertyFormatKind, IdentityPropertyKind, IdentityPropertyOrder,
-    ObjectName, ObjectNamePart, RowAccessPolicy, ShowObjects, SqlOption, Statement,
-    StorageSerializationPolicy, TagsColumnOption, WrappedCollection,
+    CatalogSyncNamespaceMode, ColumnOption, ColumnPolicy, ColumnPolicyProperty, ContactEntry, CopyIntoSnowflakeKind, CreateTableLikeKind, DollarQuotedString, Ident, IdentityParameters, IdentityProperty, IdentityPropertyFormatKind, IdentityPropertyKind, IdentityPropertyOrder, InitializeKind, ObjectName, ObjectNamePart, RefreshModeKind, RowAccessPolicy, ShowObjects, SqlOption, Statement, StorageSerializationPolicy, TagsColumnOption, WrappedCollection
 };
 use crate::dialect::{Dialect, Precedence};
 use crate::keywords::Keyword;
@@ -1117,6 +1117,26 @@ fn parse_create_dynamic_table() {
         " REQUIRE USER",
         " AS SELECT product_id, product_name FROM staging_table"
     ));
+
+    snowflake().verified_stmt(concat!(
+        "CREATE DYNAMIC TABLE my_dynamic_table",
+        " TARGET_LAG='DOWNSTREAM'",
+        " WAREHOUSE=mywh",
+        " REFRESH_MODE=FULL",
+        " INITIALIZE=ON_SCHEDULE",
+        " REQUIRE USER",
+        " AS SELECT product_id, product_name FROM staging_table"
+    ));
+
+    snowflake().verified_stmt(concat!(
+        "CREATE DYNAMIC TABLE my_dynamic_table",
+        " TARGET_LAG='DOWNSTREAM'",
+        " WAREHOUSE=mywh",
+        " REFRESH_MODE=INCREMENTAL",
+        " INITIALIZE=ON_SCHEDULE",
+        " REQUIRE USER",
+        " AS SELECT product_id, product_name FROM staging_table"
+    ));
 }

 #[test]