Mirror of https://github.com/apache/datafusion-sqlparser-rs.git (synced 2025-10-08 21:20:33 +00:00)

ClickHouse: support of create table query with primary key and parametrised table engine (#1289)

commit 3c33ac15bd (parent 4b60866bc7)
6 changed files with 168 additions and 25 deletions

@@ -24,8 +24,8 @@ pub use super::ddl::{ColumnDef, TableConstraint};
 use super::{
     display_comma_separated, display_separated, Expr, FileFormat, FromTable, HiveDistributionStyle,
     HiveFormat, HiveIOFormat, HiveRowFormat, Ident, InsertAliases, MysqlInsertPriority, ObjectName,
-    OnCommit, OnInsert, OrderByExpr, Query, SelectItem, SqlOption, SqliteOnConflict,
-    TableWithJoins,
+    OnCommit, OnInsert, OneOrManyWithParens, OrderByExpr, Query, SelectItem, SqlOption,
+    SqliteOnConflict, TableEngine, TableWithJoins,
 };

 /// CREATE INDEX statement.
@@ -73,7 +73,7 @@ pub struct CreateTable {
     pub without_rowid: bool,
     pub like: Option<ObjectName>,
     pub clone: Option<ObjectName>,
-    pub engine: Option<String>,
+    pub engine: Option<TableEngine>,
     pub comment: Option<String>,
     pub auto_increment_offset: Option<u32>,
     pub default_charset: Option<String>,
@@ -82,10 +82,13 @@ pub struct CreateTable {
     /// ClickHouse "ON CLUSTER" clause:
     /// <https://clickhouse.com/docs/en/sql-reference/distributed-ddl/>
     pub on_cluster: Option<String>,
+    /// ClickHouse "PRIMARY KEY " clause.
+    /// <https://clickhouse.com/docs/en/sql-reference/statements/create/table/>
+    pub primary_key: Option<Box<Expr>>,
     /// ClickHouse "ORDER BY " clause. Note that omitted ORDER BY is different
     /// than empty (represented as ()), the latter meaning "no sorting".
     /// <https://clickhouse.com/docs/en/sql-reference/statements/create/table/>
-    pub order_by: Option<Vec<Ident>>,
+    pub order_by: Option<OneOrManyWithParens<Expr>>,
     /// BigQuery: A partition expression for the table.
     /// <https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#partition_expression>
     pub partition_by: Option<Box<Expr>>,
@@ -263,8 +266,11 @@ impl Display for CreateTable {
         if let Some(auto_increment_offset) = self.auto_increment_offset {
             write!(f, " AUTO_INCREMENT {auto_increment_offset}")?;
         }
+        if let Some(primary_key) = &self.primary_key {
+            write!(f, " PRIMARY KEY {}", primary_key)?;
+        }
         if let Some(order_by) = &self.order_by {
-            write!(f, " ORDER BY ({})", display_comma_separated(order_by))?;
+            write!(f, " ORDER BY {}", order_by)?;
         }
         if let Some(partition_by) = self.partition_by.as_ref() {
             write!(f, " PARTITION BY {partition_by}")?;
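Because order_by is now stored as OneOrManyWithParens<Expr>, the Display arm above echoes whichever ORDER BY form was written. A minimal round-trip sketch (assuming a crate build that contains this commit; the statements are the ones exercised by the ClickHouse tests further down in this diff):

    use sqlparser::dialect::ClickHouseDialect;
    use sqlparser::parser::Parser;

    fn main() {
        // Both the parenthesised and the bare ORDER BY forms should survive
        // a parse + Display round trip unchanged.
        for sql in [
            r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY ("x")"#,
            r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY "x""#,
        ] {
            let stmt = Parser::parse_sql(&ClickHouseDialect {}, sql)
                .expect("parse")
                .remove(0);
            assert_eq!(stmt.to_string(), sql);
        }
    }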
@@ -10,7 +10,7 @@ use sqlparser_derive::{Visit, VisitMut};
 use super::super::dml::CreateTable;
 use crate::ast::{
     ColumnDef, Expr, FileFormat, HiveDistributionStyle, HiveFormat, Ident, ObjectName, OnCommit,
-    Query, SqlOption, Statement, TableConstraint,
+    OneOrManyWithParens, Query, SqlOption, Statement, TableConstraint, TableEngine,
 };
 use crate::parser::ParserError;

@@ -65,14 +65,15 @@ pub struct CreateTableBuilder {
     pub without_rowid: bool,
     pub like: Option<ObjectName>,
     pub clone: Option<ObjectName>,
-    pub engine: Option<String>,
+    pub engine: Option<TableEngine>,
     pub comment: Option<String>,
     pub auto_increment_offset: Option<u32>,
     pub default_charset: Option<String>,
     pub collation: Option<String>,
     pub on_commit: Option<OnCommit>,
     pub on_cluster: Option<String>,
-    pub order_by: Option<Vec<Ident>>,
+    pub primary_key: Option<Box<Expr>>,
+    pub order_by: Option<OneOrManyWithParens<Expr>>,
     pub partition_by: Option<Box<Expr>>,
     pub cluster_by: Option<Vec<Ident>>,
     pub options: Option<Vec<SqlOption>>,
@@ -108,6 +109,7 @@ impl CreateTableBuilder {
             collation: None,
             on_commit: None,
             on_cluster: None,
+            primary_key: None,
             order_by: None,
             partition_by: None,
             cluster_by: None,
@@ -203,7 +205,7 @@ impl CreateTableBuilder {
         self
     }

-    pub fn engine(mut self, engine: Option<String>) -> Self {
+    pub fn engine(mut self, engine: Option<TableEngine>) -> Self {
         self.engine = engine;
         self
     }
@@ -238,7 +240,12 @@ impl CreateTableBuilder {
         self
     }

-    pub fn order_by(mut self, order_by: Option<Vec<Ident>>) -> Self {
+    pub fn primary_key(mut self, primary_key: Option<Box<Expr>>) -> Self {
+        self.primary_key = primary_key;
+        self
+    }
+
+    pub fn order_by(mut self, order_by: Option<OneOrManyWithParens<Expr>>) -> Self {
         self.order_by = order_by;
         self
     }
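For reference, a sketch of the new builder surface (a toy statement, not taken from this diff; the table name, key expression and engine are made up, and the public path of CreateTableBuilder is assumed from the helpers module touched above):

    use sqlparser::ast::helpers::stmt_create_table::CreateTableBuilder;
    use sqlparser::ast::{Expr, Ident, ObjectName, OneOrManyWithParens, TableEngine};

    fn main() {
        // Hypothetical statement roughly equivalent to:
        // CREATE TABLE t () ENGINE=MergeTree PRIMARY KEY i ORDER BY i
        let stmt = CreateTableBuilder::new(ObjectName(vec![Ident::new("t")]))
            .engine(Some(TableEngine {
                name: "MergeTree".to_string(),
                parameters: None,
            }))
            .primary_key(Some(Box::new(Expr::Identifier(Ident::new("i")))))
            .order_by(Some(OneOrManyWithParens::One(Expr::Identifier(Ident::new("i")))))
            .build();
        println!("{stmt}");
    }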
@@ -291,6 +298,7 @@ impl CreateTableBuilder {
             collation: self.collation,
             on_commit: self.on_commit,
             on_cluster: self.on_cluster,
+            primary_key: self.primary_key,
             order_by: self.order_by,
             partition_by: self.partition_by,
             cluster_by: self.cluster_by,
@@ -334,6 +342,7 @@ impl TryFrom<Statement> for CreateTableBuilder {
                 collation,
                 on_commit,
                 on_cluster,
+                primary_key,
                 order_by,
                 partition_by,
                 cluster_by,
@@ -366,6 +375,7 @@ impl TryFrom<Statement> for CreateTableBuilder {
                 collation,
                 on_commit,
                 on_cluster,
+                primary_key,
                 order_by,
                 partition_by,
                 cluster_by,
@@ -6315,6 +6315,29 @@ impl Display for MySQLColumnPosition {
     }
 }

+/// Engine of DB. Some warehouse has parameters of engine, e.g. [clickhouse]
+///
+/// [clickhouse]: https://clickhouse.com/docs/en/engines/table-engines
+#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
+pub struct TableEngine {
+    pub name: String,
+    pub parameters: Option<Vec<Ident>>,
+}
+
+impl Display for TableEngine {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.name)?;
+
+        if let Some(parameters) = self.parameters.as_ref() {
+            write!(f, "({})", display_comma_separated(parameters))?;
+        }
+
+        Ok(())
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
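A quick sketch of how the new TableEngine Display behaves (assuming a build with this commit; the engine names and parameter values are illustrative):

    use sqlparser::ast::{Ident, TableEngine};

    fn main() {
        // A bare engine prints only its name ...
        let plain = TableEngine {
            name: "InnoDB".to_string(),
            parameters: None,
        };
        assert_eq!(plain.to_string(), "InnoDB");

        // ... while a parametrised engine appends a parenthesised,
        // comma-separated identifier list.
        let parametrised = TableEngine {
            name: "ReplicatedMergeTree".to_string(),
            parameters: Some(vec![
                Ident::with_quote('\'', "/tables/t"),
                Ident::with_quote('\'', "r1"),
            ]),
        };
        assert_eq!(parametrised.to_string(), "ReplicatedMergeTree('/tables/t', 'r1')");
    }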
@@ -5262,7 +5262,15 @@ impl<'a> Parser<'a> {
             self.expect_token(&Token::Eq)?;
             let next_token = self.next_token();
             match next_token.token {
-                Token::Word(w) => Some(w.value),
+                Token::Word(w) => {
+                    let name = w.value;
+                    let parameters = if self.peek_token() == Token::LParen {
+                        Some(self.parse_parenthesized_identifiers()?)
+                    } else {
+                        None
+                    };
+                    Some(TableEngine { name, parameters })
+                }
                 _ => self.expected("identifier", next_token)?,
             }
         } else {
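A sketch of what the parametrised ENGINE= parsing above produces (assuming a build with this commit; the ReplicatedMergeTree arguments are made up):

    use sqlparser::ast::{CreateTable, Statement};
    use sqlparser::dialect::ClickHouseDialect;
    use sqlparser::parser::Parser;

    fn main() {
        let sql = "CREATE TABLE t (i INT) ENGINE=ReplicatedMergeTree('/tables/t', 'r1') ORDER BY i";
        let stmt = Parser::parse_sql(&ClickHouseDialect {}, sql)
            .expect("parse")
            .remove(0);
        if let Statement::CreateTable(CreateTable { engine, .. }) = stmt {
            // The engine name and its parenthesized identifier list are kept separately.
            let engine = engine.expect("ENGINE clause");
            assert_eq!(engine.name, "ReplicatedMergeTree");
            assert_eq!(engine.parameters.map(|p| p.len()), Some(2));
        }
    }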
@@ -5280,17 +5288,27 @@ impl<'a> Parser<'a> {
             None
         };

+        // ClickHouse supports `PRIMARY KEY`, before `ORDER BY`
+        // https://clickhouse.com/docs/en/sql-reference/statements/create/table#primary-key
+        let primary_key = if dialect_of!(self is ClickHouseDialect | GenericDialect)
+            && self.parse_keywords(&[Keyword::PRIMARY, Keyword::KEY])
+        {
+            Some(Box::new(self.parse_expr()?))
+        } else {
+            None
+        };
+
         let order_by = if self.parse_keywords(&[Keyword::ORDER, Keyword::BY]) {
             if self.consume_token(&Token::LParen) {
                 let columns = if self.peek_token() != Token::RParen {
-                    self.parse_comma_separated(|p| p.parse_identifier(false))?
+                    self.parse_comma_separated(|p| p.parse_expr())?
                 } else {
                     vec![]
                 };
                 self.expect_token(&Token::RParen)?;
-                Some(columns)
+                Some(OneOrManyWithParens::Many(columns))
             } else {
-                Some(vec![self.parse_identifier(false)?])
+                Some(OneOrManyWithParens::One(self.parse_expr()?))
             }
         } else {
             None
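A sketch of the new PRIMARY KEY / ORDER BY handling from the hunk above (assuming a build with this commit; the table and column names are made up):

    use sqlparser::ast::{CreateTable, OneOrManyWithParens, Statement};
    use sqlparser::dialect::ClickHouseDialect;
    use sqlparser::parser::Parser;

    fn main() {
        let sql = "CREATE TABLE t (i INT) ENGINE=MergeTree PRIMARY KEY tuple(i) ORDER BY tuple(i)";
        let stmt = Parser::parse_sql(&ClickHouseDialect {}, sql)
            .expect("parse")
            .remove(0);
        if let Statement::CreateTable(CreateTable {
            primary_key,
            order_by,
            ..
        }) = stmt
        {
            // PRIMARY KEY is captured as an arbitrary expression (here the tuple(i) call).
            assert!(primary_key.is_some());
            // A bare, unparenthesised ORDER BY becomes the One variant.
            assert!(matches!(order_by, Some(OneOrManyWithParens::One(_))));
        }
    }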
@@ -5388,6 +5406,7 @@ impl<'a> Parser<'a> {
             .partition_by(big_query_config.partition_by)
             .cluster_by(big_query_config.cluster_by)
             .options(big_query_config.options)
+            .primary_key(primary_key)
             .strict(strict)
             .build())
     }
@@ -9041,7 +9060,7 @@ impl<'a> Parser<'a> {
         let partitions: Vec<Ident> = if dialect_of!(self is MySqlDialect | GenericDialect)
             && self.parse_keyword(Keyword::PARTITION)
         {
-            self.parse_partitions()?
+            self.parse_parenthesized_identifiers()?
         } else {
             vec![]
         };
@@ -10969,7 +10988,7 @@ impl<'a> Parser<'a> {
         })
     }

-    fn parse_partitions(&mut self) -> Result<Vec<Ident>, ParserError> {
+    fn parse_parenthesized_identifiers(&mut self) -> Result<Vec<Ident>, ParserError> {
         self.expect_token(&Token::LParen)?;
         let partitions = self.parse_comma_separated(|p| p.parse_identifier(false))?;
         self.expect_token(&Token::RParen)?;
@@ -211,12 +211,9 @@ fn parse_delimited_identifiers() {
 #[test]
 fn parse_create_table() {
     clickhouse().verified_stmt(r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY ("x")"#);
-    clickhouse().one_statement_parses_to(
-        r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY "x""#,
-        r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY ("x")"#,
-    );
+    clickhouse().verified_stmt(r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY "x""#);
     clickhouse().verified_stmt(
-        r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY ("x") AS SELECT * FROM "t" WHERE true"#,
+        r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY "x" AS SELECT * FROM "t" WHERE true"#,
     );
 }

@@ -248,7 +245,7 @@ fn parse_clickhouse_data_types() {
         .replace(" Float64", " FLOAT64");

     match clickhouse_and_generic().one_statement_parses_to(sql, &canonical_sql) {
-        Statement::CreateTable { name, columns, .. } => {
+        Statement::CreateTable(CreateTable { name, columns, .. }) => {
             assert_eq!(name, ObjectName(vec!["table".into()]));
             assert_eq!(
                 columns,
@@ -289,7 +286,7 @@ fn parse_create_table_with_nullable() {
     let canonical_sql = sql.replace("String", "STRING");

     match clickhouse_and_generic().one_statement_parses_to(sql, &canonical_sql) {
-        Statement::CreateTable { name, columns, .. } => {
+        Statement::CreateTable(CreateTable { name, columns, .. }) => {
             assert_eq!(name, ObjectName(vec!["table".into()]));
             assert_eq!(
                 columns,
@@ -338,7 +335,7 @@ fn parse_create_table_with_nested_data_types() {
     );

     match clickhouse().one_statement_parses_to(sql, "") {
-        Statement::CreateTable { name, columns, .. } => {
+        Statement::CreateTable(CreateTable { name, columns, .. }) => {
             assert_eq!(name, ObjectName(vec!["table".into()]));
             assert_eq!(
                 columns,
@@ -410,6 +407,88 @@ fn parse_create_table_with_nested_data_types() {
     }
 }

+#[test]
+fn parse_create_table_with_primary_key() {
+    match clickhouse_and_generic().verified_stmt(concat!(
+        r#"CREATE TABLE db.table (`i` INT, `k` INT)"#,
+        " ENGINE=SharedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')",
+        " PRIMARY KEY tuple(i)",
+        " ORDER BY tuple(i)",
+    )) {
+        Statement::CreateTable(CreateTable {
+            name,
+            columns,
+            engine,
+            primary_key,
+            order_by,
+            ..
+        }) => {
+            assert_eq!(name.to_string(), "db.table");
+            assert_eq!(
+                vec![
+                    ColumnDef {
+                        name: Ident::with_quote('`', "i"),
+                        data_type: DataType::Int(None),
+                        collation: None,
+                        options: vec![],
+                    },
+                    ColumnDef {
+                        name: Ident::with_quote('`', "k"),
+                        data_type: DataType::Int(None),
+                        collation: None,
+                        options: vec![],
+                    },
+                ],
+                columns
+            );
+            assert_eq!(
+                engine,
+                Some(TableEngine {
+                    name: "SharedMergeTree".to_string(),
+                    parameters: Some(vec![
+                        Ident::with_quote('\'', "/clickhouse/tables/{uuid}/{shard}"),
+                        Ident::with_quote('\'', "{replica}"),
+                    ]),
+                })
+            );
+            fn assert_function(actual: &Function, name: &str, arg: &str) -> bool {
+                assert_eq!(actual.name, ObjectName(vec![Ident::new(name)]));
+                assert_eq!(
+                    actual.args,
+                    FunctionArguments::List(FunctionArgumentList {
+                        args: vec![FunctionArg::Unnamed(FunctionArgExpr::Expr(Identifier(
+                            Ident::new(arg)
+                        )),)],
+                        duplicate_treatment: None,
+                        clauses: vec![],
+                    })
+                );
+                true
+            }
+            match primary_key.unwrap().as_ref() {
+                Expr::Function(primary_key) => {
+                    assert!(assert_function(primary_key, "tuple", "i"));
+                }
+                _ => panic!("unexpected primary key type"),
+            }
+            match order_by {
+                Some(OneOrManyWithParens::One(Expr::Function(order_by))) => {
+                    assert!(assert_function(&order_by, "tuple", "i"));
+                }
+                _ => panic!("unexpected order by type"),
+            };
+        }
+        _ => unreachable!(),
+    }
+
+    clickhouse_and_generic()
+        .parse_sql_statements(concat!(
+            r#"CREATE TABLE db.table (`i` Int, `k` Int)"#,
+            " ORDER BY tuple(i), tuple(k)",
+        ))
+        .expect_err("ORDER BY supports one expression with tuple");
+}
+
 #[test]
 fn parse_create_view_with_fields_data_types() {
     match clickhouse().verified_stmt(r#"CREATE VIEW v (i "int", f "String") AS SELECT * FROM t"#) {
@@ -773,7 +773,13 @@ fn parse_create_table_engine_default_charset() {
                 },],
                 columns
             );
-            assert_eq!(engine, Some("InnoDB".to_string()));
+            assert_eq!(
+                engine,
+                Some(TableEngine {
+                    name: "InnoDB".to_string(),
+                    parameters: None
+                })
+            );
             assert_eq!(default_charset, Some("utf8mb3".to_string()));
         }
         _ => unreachable!(),