Compare commits


1 commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Andrew Lamb | 4be9bcc0b6 | Update rat_exclude_file.txt | 2025-01-20 11:36:27 -05:00 |
61 changed files with 5374 additions and 22589 deletions


@@ -1,39 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
name: license
# trigger for all PRs and changes to main
on:
  push:
    branches:
      - main
  pull_request:
jobs:
  rat:
    name: Release Audit Tool (RAT)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.8
      - name: Audit licenses
        run: ./dev/release/run-rat.sh .


@@ -19,9 +19,6 @@ name: Rust
on: [push, pull_request]
permissions:
  contents: read
jobs:
  codestyle:
@@ -88,8 +85,11 @@ jobs:
        uses: ./.github/actions/setup-builder
        with:
          rust-version: ${{ matrix.rust }}
      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
      - name: Install Tarpaulin
        run: cargo install cargo-tarpaulin
        uses: actions-rs/install@v0.1
        with:
          crate: cargo-tarpaulin
          version: 0.14.2
          use-tool-cache: true
      - name: Test
        run: cargo test --all-features


@@ -28,8 +28,6 @@ technically be breaking and thus will result in a `0.(N+1)` version.
- Unreleased: Check https://github.com/sqlparser-rs/sqlparser-rs/commits/main for undocumented changes.
- `0.56.0`: [changelog/0.56.0.md](changelog/0.56.0.md)
- `0.55.0`: [changelog/0.55.0.md](changelog/0.55.0.md)
- `0.54.0`: [changelog/0.54.0.md](changelog/0.54.0.md)
- `0.53.0`: [changelog/0.53.0.md](changelog/0.53.0.md)
- `0.52.0`: [changelog/0.52.0.md](changelog/0.52.0.md)


@@ -18,7 +18,7 @@
[package]
name = "sqlparser"
description = "Extensible SQL Lexer and Parser with support for ANSI SQL:2011"
version = "0.57.0"
version = "0.54.0"
authors = ["Apache DataFusion <dev@datafusion.apache.org>"]
homepage = "https://github.com/apache/datafusion-sqlparser-rs"
documentation = "https://docs.rs/sqlparser/"
@@ -49,7 +49,7 @@ bigdecimal = { version = "0.4.1", features = ["serde"], optional = true }
log = "0.4"
recursive = { version = "0.1.1", optional = true}
serde = { version = "1.0", default-features = false, features = ["derive", "alloc"], optional = true }
serde = { version = "1.0", features = ["derive"], optional = true }
# serde_json is only used in examples/cli, but we have to put it outside
# of dev-dependencies because of
# https://github.com/rust-lang/cargo/issues/1596


@@ -53,7 +53,7 @@ println!("AST: {:?}", ast);
This outputs
```rust
AST: [Query(Query { ctes: [], body: Select(Select { distinct: false, projection: [UnnamedExpr(Identifier("a")), UnnamedExpr(Identifier("b")), UnnamedExpr(Value(Long(123))), UnnamedExpr(Function(Function { name: ObjectName([Identifier(Ident { value: "myfunc", quote_style: None })]), args: [Identifier("b")], filter: None, over: None, distinct: false }))], from: [TableWithJoins { relation: Table { name: ObjectName([Identifier(Ident { value: "table_1", quote_style: None })]), alias: None, args: [], with_hints: [] }, joins: [] }], selection: Some(BinaryOp { left: BinaryOp { left: Identifier("a"), op: Gt, right: Identifier("b") }, op: And, right: BinaryOp { left: Identifier("b"), op: Lt, right: Value(Long(100)) } }), group_by: [], having: None }), order_by: [OrderByExpr { expr: Identifier("a"), asc: Some(false) }, OrderByExpr { expr: Identifier("b"), asc: None }], limit: None, offset: None, fetch: None })]
AST: [Query(Query { ctes: [], body: Select(Select { distinct: false, projection: [UnnamedExpr(Identifier("a")), UnnamedExpr(Identifier("b")), UnnamedExpr(Value(Long(123))), UnnamedExpr(Function(Function { name: ObjectName(["myfunc"]), args: [Identifier("b")], filter: None, over: None, distinct: false }))], from: [TableWithJoins { relation: Table { name: ObjectName(["table_1"]), alias: None, args: [], with_hints: [] }, joins: [] }], selection: Some(BinaryOp { left: BinaryOp { left: Identifier("a"), op: Gt, right: Identifier("b") }, op: And, right: BinaryOp { left: Identifier("b"), op: Lt, right: Value(Long(100)) } }), group_by: [], having: None }), order_by: [OrderByExpr { expr: Identifier("a"), asc: Some(false) }, OrderByExpr { expr: Identifier("b"), asc: None }], limit: None, offset: None, fetch: None })]
```
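The hunk's `println!` context line belongs to the README's parsing example just above this diff. For reference, a sketch of that snippet, with the query text reconstructed from the identifiers, literals, and operators visible in the AST output above:
```rust
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;

let sql = "SELECT a, b, 123, myfunc(b) \
           FROM table_1 \
           WHERE a > b AND b < 100 \
           ORDER BY a DESC, b";

// GenericDialect parses a permissive superset of many SQL dialects.
let dialect = GenericDialect {};
let ast = Parser::parse_sql(&dialect, sql).unwrap();

println!("AST: {:?}", ast);
```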
@@ -89,14 +89,10 @@ keywords, the following should hold true for all SQL:
```rust
// Parse SQL
let sql = "SELECT 'hello'";
let ast = Parser::parse_sql(&GenericDialect, sql).unwrap();
// The original SQL text can be generated from the AST
assert_eq!(ast[0].to_string(), sql);
// The SQL can also be pretty-printed with newlines and indentation
assert_eq!(format!("{:#}", ast[0]), "SELECT\n 'hello'");
```
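Since `parse_sql` returns a `Vec<Statement>` and each statement implements `Display`, the same round-trip property extends naturally to multi-statement input. A small sketch (the semicolon re-joining is the caller's concern, not part of the API):
```rust
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;

let sql = "SELECT 'hello'; SELECT 'world'";
let ast = Parser::parse_sql(&GenericDialect {}, sql).unwrap();

// Each parsed statement round-trips individually; re-join them to
// recover the original multi-statement text.
let roundtrip = ast
    .iter()
    .map(|stmt| stmt.to_string())
    .collect::<Vec<_>>()
    .join("; ");
assert_eq!(roundtrip, sql);
```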
There are still some cases in this crate where different SQL with seemingly
@@ -160,8 +156,7 @@ $ cargo run --features json_example --example cli FILENAME.sql [--dialectname]
## Users
This parser is currently being used by the [DataFusion] query engine, [LocustDB],
[Ballista], [GlueSQL], [Opteryx], [Polars], [PRQL], [Qrlew], [JumpWire], [ParadeDB], [CipherStash Proxy],
and [GreptimeDB].
[Ballista], [GlueSQL], [Opteryx], [Polars], [PRQL], [Qrlew], [JumpWire], and [ParadeDB].
If your project is using sqlparser-rs feel free to make a PR to add it
to this list.
@@ -280,5 +275,3 @@ licensed as above, without any additional terms or conditions.
[sql-standard]: https://en.wikipedia.org/wiki/ISO/IEC_9075
[`Dialect`]: https://docs.rs/sqlparser/latest/sqlparser/dialect/trait.Dialect.html
[`GenericDialect`]: https://docs.rs/sqlparser/latest/sqlparser/dialect/struct.GenericDialect.html
[CipherStash Proxy]: https://github.com/cipherstash/proxy
[GreptimeDB]: https://github.com/GreptimeTeam/greptimedb


@@ -1,173 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# sqlparser-rs 0.55.0 Changelog
This release consists of 55 commits from 25 contributors. See credits at the end of this changelog for more information.
## Migrating usages of `Expr::Value`
In v0.55 of sqlparser the `Expr::Value` enum variant contains a `ValueWithSpan` instead of a `Value`. Here is how to migrate.
### When pattern matching
```diff
- Expr::Value(Value::SingleQuotedString(my_string)) => { ... }
+ Expr::Value(ValueWithSpan{ value: Value::SingleQuotedString(my_string), span: _ }) => { ... }
```
### When creating an `Expr`
Use the new `Expr::value` method (notice the lowercase `v`), which will create a `ValueWithSpan` containing an empty span:
```diff
- Expr::Value(Value::SingleQuotedString(my_string))
+ Expr::value(Value::SingleQuotedString(my_string))
```
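Putting the two halves together, a minimal sketch of the v0.55 API described above (construction through `Expr::value`, matching through `ValueWithSpan`):
```rust
use sqlparser::ast::{Expr, Value, ValueWithSpan};

// Construction: `Expr::value` wraps the `Value` in a `ValueWithSpan`
// carrying an empty span.
let expr = Expr::value(Value::SingleQuotedString("hello".into()));

// Pattern matching: bind the inner `value`, ignore the span.
if let Expr::Value(ValueWithSpan {
    value: Value::SingleQuotedString(s),
    span: _,
}) = &expr
{
    assert_eq!(s, "hello");
}
```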
## Migrating usages of `ObjectName`
In v0.55 of sqlparser, the `ObjectName` structure has been changed as shown below. Here is how to migrate.
```diff
- pub struct ObjectName(pub Vec<Ident>);
+ pub struct ObjectName(pub Vec<ObjectNamePart>)
```
### When constructing `ObjectName`
Use the `From` impl:
```diff
- name: ObjectName(vec![Ident::new("f")]),
+ name: ObjectName::from(vec![Ident::new("f")]),
```
### Accessing Spans
Use the `span()` function:
```diff
- name.span
+ name.span()
```
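Taken together, a short sketch of constructing and printing an `ObjectName` under the v0.55 shape (the `From` impl wraps each `Ident` in an `ObjectNamePart` for you):
```rust
use sqlparser::ast::{Ident, ObjectName};

// Use the `From` impl rather than the old tuple-struct constructor.
let name = ObjectName::from(vec![Ident::new("schema"), Ident::new("table")]);
assert_eq!(name.to_string(), "schema.table");
```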
**Breaking changes:**
- Enhance object name path segments [#1539](https://github.com/apache/datafusion-sqlparser-rs/pull/1539) (ayman-sigma)
- Store spans for Value expressions [#1738](https://github.com/apache/datafusion-sqlparser-rs/pull/1738) (lovasoa)
**Implemented enhancements:**
- feat: adjust create and drop trigger for mysql dialect [#1734](https://github.com/apache/datafusion-sqlparser-rs/pull/1734) (invm)
**Fixed bugs:**
- fix: make `serde` feature no_std [#1730](https://github.com/apache/datafusion-sqlparser-rs/pull/1730) (iajoiner)
**Other:**
- Update rat_exclude_file.txt [#1670](https://github.com/apache/datafusion-sqlparser-rs/pull/1670) (alamb)
- Add support for Snowflake account privileges [#1666](https://github.com/apache/datafusion-sqlparser-rs/pull/1666) (yoavcloud)
- Add support for Create Iceberg Table statement for Snowflake parser [#1664](https://github.com/apache/datafusion-sqlparser-rs/pull/1664) (Vedin)
- National strings: check if dialect supports backslash escape [#1672](https://github.com/apache/datafusion-sqlparser-rs/pull/1672) (hansott)
- Only support escape literals for Postgres, Redshift and generic dialect [#1674](https://github.com/apache/datafusion-sqlparser-rs/pull/1674) (hansott)
- BigQuery: Support trailing commas in column definitions list [#1682](https://github.com/apache/datafusion-sqlparser-rs/pull/1682) (iffyio)
- Enable GROUP BY exp for Snowflake dialect [#1683](https://github.com/apache/datafusion-sqlparser-rs/pull/1683) (yoavcloud)
- Add support for parsing empty dictionary expressions [#1684](https://github.com/apache/datafusion-sqlparser-rs/pull/1684) (yoavcloud)
- Support multiple tables in `UPDATE FROM` clause [#1681](https://github.com/apache/datafusion-sqlparser-rs/pull/1681) (iffyio)
- Add support for mysql table hints [#1675](https://github.com/apache/datafusion-sqlparser-rs/pull/1675) (AvivDavid-Satori)
- BigQuery: Add support for select expr star [#1680](https://github.com/apache/datafusion-sqlparser-rs/pull/1680) (iffyio)
- Support underscore separators in numbers for Clickhouse. Fixes #1659 [#1677](https://github.com/apache/datafusion-sqlparser-rs/pull/1677) (graup)
- BigQuery: Fix column identifier reserved keywords list [#1678](https://github.com/apache/datafusion-sqlparser-rs/pull/1678) (iffyio)
- Fix bug when parsing a Snowflake stage with `;` suffix [#1688](https://github.com/apache/datafusion-sqlparser-rs/pull/1688) (yoavcloud)
- Allow plain JOIN without turning it into INNER [#1692](https://github.com/apache/datafusion-sqlparser-rs/pull/1692) (mvzink)
- Fix DDL generation in case of an empty arguments function. [#1690](https://github.com/apache/datafusion-sqlparser-rs/pull/1690) (remysaissy)
- Fix `CREATE FUNCTION` round trip for Hive dialect [#1693](https://github.com/apache/datafusion-sqlparser-rs/pull/1693) (iffyio)
- Make numeric literal underscore test dialect agnostic [#1685](https://github.com/apache/datafusion-sqlparser-rs/pull/1685) (iffyio)
- Extend lambda support for ClickHouse and DuckDB dialects [#1686](https://github.com/apache/datafusion-sqlparser-rs/pull/1686) (gstvg)
- Make TypedString preserve quote style [#1679](https://github.com/apache/datafusion-sqlparser-rs/pull/1679) (graup)
- Do not parse ASOF and MATCH_CONDITION as table factor aliases [#1698](https://github.com/apache/datafusion-sqlparser-rs/pull/1698) (yoavcloud)
- Add support for GRANT on some common Snowflake objects [#1699](https://github.com/apache/datafusion-sqlparser-rs/pull/1699) (yoavcloud)
- Add RETURNS TABLE() support for CREATE FUNCTION in Postgresql [#1687](https://github.com/apache/datafusion-sqlparser-rs/pull/1687) (remysaissy)
- Add parsing for GRANT ROLE and GRANT DATABASE ROLE in Snowflake dialect [#1689](https://github.com/apache/datafusion-sqlparser-rs/pull/1689) (yoavcloud)
- Add support for `CREATE/ALTER/DROP CONNECTOR` syntax [#1701](https://github.com/apache/datafusion-sqlparser-rs/pull/1701) (wugeer)
- Parse Snowflake COPY INTO <location> [#1669](https://github.com/apache/datafusion-sqlparser-rs/pull/1669) (yoavcloud)
- Require space after -- to start single line comment in MySQL [#1705](https://github.com/apache/datafusion-sqlparser-rs/pull/1705) (hansott)
- Add support for Show Objects statement for the Snowflake parser [#1702](https://github.com/apache/datafusion-sqlparser-rs/pull/1702) (DanCodedThis)
- Fix incorrect parsing of JsonAccess bracket notation after cast in Snowflake [#1708](https://github.com/apache/datafusion-sqlparser-rs/pull/1708) (yoavcloud)
- Parse Postgres VARBIT datatype [#1703](https://github.com/apache/datafusion-sqlparser-rs/pull/1703) (mvzink)
- Implement FROM-first selects [#1713](https://github.com/apache/datafusion-sqlparser-rs/pull/1713) (mitsuhiko)
- Enable custom dialects to support `MATCH() AGAINST()` [#1719](https://github.com/apache/datafusion-sqlparser-rs/pull/1719) (joocer)
- Support group by cube/rollup etc in BigQuery [#1720](https://github.com/apache/datafusion-sqlparser-rs/pull/1720) (Groennbeck)
- Add support for MS Varbinary(MAX) (#1714) [#1715](https://github.com/apache/datafusion-sqlparser-rs/pull/1715) (TylerBrinks)
- Add supports for Hive's `SELECT ... GROUP BY .. GROUPING SETS` syntax [#1653](https://github.com/apache/datafusion-sqlparser-rs/pull/1653) (wugeer)
- Differentiate LEFT JOIN from LEFT OUTER JOIN [#1726](https://github.com/apache/datafusion-sqlparser-rs/pull/1726) (mvzink)
- Add support for Postgres `ALTER TYPE` [#1727](https://github.com/apache/datafusion-sqlparser-rs/pull/1727) (jvatic)
- Replace `Method` and `CompositeAccess` with `CompoundFieldAccess` [#1716](https://github.com/apache/datafusion-sqlparser-rs/pull/1716) (iffyio)
- Add support for `EXECUTE IMMEDIATE` [#1717](https://github.com/apache/datafusion-sqlparser-rs/pull/1717) (iffyio)
- Treat COLLATE like any other column option [#1731](https://github.com/apache/datafusion-sqlparser-rs/pull/1731) (mvzink)
- Add support for PostgreSQL/Redshift geometric operators [#1723](https://github.com/apache/datafusion-sqlparser-rs/pull/1723) (benrsatori)
- Implement SnowFlake ALTER SESSION [#1712](https://github.com/apache/datafusion-sqlparser-rs/pull/1712) (osipovartem)
- Extend Visitor trait for Value type [#1725](https://github.com/apache/datafusion-sqlparser-rs/pull/1725) (tomershaniii)
- Add support for `ORDER BY ALL` [#1724](https://github.com/apache/datafusion-sqlparser-rs/pull/1724) (PokIsemaine)
- Parse casting to array using double colon operator in Redshift [#1737](https://github.com/apache/datafusion-sqlparser-rs/pull/1737) (yoavcloud)
- Replace parallel condition/result vectors with single CaseWhen vector in Expr::Case. This fixes the iteration order when using the `Visitor` trait. Expressions are now visited in the same order as they appear in the sql source. [#1733](https://github.com/apache/datafusion-sqlparser-rs/pull/1733) (lovasoa)
- BigQuery: Add support for `BEGIN` [#1718](https://github.com/apache/datafusion-sqlparser-rs/pull/1718) (iffyio)
- Parse SIGNED INTEGER type in MySQL CAST [#1739](https://github.com/apache/datafusion-sqlparser-rs/pull/1739) (mvzink)
- Parse MySQL ALTER TABLE ALGORITHM option [#1745](https://github.com/apache/datafusion-sqlparser-rs/pull/1745) (mvzink)
- Random test cleanups use Expr::value [#1749](https://github.com/apache/datafusion-sqlparser-rs/pull/1749) (alamb)
- Parse ALTER TABLE AUTO_INCREMENT operation for MySQL [#1748](https://github.com/apache/datafusion-sqlparser-rs/pull/1748) (mvzink)
## Credits
Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor.
```
10 Yoav Cohen
9 Ifeanyi Ubah
7 Michael Victor Zink
3 Hans Ott
2 Andrew Lamb
2 Ophir LOJKINE
2 Paul Grau
2 Rémy SAISSY
2 wugeer
1 Armin Ronacher
1 Artem Osipov
1 AvivDavid-Satori
1 Ayman Elkfrawy
1 DanCodedThis
1 Denys Tsomenko
1 Emil
1 Ian Alexander Joiner
1 Jesse Stuart
1 Justin Joyce
1 Michael
1 SiLe Zhou
1 Tyler Brinks
1 benrsatori
1 gstvg
1 tomershaniii
```
Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release.


@@ -1,102 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# sqlparser-rs 0.56.0 Changelog
This release consists of 48 commits from 19 contributors. See credits at the end of this changelog for more information.
**Other:**
- Ignore escaped LIKE wildcards in MySQL [#1735](https://github.com/apache/datafusion-sqlparser-rs/pull/1735) (mvzink)
- Parse SET NAMES syntax in Postgres [#1752](https://github.com/apache/datafusion-sqlparser-rs/pull/1752) (mvzink)
- re-add support for nested comments in mssql [#1754](https://github.com/apache/datafusion-sqlparser-rs/pull/1754) (lovasoa)
- Extend support for INDEX parsing [#1707](https://github.com/apache/datafusion-sqlparser-rs/pull/1707) (LucaCappelletti94)
- Parse MySQL `ALTER TABLE DROP FOREIGN KEY` syntax [#1762](https://github.com/apache/datafusion-sqlparser-rs/pull/1762) (mvzink)
- add support for `with` clauses (CTEs) in `delete` statements [#1764](https://github.com/apache/datafusion-sqlparser-rs/pull/1764) (lovasoa)
- SET with a list of comma separated assignments [#1757](https://github.com/apache/datafusion-sqlparser-rs/pull/1757) (MohamedAbdeen21)
- Preserve MySQL-style `LIMIT <offset>, <limit>` syntax [#1765](https://github.com/apache/datafusion-sqlparser-rs/pull/1765) (mvzink)
- Add support for `DROP MATERIALIZED VIEW` [#1743](https://github.com/apache/datafusion-sqlparser-rs/pull/1743) (iffyio)
- Add `CASE` and `IF` statement support [#1741](https://github.com/apache/datafusion-sqlparser-rs/pull/1741) (iffyio)
- BigQuery: Add support for `CREATE SCHEMA` options [#1742](https://github.com/apache/datafusion-sqlparser-rs/pull/1742) (iffyio)
- Snowflake: Support dollar quoted comments [#1755](https://github.com/apache/datafusion-sqlparser-rs/pull/1755)
- Add LOCK operation for ALTER TABLE [#1768](https://github.com/apache/datafusion-sqlparser-rs/pull/1768) (MohamedAbdeen21)
- Add support for `RAISE` statement [#1766](https://github.com/apache/datafusion-sqlparser-rs/pull/1766) (iffyio)
- Add GLOBAL context/modifier to SET statements [#1767](https://github.com/apache/datafusion-sqlparser-rs/pull/1767) (MohamedAbdeen21)
- Parse `SUBSTR` as alias for `SUBSTRING` [#1769](https://github.com/apache/datafusion-sqlparser-rs/pull/1769) (mvzink)
- SET statements: scope modifier for multiple assignments [#1772](https://github.com/apache/datafusion-sqlparser-rs/pull/1772) (MohamedAbdeen21)
- Support qualified column names in `MATCH AGAINST` clause [#1774](https://github.com/apache/datafusion-sqlparser-rs/pull/1774) (tomershaniii)
- Mysql: Add support for := operator [#1779](https://github.com/apache/datafusion-sqlparser-rs/pull/1779) (barsela1)
- Add cipherstash-proxy to list of users in README.md [#1782](https://github.com/apache/datafusion-sqlparser-rs/pull/1782) (coderdan)
- Fix typos [#1785](https://github.com/apache/datafusion-sqlparser-rs/pull/1785) (jayvdb)
- Add support for Databricks TIMESTAMP_NTZ. [#1781](https://github.com/apache/datafusion-sqlparser-rs/pull/1781) (romanb)
- Enable double-dot-notation for mssql. [#1787](https://github.com/apache/datafusion-sqlparser-rs/pull/1787) (romanb)
- Fix: Snowflake ALTER SESSION cannot be followed by other statements. [#1786](https://github.com/apache/datafusion-sqlparser-rs/pull/1786) (romanb)
- Add GreptimeDB to the "Users" in README [#1788](https://github.com/apache/datafusion-sqlparser-rs/pull/1788) (MichaelScofield)
- Extend snowflake grant options support [#1794](https://github.com/apache/datafusion-sqlparser-rs/pull/1794) (yoavcloud)
- Fix clippy lint on rust 1.86 [#1796](https://github.com/apache/datafusion-sqlparser-rs/pull/1796) (iffyio)
- Allow single quotes in EXTRACT() for Redshift. [#1795](https://github.com/apache/datafusion-sqlparser-rs/pull/1795) (romanb)
- MSSQL: Add support for functionality `MERGE` output clause [#1790](https://github.com/apache/datafusion-sqlparser-rs/pull/1790) (dilovancelik)
- Support additional DuckDB integer types such as HUGEINT, UHUGEINT, etc [#1797](https://github.com/apache/datafusion-sqlparser-rs/pull/1797) (alexander-beedie)
- Add support for MSSQL IF/ELSE statements. [#1791](https://github.com/apache/datafusion-sqlparser-rs/pull/1791) (romanb)
- Allow literal backslash escapes for string literals in Redshift dialect. [#1801](https://github.com/apache/datafusion-sqlparser-rs/pull/1801) (romanb)
- Add support for MySQL's STRAIGHT_JOIN join operator. [#1802](https://github.com/apache/datafusion-sqlparser-rs/pull/1802) (romanb)
- Snowflake COPY INTO target columns, select items and optional alias [#1805](https://github.com/apache/datafusion-sqlparser-rs/pull/1805) (yoavcloud)
- Fix tokenization of qualified identifiers with numeric prefix. [#1803](https://github.com/apache/datafusion-sqlparser-rs/pull/1803) (romanb)
- Add support for `INHERITS` option in `CREATE TABLE` statement [#1806](https://github.com/apache/datafusion-sqlparser-rs/pull/1806) (LucaCappelletti94)
- Add `DROP TRIGGER` support for SQL Server [#1813](https://github.com/apache/datafusion-sqlparser-rs/pull/1813) (aharpervc)
- Snowflake: support nested join without parentheses [#1799](https://github.com/apache/datafusion-sqlparser-rs/pull/1799) (barsela1)
- Add support for parenthesized subquery as `IN` predicate [#1793](https://github.com/apache/datafusion-sqlparser-rs/pull/1793) (adamchainz)
- Fix `STRAIGHT_JOIN` constraint when table alias is absent [#1812](https://github.com/apache/datafusion-sqlparser-rs/pull/1812) (killertux)
- Add support for `PRINT` statement for SQL Server [#1811](https://github.com/apache/datafusion-sqlparser-rs/pull/1811) (aharpervc)
- enable `supports_filter_during_aggregation` for Generic dialect [#1815](https://github.com/apache/datafusion-sqlparser-rs/pull/1815) (goldmedal)
- Add support for `XMLTABLE` [#1817](https://github.com/apache/datafusion-sqlparser-rs/pull/1817) (lovasoa)
- Add `CREATE FUNCTION` support for SQL Server [#1808](https://github.com/apache/datafusion-sqlparser-rs/pull/1808) (aharpervc)
- Add `OR ALTER` support for `CREATE VIEW` [#1818](https://github.com/apache/datafusion-sqlparser-rs/pull/1818) (aharpervc)
- Add `DECLARE ... CURSOR FOR` support for SQL Server [#1821](https://github.com/apache/datafusion-sqlparser-rs/pull/1821) (aharpervc)
- Handle missing login in changelog generate script [#1823](https://github.com/apache/datafusion-sqlparser-rs/pull/1823) (iffyio)
- Snowflake: Add support for `CONNECT_BY_ROOT` [#1780](https://github.com/apache/datafusion-sqlparser-rs/pull/1780) (tomershaniii)
## Credits
Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor.
```
8 Roman Borschel
6 Ifeanyi Ubah
5 Andrew Harper
5 Michael Victor Zink
4 Mohamed Abdeen
3 Ophir LOJKINE
2 Luca Cappelletti
2 Yoav Cohen
2 bar sela
2 tomershaniii
1 Adam Johnson
1 Aleksei Piianin
1 Alexander Beedie
1 Bruno Clemente
1 Dan Draper
1 DilovanCelik
1 Jax Liu
1 John Vandenberg
1 LFC
```
Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release.


@@ -1,95 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# sqlparser-rs 0.57.0 Changelog
This release consists of 39 commits from 19 contributors. See credits at the end of this changelog for more information.
**Implemented enhancements:**
- feat: Hive: support `SORT BY` direction [#1873](https://github.com/apache/datafusion-sqlparser-rs/pull/1873) (chenkovsky)
**Other:**
- Support some of pipe operators [#1759](https://github.com/apache/datafusion-sqlparser-rs/pull/1759) (simonvandel)
- Added support for `DROP DOMAIN` [#1828](https://github.com/apache/datafusion-sqlparser-rs/pull/1828) (LucaCappelletti94)
- Improve support for cursors for SQL Server [#1831](https://github.com/apache/datafusion-sqlparser-rs/pull/1831) (aharpervc)
- Add all missing table options to be handled in any order [#1747](https://github.com/apache/datafusion-sqlparser-rs/pull/1747) (benrsatori)
- Add `CREATE TRIGGER` support for SQL Server [#1810](https://github.com/apache/datafusion-sqlparser-rs/pull/1810) (aharpervc)
- Added support for `CREATE DOMAIN` [#1830](https://github.com/apache/datafusion-sqlparser-rs/pull/1830) (LucaCappelletti94)
- Allow stored procedures to be defined without `BEGIN`/`END` [#1834](https://github.com/apache/datafusion-sqlparser-rs/pull/1834) (aharpervc)
- Add support for the MATCH and REGEXP binary operators [#1840](https://github.com/apache/datafusion-sqlparser-rs/pull/1840) (lovasoa)
- Fix: parsing ident starting with underscore in certain dialects [#1835](https://github.com/apache/datafusion-sqlparser-rs/pull/1835) (MohamedAbdeen21)
- implement pretty-printing with `{:#}` [#1847](https://github.com/apache/datafusion-sqlparser-rs/pull/1847) (lovasoa)
- Fix big performance issue in string serialization [#1848](https://github.com/apache/datafusion-sqlparser-rs/pull/1848) (lovasoa)
- Add support for `DENY` statements [#1836](https://github.com/apache/datafusion-sqlparser-rs/pull/1836) (aharpervc)
- Postgresql: Add `REPLICA IDENTITY` operation for `ALTER TABLE` [#1844](https://github.com/apache/datafusion-sqlparser-rs/pull/1844) (MohamedAbdeen21)
- Add support for INCLUDE/EXCLUDE NULLS for UNPIVOT [#1849](https://github.com/apache/datafusion-sqlparser-rs/pull/1849) (Vedin)
- pretty print improvements [#1851](https://github.com/apache/datafusion-sqlparser-rs/pull/1851) (lovasoa)
- fix new rust 1.87 cargo clippy warnings [#1856](https://github.com/apache/datafusion-sqlparser-rs/pull/1856) (lovasoa)
- Update criterion requirement from 0.5 to 0.6 in /sqlparser_bench [#1857](https://github.com/apache/datafusion-sqlparser-rs/pull/1857) (dependabot[bot])
- pretty-print CREATE TABLE statements [#1854](https://github.com/apache/datafusion-sqlparser-rs/pull/1854) (lovasoa)
- pretty-print CREATE VIEW statements [#1855](https://github.com/apache/datafusion-sqlparser-rs/pull/1855) (lovasoa)
- Handle optional datatypes properly in `CREATE FUNCTION` statements [#1826](https://github.com/apache/datafusion-sqlparser-rs/pull/1826) (LucaCappelletti94)
- Mysql: Add `SRID` column option [#1852](https://github.com/apache/datafusion-sqlparser-rs/pull/1852) (MohamedAbdeen21)
- Add support for table valued functions for SQL Server [#1839](https://github.com/apache/datafusion-sqlparser-rs/pull/1839) (aharpervc)
- Keep the COLUMN keyword only if it exists when dropping the column [#1862](https://github.com/apache/datafusion-sqlparser-rs/pull/1862) (git-hulk)
- Add support for parameter default values in SQL Server [#1866](https://github.com/apache/datafusion-sqlparser-rs/pull/1866) (aharpervc)
- Add support for `TABLESAMPLE` pipe operator [#1860](https://github.com/apache/datafusion-sqlparser-rs/pull/1860) (hendrikmakait)
- Adds support for mysql's drop index [#1864](https://github.com/apache/datafusion-sqlparser-rs/pull/1864) (dmzmk)
- Fix: GROUPING SETS accept values without parenthesis [#1867](https://github.com/apache/datafusion-sqlparser-rs/pull/1867) (Vedin)
- Add ICEBERG keyword support to ALTER TABLE statement [#1869](https://github.com/apache/datafusion-sqlparser-rs/pull/1869) (osipovartem)
- MySQL: Support `index_name` in FK constraints [#1871](https://github.com/apache/datafusion-sqlparser-rs/pull/1871) (MohamedAbdeen21)
- Postgres: Apply `ONLY` keyword per table in TRUNCATE stmt [#1872](https://github.com/apache/datafusion-sqlparser-rs/pull/1872) (MohamedAbdeen21)
- Fix `CASE` expression spans [#1874](https://github.com/apache/datafusion-sqlparser-rs/pull/1874) (eliaperantoni)
- MySQL: `[[NOT] ENFORCED]` in CHECK constraint [#1870](https://github.com/apache/datafusion-sqlparser-rs/pull/1870) (MohamedAbdeen21)
- Add support for `CREATE SCHEMA WITH ( <properties> )` [#1877](https://github.com/apache/datafusion-sqlparser-rs/pull/1877) (utay)
- Add support for `ALTER TABLE DROP INDEX` [#1865](https://github.com/apache/datafusion-sqlparser-rs/pull/1865) (vimko)
- chore: Replace archived actions-rs/install action [#1876](https://github.com/apache/datafusion-sqlparser-rs/pull/1876) (assignUser)
- Allow `IF NOT EXISTS` after table name for Snowflake [#1881](https://github.com/apache/datafusion-sqlparser-rs/pull/1881) (bombsimon)
- Support `DISTINCT AS { STRUCT | VALUE }` for BigQuery [#1880](https://github.com/apache/datafusion-sqlparser-rs/pull/1880) (bombsimon)
## Credits
Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor.
```
7 Ophir LOJKINE
6 Andrew Harper
6 Mohamed Abdeen
3 Luca Cappelletti
2 Denys Tsomenko
2 Simon Sawert
1 Andrew Lamb
1 Artem Osipov
1 Chen Chongchen
1 Dmitriy Mazurin
1 Elia Perantoni
1 Hendrik Makait
1 Jacob Wujciak-Jens
1 Simon Vandel Sillesen
1 Yannick Utard
1 benrsatori
1 dependabot[bot]
1 hulk
1 vimko
```
Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release.


@@ -28,8 +28,7 @@ def print_pulls(repo_name, title, pulls):
    print()
    for (pull, commit) in pulls:
        url = "https://github.com/{}/pull/{}".format(repo_name, pull.number)
        author = f"({commit.author.login})" if commit.author else ''
        print("- {} [#{}]({}) {}".format(pull.title, pull.number, url, author))
        print("- {} [#{}]({}) ({})".format(pull.title, pull.number, url, commit.author.login))
    print()
@@ -162,4 +161,4 @@ def cli(args=None):
    generate_changelog(repo, project, args.tag1, args.tag2, args.version)
if __name__ == "__main__":
    cli()
    cli()


@@ -1,8 +1,7 @@
# Files to exclude from the Apache Rat (license) check
.gitignore
.tool-versions
target/*
**.gitignore
rat.txt
dev/release/rat_exclude_files.txt
fuzz/.gitignore
sqlparser_bench/img/flamegraph.svg
**Cargo.lock
filtered_rat.txt


@@ -63,7 +63,7 @@ $ cargo run --example cli - [--dialectname]
    };
    let contents = if filename == "-" {
        println!("Parsing from stdin using {dialect:?}");
        println!("Parsing from stdin using {:?}", dialect);
        let mut buf = Vec::new();
        stdin()
            .read_to_end(&mut buf)


@@ -26,7 +26,7 @@ edition = "2018"
sqlparser = { path = "../" }
[dev-dependencies]
criterion = "0.6"
criterion = "0.5"
[[bench]]
name = "sqlparser_bench"


@@ -45,29 +45,30 @@ fn basic_queries(c: &mut Criterion) {
    let large_statement = {
        let expressions = (0..1000)
            .map(|n| format!("FN_{n}(COL_{n})"))
            .map(|n| format!("FN_{}(COL_{})", n, n))
            .collect::<Vec<_>>()
            .join(", ");
        let tables = (0..1000)
            .map(|n| format!("TABLE_{n}"))
            .map(|n| format!("TABLE_{}", n))
            .collect::<Vec<_>>()
            .join(" JOIN ");
        let where_condition = (0..1000)
            .map(|n| format!("COL_{n} = {n}"))
            .map(|n| format!("COL_{} = {}", n, n))
            .collect::<Vec<_>>()
            .join(" OR ");
        let order_condition = (0..1000)
            .map(|n| format!("COL_{n} DESC"))
            .map(|n| format!("COL_{} DESC", n))
            .collect::<Vec<_>>()
            .join(", ");
        format!(
            "SELECT {expressions} FROM {tables} WHERE {where_condition} ORDER BY {order_condition}"
            "SELECT {} FROM {} WHERE {} ORDER BY {}",
            expressions, tables, where_condition, order_condition
        )
    };
    group.bench_function("parse_large_statement", |b| {
        b.iter(|| Parser::parse_sql(&dialect, std::hint::black_box(large_statement.as_str())));
        b.iter(|| Parser::parse_sql(&dialect, criterion::black_box(large_statement.as_str())));
    });
    let large_statement = Parser::parse_sql(&dialect, large_statement.as_str())


@@ -36,7 +36,7 @@ pub enum EnumMember {
Name(String),
/// ClickHouse allows to specify an integer value for each enum value.
///
/// [ClickHouse](https://clickhouse.com/docs/en/sql-reference/data-types/enum)
/// [clickhouse](https://clickhouse.com/docs/en/sql-reference/data-types/enum)
NamedValue(String, Expr),
}
@@ -45,327 +45,268 @@ pub enum EnumMember {
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum DataType {
/// Table type in [PostgreSQL], e.g. CREATE FUNCTION RETURNS TABLE(...).
///
/// [PostgreSQL]: https://www.postgresql.org/docs/15/sql-createfunction.html
/// [MsSQL]: https://learn.microsoft.com/en-us/sql/t-sql/statements/create-function-transact-sql?view=sql-server-ver16#c-create-a-multi-statement-table-valued-function
Table(Option<Vec<ColumnDef>>),
/// Table type with a name, e.g. CREATE FUNCTION RETURNS @result TABLE(...).
///
/// [MsSQl]: https://learn.microsoft.com/en-us/sql/t-sql/statements/create-function-transact-sql?view=sql-server-ver16#table
NamedTable {
/// Table name.
name: ObjectName,
/// Table columns.
columns: Vec<ColumnDef>,
},
/// Fixed-length character type, e.g. CHARACTER(10).
/// Fixed-length character type e.g. CHARACTER(10)
Character(Option<CharacterLength>),
/// Fixed-length char type, e.g. CHAR(10).
/// Fixed-length char type e.g. CHAR(10)
Char(Option<CharacterLength>),
/// Character varying type, e.g. CHARACTER VARYING(10).
/// Character varying type e.g. CHARACTER VARYING(10)
CharacterVarying(Option<CharacterLength>),
/// Char varying type, e.g. CHAR VARYING(10).
/// Char varying type e.g. CHAR VARYING(10)
CharVarying(Option<CharacterLength>),
/// Variable-length character type, e.g. VARCHAR(10).
/// Variable-length character type e.g. VARCHAR(10)
Varchar(Option<CharacterLength>),
/// Variable-length character type, e.g. NVARCHAR(10).
/// Variable-length character type e.g. NVARCHAR(10)
Nvarchar(Option<CharacterLength>),
/// Uuid type.
/// Uuid type
Uuid,
/// Large character object with optional length,
/// e.g. CHARACTER LARGE OBJECT, CHARACTER LARGE OBJECT(1000), [SQL Standard].
/// Large character object with optional length e.g. CHARACTER LARGE OBJECT, CHARACTER LARGE OBJECT(1000), [standard]
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#character-large-object-type
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#character-large-object-type
CharacterLargeObject(Option<u64>),
/// Large character object with optional length,
/// e.g. CHAR LARGE OBJECT, CHAR LARGE OBJECT(1000), [SQL Standard].
/// Large character object with optional length e.g. CHAR LARGE OBJECT, CHAR LARGE OBJECT(1000), [standard]
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#character-large-object-type
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#character-large-object-type
CharLargeObject(Option<u64>),
/// Large character object with optional length,
/// e.g. CLOB, CLOB(1000), [SQL Standard].
/// Large character object with optional length e.g. CLOB, CLOB(1000), [standard]
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#character-large-object-type
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#character-large-object-type
/// [Oracle]: https://docs.oracle.com/javadb/10.10.1.2/ref/rrefclob.html
Clob(Option<u64>),
/// Fixed-length binary type with optional length,
/// see [SQL Standard], [MS SQL Server].
/// Fixed-length binary type with optional length e.g. [standard], [MS SQL Server]
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#binary-string-type
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#binary-string-type
/// [MS SQL Server]: https://learn.microsoft.com/pt-br/sql/t-sql/data-types/binary-and-varbinary-transact-sql?view=sql-server-ver16
Binary(Option<u64>),
/// Variable-length binary with optional length type,
/// see [SQL Standard], [MS SQL Server].
/// Variable-length binary with optional length type e.g. [standard], [MS SQL Server]
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#binary-string-type
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#binary-string-type
/// [MS SQL Server]: https://learn.microsoft.com/pt-br/sql/t-sql/data-types/binary-and-varbinary-transact-sql?view=sql-server-ver16
Varbinary(Option<BinaryLength>),
/// Large binary object with optional length,
/// see [SQL Standard], [Oracle].
Varbinary(Option<u64>),
/// Large binary object with optional length e.g. BLOB, BLOB(1000), [standard], [Oracle]
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#binary-large-object-string-type
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#binary-large-object-string-type
/// [Oracle]: https://docs.oracle.com/javadb/10.8.3.0/ref/rrefblob.html
Blob(Option<u64>),
/// [MySQL] blob with up to 2**8 bytes.
/// [MySQL] blob with up to 2**8 bytes
///
/// [MySQL]: https://dev.mysql.com/doc/refman/9.1/en/blob.html
TinyBlob,
/// [MySQL] blob with up to 2**24 bytes.
/// [MySQL] blob with up to 2**24 bytes
///
/// [MySQL]: https://dev.mysql.com/doc/refman/9.1/en/blob.html
MediumBlob,
/// [MySQL] blob with up to 2**32 bytes.
/// [MySQL] blob with up to 2**32 bytes
///
/// [MySQL]: https://dev.mysql.com/doc/refman/9.1/en/blob.html
LongBlob,
/// Variable-length binary data with optional length.
///
/// [BigQuery]: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#bytes_type
/// [bigquery]: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#bytes_type
Bytes(Option<u64>),
/// Numeric type with optional precision and scale, e.g. NUMERIC(10,2), [SQL Standard][1].
/// Numeric type with optional precision and scale e.g. NUMERIC(10,2), [standard][1]
///
/// [1]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#exact-numeric-type
Numeric(ExactNumberInfo),
/// Decimal type with optional precision and scale, e.g. DECIMAL(10,2), [SQL Standard][1].
/// Decimal type with optional precision and scale e.g. DECIMAL(10,2), [standard][1]
///
/// [1]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#exact-numeric-type
Decimal(ExactNumberInfo),
/// [BigNumeric] type used in BigQuery.
/// [BigNumeric] type used in BigQuery
///
/// [BigNumeric]: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#bignumeric_literals
BigNumeric(ExactNumberInfo),
/// This is alias for `BigNumeric` type used in BigQuery.
/// This is alias for `BigNumeric` type used in BigQuery
///
/// [BigDecimal]: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#decimal_types
BigDecimal(ExactNumberInfo),
/// Dec type with optional precision and scale, e.g. DEC(10,2), [SQL Standard][1].
/// Dec type with optional precision and scale e.g. DEC(10,2), [standard][1]
///
/// [1]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#exact-numeric-type
Dec(ExactNumberInfo),
/// Floating point with optional precision, e.g. FLOAT(8).
/// Floating point with optional precision e.g. FLOAT(8)
Float(Option<u64>),
/// Tiny integer with optional display width, e.g. TINYINT or TINYINT(3).
/// Tiny integer with optional display width e.g. TINYINT or TINYINT(3)
TinyInt(Option<u64>),
/// Unsigned tiny integer with optional display width,
/// e.g. TINYINT UNSIGNED or TINYINT(3) UNSIGNED.
TinyIntUnsigned(Option<u64>),
/// Unsigned tiny integer, e.g. UTINYINT
UTinyInt,
/// Int2 is an alias for SmallInt in [PostgreSQL].
/// Note: Int2 means 2 bytes in PostgreSQL (not 2 bits).
/// Int2 with optional display width, e.g. INT2 or INT2(5).
/// Unsigned tiny integer with optional display width e.g. TINYINT UNSIGNED or TINYINT(3) UNSIGNED
UnsignedTinyInt(Option<u64>),
/// Int2 as alias for SmallInt in [postgresql]
/// Note: Int2 mean 2 bytes in postgres (not 2 bits)
/// Int2 with optional display width e.g. INT2 or INT2(5)
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype.html
/// [postgresql]: https://www.postgresql.org/docs/15/datatype.html
Int2(Option<u64>),
/// Unsigned Int2 with optional display width, e.g. INT2 UNSIGNED or INT2(5) UNSIGNED.
Int2Unsigned(Option<u64>),
/// Small integer with optional display width, e.g. SMALLINT or SMALLINT(5).
/// Unsigned Int2 with optional display width e.g. INT2 Unsigned or INT2(5) Unsigned
UnsignedInt2(Option<u64>),
/// Small integer with optional display width e.g. SMALLINT or SMALLINT(5)
SmallInt(Option<u64>),
/// Unsigned small integer with optional display width,
/// e.g. SMALLINT UNSIGNED or SMALLINT(5) UNSIGNED.
SmallIntUnsigned(Option<u64>),
/// Unsigned small integer, e.g. USMALLINT.
USmallInt,
/// MySQL medium integer ([1]) with optional display width,
/// e.g. MEDIUMINT or MEDIUMINT(5).
/// Unsigned small integer with optional display width e.g. SMALLINT UNSIGNED or SMALLINT(5) UNSIGNED
UnsignedSmallInt(Option<u64>),
/// MySQL medium integer ([1]) with optional display width e.g. MEDIUMINT or MEDIUMINT(5)
///
/// [1]: https://dev.mysql.com/doc/refman/8.0/en/integer-types.html
MediumInt(Option<u64>),
/// Unsigned medium integer ([1]) with optional display width,
/// e.g. MEDIUMINT UNSIGNED or MEDIUMINT(5) UNSIGNED.
/// Unsigned medium integer ([1]) with optional display width e.g. MEDIUMINT UNSIGNED or MEDIUMINT(5) UNSIGNED
///
/// [1]: https://dev.mysql.com/doc/refman/8.0/en/integer-types.html
MediumIntUnsigned(Option<u64>),
/// Int with optional display width, e.g. INT or INT(11).
UnsignedMediumInt(Option<u64>),
/// Int with optional display width e.g. INT or INT(11)
Int(Option<u64>),
/// Int4 is an alias for Integer in [PostgreSQL].
/// Note: Int4 means 4 bytes in PostgreSQL (not 4 bits).
/// Int4 with optional display width, e.g. Int4 or Int4(11).
/// Int4 as alias for Integer in [postgresql]
/// Note: Int4 mean 4 bytes in postgres (not 4 bits)
/// Int4 with optional display width e.g. Int4 or Int4(11)
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype.html
/// [postgresql]: https://www.postgresql.org/docs/15/datatype.html
Int4(Option<u64>),
/// Int8 is an alias for BigInt in [PostgreSQL] and Integer type in [ClickHouse].
/// Int8 with optional display width, e.g. INT8 or INT8(11).
/// Note: Int8 means 8 bytes in [PostgreSQL], but 8 bits in [ClickHouse].
/// Int8 as alias for Bigint in [postgresql] and integer type in [clickhouse]
/// Note: Int8 mean 8 bytes in [postgresql] (not 8 bits)
/// Int8 with optional display width e.g. INT8 or INT8(11)
/// Note: Int8 mean 8 bits in [clickhouse]
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype.html
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [postgresql]: https://www.postgresql.org/docs/15/datatype.html
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
Int8(Option<u64>),
/// Integer type in [ClickHouse].
/// Note: Int16 means 16 bits in [ClickHouse].
/// Integer type in [clickhouse]
/// Note: Int16 mean 16 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
Int16,
/// Integer type in [ClickHouse].
/// Note: Int32 means 32 bits in [ClickHouse].
/// Integer type in [clickhouse]
/// Note: Int16 mean 32 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
Int32,
/// Integer type in [BigQuery], [ClickHouse].
/// Integer type in [bigquery], [clickhouse]
///
/// [BigQuery]: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#integer_types
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [bigquery]: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#integer_types
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
Int64,
/// Integer type in [ClickHouse].
/// Note: Int128 means 128 bits in [ClickHouse].
/// Integer type in [clickhouse]
/// Note: Int128 mean 128 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
Int128,
/// Integer type in [ClickHouse].
/// Note: Int256 means 256 bits in [ClickHouse].
/// Integer type in [clickhouse]
/// Note: Int256 mean 256 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
Int256,
/// Integer with optional display width, e.g. INTEGER or INTEGER(11).
/// Integer with optional display width e.g. INTEGER or INTEGER(11)
Integer(Option<u64>),
/// Unsigned int with optional display width, e.g. INT UNSIGNED or INT(11) UNSIGNED.
IntUnsigned(Option<u64>),
/// Unsigned int4 with optional display width, e.g. INT4 UNSIGNED or INT4(11) UNSIGNED.
Int4Unsigned(Option<u64>),
/// Unsigned integer with optional display width, e.g. INTEGER UNSIGNED or INTEGER(11) UNSIGNED.
IntegerUnsigned(Option<u64>),
/// 128-bit integer type, e.g. HUGEINT.
HugeInt,
/// Unsigned 128-bit integer type, e.g. UHUGEINT.
UHugeInt,
/// Unsigned integer type in [ClickHouse].
/// Note: UInt8 means 8 bits in [ClickHouse].
/// Unsigned int with optional display width e.g. INT UNSIGNED or INT(11) UNSIGNED
UnsignedInt(Option<u64>),
/// Unsigned int4 with optional display width e.g. INT4 UNSIGNED or INT4(11) UNSIGNED
UnsignedInt4(Option<u64>),
/// Unsigned integer with optional display width e.g. INTEGER UNSIGNED or INTEGER(11) UNSIGNED
UnsignedInteger(Option<u64>),
/// Unsigned integer type in [clickhouse]
/// Note: UInt8 mean 8 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
UInt8,
/// Unsigned integer type in [ClickHouse].
/// Note: UInt16 means 16 bits in [ClickHouse].
/// Unsigned integer type in [clickhouse]
/// Note: UInt16 mean 16 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
UInt16,
/// Unsigned integer type in [ClickHouse].
/// Note: UInt32 means 32 bits in [ClickHouse].
/// Unsigned integer type in [clickhouse]
/// Note: UInt32 mean 32 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
UInt32,
/// Unsigned integer type in [ClickHouse].
/// Note: UInt64 means 64 bits in [ClickHouse].
/// Unsigned integer type in [clickhouse]
/// Note: UInt64 mean 64 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
UInt64,
/// Unsigned integer type in [ClickHouse].
/// Note: UInt128 means 128 bits in [ClickHouse].
/// Unsigned integer type in [clickhouse]
/// Note: UInt128 mean 128 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
UInt128,
/// Unsigned integer type in [ClickHouse].
/// Note: UInt256 means 256 bits in [ClickHouse].
/// Unsigned integer type in [clickhouse]
/// Note: UInt256 mean 256 bits in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint
UInt256,
/// Big integer with optional display width, e.g. BIGINT or BIGINT(20).
/// Big integer with optional display width e.g. BIGINT or BIGINT(20)
BigInt(Option<u64>),
/// Unsigned big integer with optional display width, e.g. BIGINT UNSIGNED or BIGINT(20) UNSIGNED.
BigIntUnsigned(Option<u64>),
/// Unsigned big integer, e.g. UBIGINT.
UBigInt,
/// Unsigned Int8 with optional display width, e.g. INT8 UNSIGNED or INT8(11) UNSIGNED.
Int8Unsigned(Option<u64>),
/// Signed integer as used in [MySQL CAST] target types, without optional `INTEGER` suffix,
/// e.g. `SIGNED`
/// Unsigned big integer with optional display width e.g. BIGINT UNSIGNED or BIGINT(20) UNSIGNED
UnsignedBigInt(Option<u64>),
/// Unsigned Int8 with optional display width e.g. INT8 UNSIGNED or INT8(11) UNSIGNED
UnsignedInt8(Option<u64>),
/// Float4 as alias for Real in [postgresql]
///
/// [MySQL CAST]: https://dev.mysql.com/doc/refman/8.4/en/cast-functions.html
Signed,
/// Signed integer as used in [MySQL CAST] target types, with optional `INTEGER` suffix,
/// e.g. `SIGNED INTEGER`
///
/// [MySQL CAST]: https://dev.mysql.com/doc/refman/8.4/en/cast-functions.html
SignedInteger,
/// Signed integer as used in [MySQL CAST] target types, without optional `INTEGER` suffix,
/// e.g. `SIGNED`
///
/// [MySQL CAST]: https://dev.mysql.com/doc/refman/8.4/en/cast-functions.html
Unsigned,
/// Unsigned integer as used in [MySQL CAST] target types, with optional `INTEGER` suffix,
/// e.g. `UNSIGNED INTEGER`.
///
/// [MySQL CAST]: https://dev.mysql.com/doc/refman/8.4/en/cast-functions.html
UnsignedInteger,
/// Float4 is an alias for Real in [PostgreSQL].
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype.html
/// [postgresql]: https://www.postgresql.org/docs/15/datatype.html
Float4,
/// Floating point in [ClickHouse].
/// Floating point in [clickhouse]
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/float
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/float
Float32,
/// Floating point in [BigQuery].
/// Floating point in [bigquery]
///
/// [BigQuery]: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#floating_point_types
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/float
/// [bigquery]: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#floating_point_types
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/float
Float64,
/// Floating point, e.g. REAL.
/// Floating point e.g. REAL
Real,
/// Float8 is an alias for Double in [PostgreSQL].
/// Float8 as alias for Double in [postgresql]
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype.html
/// [postgresql]: https://www.postgresql.org/docs/15/datatype.html
Float8,
/// Double
Double(ExactNumberInfo),
/// Double Precision, see [SQL Standard], [PostgreSQL].
/// Double PRECISION e.g. [standard], [postgresql]
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#approximate-numeric-type
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype-numeric.html
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#approximate-numeric-type
/// [postgresql]: https://www.postgresql.org/docs/current/datatype-numeric.html
DoublePrecision,
/// Bool is an alias for Boolean, see [PostgreSQL].
/// Bool as alias for Boolean in [postgresql]
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype.html
/// [postgresql]: https://www.postgresql.org/docs/15/datatype.html
Bool,
/// Boolean type.
/// Boolean
Boolean,
/// Date type.
/// Date
Date,
/// Date32 with the same range as Datetime64.
/// Date32 with the same range as Datetime64
///
/// [1]: https://clickhouse.com/docs/en/sql-reference/data-types/date32
Date32,
/// Time with optional time precision and time zone information, see [SQL Standard][1].
/// Time with optional time precision and time zone information e.g. [standard][1].
///
/// [1]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#datetime-type
Time(Option<u64>, TimezoneInfo),
/// Datetime with optional time precision, see [MySQL][1].
/// Datetime with optional time precision e.g. [MySQL][1].
///
/// [1]: https://dev.mysql.com/doc/refman/8.0/en/datetime.html
Datetime(Option<u64>),
/// Datetime with time precision and optional timezone, see [ClickHouse][1].
/// Datetime with time precision and optional timezone e.g. [ClickHouse][1].
///
/// [1]: https://clickhouse.com/docs/en/sql-reference/data-types/datetime64
Datetime64(u64, Option<String>),
/// Timestamp with optional time precision and time zone information, see [SQL Standard][1].
/// Timestamp with optional time precision and time zone information e.g. [standard][1].
///
/// [1]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#datetime-type
Timestamp(Option<u64>, TimezoneInfo),
/// Databricks timestamp without time zone. See [1].
///
/// [1]: https://docs.databricks.com/aws/en/sql/language-manual/data-types/timestamp-ntz-type
TimestampNtz,
/// Interval type.
/// Interval
Interval,
/// JSON type.
/// JSON type
JSON,
/// Binary JSON type.
/// Binary JSON type
JSONB,
/// Regclass used in [PostgreSQL] serial.
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype.html
/// Regclass used in postgresql serial
Regclass,
/// Text type.
/// Text
Text,
/// [MySQL] text with up to 2**8 bytes.
/// [MySQL] text with up to 2**8 bytes
///
/// [MySQL]: https://dev.mysql.com/doc/refman/9.1/en/blob.html
TinyText,
/// [MySQL] text with up to 2**24 bytes.
/// [MySQL] text with up to 2**24 bytes
///
/// [MySQL]: https://dev.mysql.com/doc/refman/9.1/en/blob.html
MediumText,
/// [MySQL] text with up to 2**32 bytes.
/// [MySQL] text with up to 2**32 bytes
///
/// [MySQL]: https://dev.mysql.com/doc/refman/9.1/en/blob.html
LongText,
@@ -375,85 +316,67 @@ pub enum DataType {
///
/// [1]: https://clickhouse.com/docs/en/sql-reference/data-types/fixedstring
FixedString(u64),
/// Bytea type, see [PostgreSQL].
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype-bit.html
/// Bytea
Bytea,
/// Bit string, see [PostgreSQL], [MySQL], or [MSSQL].
/// Bit string, e.g. [Postgres], [MySQL], or [MSSQL]
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype-bit.html
/// [Postgres]: https://www.postgresql.org/docs/current/datatype-bit.html
/// [MySQL]: https://dev.mysql.com/doc/refman/9.1/en/bit-type.html
/// [MSSQL]: https://learn.microsoft.com/en-us/sql/t-sql/data-types/bit-transact-sql?view=sql-server-ver16
Bit(Option<u64>),
/// `BIT VARYING(n)`: Variable-length bit string, see [PostgreSQL].
/// Variable-length bit string e.g. [Postgres]
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype-bit.html
/// [Postgres]: https://www.postgresql.org/docs/current/datatype-bit.html
BitVarying(Option<u64>),
/// `VARBIT(n)`: Variable-length bit string. [PostgreSQL] alias for `BIT VARYING`.
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/datatype.html
VarBit(Option<u64>),
/// Custom types.
/// Custom type such as enums
Custom(ObjectName, Vec<String>),
/// Arrays.
/// Arrays
Array(ArrayElemTypeDef),
/// Map, see [ClickHouse].
/// Map
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/map
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/map
Map(Box<DataType>, Box<DataType>),
/// Tuple, see [ClickHouse].
/// Tuple
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/tuple
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/tuple
Tuple(Vec<StructField>),
/// Nested type, see [ClickHouse].
/// Nested
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/nested-data-structures/nested
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/nested-data-structures/nested
Nested(Vec<ColumnDef>),
/// Enum type.
/// Enums
Enum(Vec<EnumMember>, Option<u8>),
/// Set type.
/// Set
Set(Vec<String>),
/// Struct type, see [Hive], [BigQuery].
/// Struct
///
/// [Hive]: https://docs.cloudera.com/cdw-runtime/cloud/impala-sql-reference/topics/impala-struct.html
/// [BigQuery]: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#struct_type
/// [hive]: https://docs.cloudera.com/cdw-runtime/cloud/impala-sql-reference/topics/impala-struct.html
/// [bigquery]: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#struct_type
Struct(Vec<StructField>, StructBracketKind),
/// Union type, see [DuckDB].
/// Union
///
/// [DuckDB]: https://duckdb.org/docs/sql/data_types/union.html
/// [duckdb]: https://duckdb.org/docs/sql/data_types/union.html
Union(Vec<UnionField>),
/// Nullable - a special marker that ClickHouse uses to represent NULL as a data type.
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/nullable
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/nullable
Nullable(Box<DataType>),
/// LowCardinality - changes the internal representation of other data types to be dictionary-encoded.
///
/// [ClickHouse]: https://clickhouse.com/docs/en/sql-reference/data-types/lowcardinality
/// [clickhouse]: https://clickhouse.com/docs/en/sql-reference/data-types/lowcardinality
LowCardinality(Box<DataType>),
/// No type specified - only used with
/// [`SQLiteDialect`](crate::dialect::SQLiteDialect), from statements such
/// as `CREATE TABLE t1 (a)`.
Unspecified,
/// Trigger data type, returned by functions associated with triggers, see [PostgreSQL].
/// Trigger data type, returned by functions associated with triggers
///
/// [PostgreSQL]: https://www.postgresql.org/docs/current/plpgsql-trigger.html
/// [postgresql]: https://www.postgresql.org/docs/current/plpgsql-trigger.html
Trigger,
/// Any data type, used in BigQuery UDF definitions for templated parameters, see [BigQuery].
/// Any data type, used in BigQuery UDF definitions for templated parameters
///
/// [BigQuery]: https://cloud.google.com/bigquery/docs/user-defined-functions#templated-sql-udf-parameters
/// [bigquery]: https://cloud.google.com/bigquery/docs/user-defined-functions#templated-sql-udf-parameters
AnyType,
/// Geometric type, see [PostgreSQL].
///
/// [PostgreSQL]: https://www.postgresql.org/docs/9.5/functions-geometry.html
GeometricType(GeometricTypeKind),
/// PostgreSQL text search vectors, see [PostgreSQL].
///
/// [PostgreSQL]: https://www.postgresql.org/docs/17/datatype-textsearch.html
TsVector,
/// PostgreSQL text search query, see [PostgreSQL].
///
/// [PostgreSQL]: https://www.postgresql.org/docs/17/datatype-textsearch.html
TsQuery,
}
impl fmt::Display for DataType {
@ -476,7 +399,9 @@ impl fmt::Display for DataType {
}
DataType::Clob(size) => format_type_with_optional_length(f, "CLOB", size, false),
DataType::Binary(size) => format_type_with_optional_length(f, "BINARY", size, false),
DataType::Varbinary(size) => format_varbinary_type(f, "VARBINARY", size),
DataType::Varbinary(size) => {
format_type_with_optional_length(f, "VARBINARY", size, false)
}
DataType::Blob(size) => format_type_with_optional_length(f, "BLOB", size, false),
DataType::TinyBlob => write!(f, "TINYBLOB"),
DataType::MediumBlob => write!(f, "MEDIUMBLOB"),
@ -497,29 +422,29 @@ impl fmt::Display for DataType {
DataType::TinyInt(zerofill) => {
format_type_with_optional_length(f, "TINYINT", zerofill, false)
}
DataType::TinyIntUnsigned(zerofill) => {
DataType::UnsignedTinyInt(zerofill) => {
format_type_with_optional_length(f, "TINYINT", zerofill, true)
}
DataType::Int2(zerofill) => {
format_type_with_optional_length(f, "INT2", zerofill, false)
}
DataType::Int2Unsigned(zerofill) => {
DataType::UnsignedInt2(zerofill) => {
format_type_with_optional_length(f, "INT2", zerofill, true)
}
DataType::SmallInt(zerofill) => {
format_type_with_optional_length(f, "SMALLINT", zerofill, false)
}
DataType::SmallIntUnsigned(zerofill) => {
DataType::UnsignedSmallInt(zerofill) => {
format_type_with_optional_length(f, "SMALLINT", zerofill, true)
}
DataType::MediumInt(zerofill) => {
format_type_with_optional_length(f, "MEDIUMINT", zerofill, false)
}
DataType::MediumIntUnsigned(zerofill) => {
DataType::UnsignedMediumInt(zerofill) => {
format_type_with_optional_length(f, "MEDIUMINT", zerofill, true)
}
DataType::Int(zerofill) => format_type_with_optional_length(f, "INT", zerofill, false),
DataType::IntUnsigned(zerofill) => {
DataType::UnsignedInt(zerofill) => {
format_type_with_optional_length(f, "INT", zerofill, true)
}
DataType::Int4(zerofill) => {
@ -543,39 +468,24 @@ impl fmt::Display for DataType {
DataType::Int256 => {
write!(f, "Int256")
}
DataType::HugeInt => {
write!(f, "HUGEINT")
}
DataType::Int4Unsigned(zerofill) => {
DataType::UnsignedInt4(zerofill) => {
format_type_with_optional_length(f, "INT4", zerofill, true)
}
DataType::Integer(zerofill) => {
format_type_with_optional_length(f, "INTEGER", zerofill, false)
}
DataType::IntegerUnsigned(zerofill) => {
DataType::UnsignedInteger(zerofill) => {
format_type_with_optional_length(f, "INTEGER", zerofill, true)
}
DataType::BigInt(zerofill) => {
format_type_with_optional_length(f, "BIGINT", zerofill, false)
}
DataType::BigIntUnsigned(zerofill) => {
DataType::UnsignedBigInt(zerofill) => {
format_type_with_optional_length(f, "BIGINT", zerofill, true)
}
DataType::Int8Unsigned(zerofill) => {
DataType::UnsignedInt8(zerofill) => {
format_type_with_optional_length(f, "INT8", zerofill, true)
}
DataType::UTinyInt => {
write!(f, "UTINYINT")
}
DataType::USmallInt => {
write!(f, "USMALLINT")
}
DataType::UBigInt => {
write!(f, "UBIGINT")
}
DataType::UHugeInt => {
write!(f, "UHUGEINT")
}
DataType::UInt8 => {
write!(f, "UInt8")
}
@ -594,18 +504,6 @@ impl fmt::Display for DataType {
DataType::UInt256 => {
write!(f, "UInt256")
}
DataType::Signed => {
write!(f, "SIGNED")
}
DataType::SignedInteger => {
write!(f, "SIGNED INTEGER")
}
DataType::Unsigned => {
write!(f, "UNSIGNED")
}
DataType::UnsignedInteger => {
write!(f, "UNSIGNED INTEGER")
}
DataType::Real => write!(f, "REAL"),
DataType::Float4 => write!(f, "FLOAT4"),
DataType::Float32 => write!(f, "Float32"),
@ -626,7 +524,6 @@ impl fmt::Display for DataType {
DataType::Timestamp(precision, timezone_info) => {
format_datetime_precision_and_tz(f, "TIMESTAMP", precision, timezone_info)
}
DataType::TimestampNtz => write!(f, "TIMESTAMP_NTZ"),
DataType::Datetime64(precision, timezone) => {
format_clickhouse_datetime_precision_and_timezone(
f,
@ -649,7 +546,6 @@ impl fmt::Display for DataType {
DataType::BitVarying(size) => {
format_type_with_optional_length(f, "BIT VARYING", size, false)
}
DataType::VarBit(size) => format_type_with_optional_length(f, "VARBIT", size, false),
DataType::Array(ty) => match ty {
ArrayElemTypeDef::None => write!(f, "ARRAY"),
ArrayElemTypeDef::SquareBracket(t, None) => write!(f, "{t}[]"),
@ -666,7 +562,7 @@ impl fmt::Display for DataType {
}
DataType::Enum(vals, bits) => {
match bits {
Some(bits) => write!(f, "ENUM{bits}"),
Some(bits) => write!(f, "ENUM{}", bits),
None => write!(f, "ENUM"),
}?;
write!(f, "(")?;
@ -714,16 +610,16 @@ impl fmt::Display for DataType {
}
// ClickHouse
DataType::Nullable(data_type) => {
write!(f, "Nullable({data_type})")
write!(f, "Nullable({})", data_type)
}
DataType::FixedString(character_length) => {
write!(f, "FixedString({character_length})")
write!(f, "FixedString({})", character_length)
}
DataType::LowCardinality(data_type) => {
write!(f, "LowCardinality({data_type})")
write!(f, "LowCardinality({})", data_type)
}
DataType::Map(key_data_type, value_data_type) => {
write!(f, "Map({key_data_type}, {value_data_type})")
write!(f, "Map({}, {})", key_data_type, value_data_type)
}
DataType::Tuple(fields) => {
write!(f, "Tuple({})", display_comma_separated(fields))
@ -734,20 +630,6 @@ impl fmt::Display for DataType {
DataType::Unspecified => Ok(()),
DataType::Trigger => write!(f, "TRIGGER"),
DataType::AnyType => write!(f, "ANY TYPE"),
DataType::Table(fields) => match fields {
Some(fields) => {
write!(f, "TABLE({})", display_comma_separated(fields))
}
None => {
write!(f, "TABLE")
}
},
DataType::NamedTable { name, columns } => {
write!(f, "{} TABLE ({})", name, display_comma_separated(columns))
}
DataType::GeometricType(kind) => write!(f, "{kind}"),
DataType::TsVector => write!(f, "TSVECTOR"),
DataType::TsQuery => write!(f, "TSQUERY"),
}
}
}
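// Illustrative sketch, not part of this diff: the Display impl above
// round-trips variants to their SQL spelling, including the new T-SQL
// `VARBINARY(MAX)` form handled by `format_varbinary_type` below.
#[cfg(test)]
mod data_type_display_sketch {
    use super::*;

    #[test]
    fn renders_blob_and_varbinary_variants() {
        assert_eq!(DataType::TinyBlob.to_string(), "TINYBLOB");
        assert_eq!(
            DataType::Varbinary(Some(BinaryLength::Max)).to_string(),
            "VARBINARY(MAX)"
        );
    }
}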
@ -780,18 +662,6 @@ fn format_character_string_type(
Ok(())
}
fn format_varbinary_type(
f: &mut fmt::Formatter,
sql_type: &str,
size: &Option<BinaryLength>,
) -> fmt::Result {
write!(f, "{sql_type}")?;
if let Some(size) = size {
write!(f, "({size})")?;
}
Ok(())
}
fn format_datetime_precision_and_tz(
f: &mut fmt::Formatter,
sql_type: &'static str,
@ -849,19 +719,19 @@ pub enum StructBracketKind {
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum TimezoneInfo {
/// No information about time zone, e.g. TIMESTAMP
/// No information about time zone. E.g., TIMESTAMP
None,
/// Temporal type 'WITH TIME ZONE', e.g. TIMESTAMP WITH TIME ZONE, [SQL Standard], [Oracle]
/// Temporal type 'WITH TIME ZONE'. E.g., TIMESTAMP WITH TIME ZONE, [standard], [Oracle]
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#datetime-type
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#datetime-type
/// [Oracle]: https://docs.oracle.com/en/database/oracle/oracle-database/12.2/nlspg/datetime-data-types-and-time-zone-support.html#GUID-3F1C388E-C651-43D5-ADBC-1A49E5C2CA05
WithTimeZone,
/// Temporal type 'WITHOUT TIME ZONE', e.g. TIME WITHOUT TIME ZONE, [SQL Standard], [Postgresql]
/// Temporal type 'WITHOUT TIME ZONE'. E.g., TIME WITHOUT TIME ZONE, [standard], [Postgresql]
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#datetime-type
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#datetime-type
/// [Postgresql]: https://www.postgresql.org/docs/current/datatype-datetime.html
WithoutTimeZone,
/// Postgresql specific `WITH TIME ZONE` formatting, for both TIME and TIMESTAMP, e.g. TIMETZ, [Postgresql]
/// Postgresql specific `WITH TIME ZONE` formatting, for both TIME and TIMESTAMP. E.g., TIMETZ, [Postgresql]
///
/// [Postgresql]: https://www.postgresql.org/docs/current/datatype-datetime.html
Tz,
@ -890,18 +760,18 @@ impl fmt::Display for TimezoneInfo {
}
/// Additional information for `NUMERIC`, `DECIMAL`, and `DEC` data types
/// following the 2016 [SQL Standard].
/// following the 2016 [standard].
///
/// [SQL Standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#exact-numeric-type
/// [standard]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#exact-numeric-type
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum ExactNumberInfo {
/// No additional information, e.g. `DECIMAL`
/// No additional information e.g. `DECIMAL`
None,
/// Only precision information, e.g. `DECIMAL(10)`
/// Only precision information e.g. `DECIMAL(10)`
Precision(u64),
/// Precision and scale information, e.g. `DECIMAL(10,2)`
/// Precision and scale information e.g. `DECIMAL(10,2)`
PrecisionAndScale(u64, u64),
}
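// Hypothetical helper (illustration only, not part of the crate API):
// callers often want the precision regardless of which form was parsed,
// and a single match covers all three variants.
#[allow(dead_code)]
fn precision_of(info: &ExactNumberInfo) -> Option<u64> {
    match info {
        ExactNumberInfo::None => None,
        ExactNumberInfo::Precision(p) => Some(*p),
        ExactNumberInfo::PrecisionAndScale(p, _) => Some(*p),
    }
}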
@ -942,7 +812,7 @@ impl fmt::Display for CharacterLength {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
CharacterLength::IntegerLength { length, unit } => {
write!(f, "{length}")?;
write!(f, "{}", length)?;
if let Some(unit) = unit {
write!(f, " {unit}")?;
}
@ -955,7 +825,7 @@ impl fmt::Display for CharacterLength {
}
}
/// Possible units for characters, initially based on 2016 ANSI [SQL Standard][1].
/// Possible units for characters, initially based on 2016 ANSI [standard][1].
///
/// [1]: https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#char-length-units
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
@ -981,32 +851,6 @@ impl fmt::Display for CharLengthUnits {
}
}
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum BinaryLength {
IntegerLength {
/// Default (if VARYING)
length: u64,
},
/// VARBINARY(MAX) used in T-SQL (Microsoft SQL Server)
Max,
}
impl fmt::Display for BinaryLength {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
BinaryLength::IntegerLength { length } => {
write!(f, "{length}")?;
}
BinaryLength::Max => {
write!(f, "MAX")?;
}
}
Ok(())
}
}
/// Represents the data type of the elements in an array (if any) as well as
/// the syntax used to declare the array.
///
@ -1024,34 +868,3 @@ pub enum ArrayElemTypeDef {
/// `Array(Int64)`
Parenthesis(Box<DataType>),
}
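// Sketch (illustrative, not part of this diff): the bracket style recorded
// at parse time drives the output, e.g. Postgres-style square brackets vs
// ClickHouse-style parentheses:
//
//     let pg = DataType::Array(ArrayElemTypeDef::SquareBracket(
//         Box::new(DataType::Int(None)),
//         None,
//     ));
//     assert_eq!(pg.to_string(), "INT[]");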
/// Represents different types of geometric shapes which are commonly used in
/// PostgreSQL/Redshift for spatial operations and geometry-related computations.
///
/// [PostgreSQL]: https://www.postgresql.org/docs/9.5/functions-geometry.html
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum GeometricTypeKind {
Point,
Line,
LineSegment,
GeometricBox,
GeometricPath,
Polygon,
Circle,
}
impl fmt::Display for GeometricTypeKind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
GeometricTypeKind::Point => write!(f, "point"),
GeometricTypeKind::Line => write!(f, "line"),
GeometricTypeKind::LineSegment => write!(f, "lseg"),
GeometricTypeKind::GeometricBox => write!(f, "box"),
GeometricTypeKind::GeometricPath => write!(f, "path"),
GeometricTypeKind::Polygon => write!(f, "polygon"),
GeometricTypeKind::Circle => write!(f, "circle"),
}
}
}
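// Note (illustrative): the lowercase spellings above deliberately match
// PostgreSQL's geometric type names, e.g.:
//
//     assert_eq!(GeometricTypeKind::LineSegment.to_string(), "lseg");
//     assert_eq!(GeometricTypeKind::GeometricBox.to_string(), "box");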

View file

@ -173,7 +173,7 @@ impl fmt::Display for AlterRoleOperation {
in_database,
} => {
if let Some(database_name) = in_database {
write!(f, "IN DATABASE {database_name} ")?;
write!(f, "IN DATABASE {} ", database_name)?;
}
match config_value {
@ -187,7 +187,7 @@ impl fmt::Display for AlterRoleOperation {
in_database,
} => {
if let Some(database_name) = in_database {
write!(f, "IN DATABASE {database_name} ")?;
write!(f, "IN DATABASE {} ", database_name)?;
}
match config_name {
@ -218,15 +218,15 @@ impl fmt::Display for Use {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("USE ")?;
match self {
Use::Catalog(name) => write!(f, "CATALOG {name}"),
Use::Schema(name) => write!(f, "SCHEMA {name}"),
Use::Database(name) => write!(f, "DATABASE {name}"),
Use::Warehouse(name) => write!(f, "WAREHOUSE {name}"),
Use::Role(name) => write!(f, "ROLE {name}"),
Use::Catalog(name) => write!(f, "CATALOG {}", name),
Use::Schema(name) => write!(f, "SCHEMA {}", name),
Use::Database(name) => write!(f, "DATABASE {}", name),
Use::Warehouse(name) => write!(f, "WAREHOUSE {}", name),
Use::Role(name) => write!(f, "ROLE {}", name),
Use::SecondaryRoles(secondary_roles) => {
write!(f, "SECONDARY ROLES {secondary_roles}")
write!(f, "SECONDARY ROLES {}", secondary_roles)
}
Use::Object(name) => write!(f, "{name}"),
Use::Object(name) => write!(f, "{}", name),
Use::Default => write!(f, "DEFAULT"),
}
}

View file

@ -30,48 +30,21 @@ use sqlparser_derive::{Visit, VisitMut};
use crate::ast::value::escape_single_quote_string;
use crate::ast::{
display_comma_separated, display_separated, ArgMode, CommentDef, CreateFunctionBody,
CreateFunctionUsing, DataType, Expr, FunctionBehavior, FunctionCalledOnNull,
FunctionDeterminismSpecifier, FunctionParallel, Ident, IndexColumn, MySQLColumnPosition,
ObjectName, OperateFunctionArg, OrderByExpr, ProjectionSelect, SequenceOptions, SqlOption, Tag,
Value, ValueWithSpan,
display_comma_separated, display_separated, CreateFunctionBody, CreateFunctionUsing, DataType,
Expr, FunctionBehavior, FunctionCalledOnNull, FunctionDeterminismSpecifier, FunctionParallel,
Ident, MySQLColumnPosition, ObjectName, OperateFunctionArg, OrderByExpr, ProjectionSelect,
SequenceOptions, SqlOption, Tag, Value,
};
use crate::keywords::Keyword;
use crate::tokenizer::Token;
/// ALTER TABLE operation REPLICA IDENTITY values
/// See [Postgres ALTER TABLE docs](https://www.postgresql.org/docs/current/sql-altertable.html)
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum ReplicaIdentity {
None,
Full,
Default,
Index(Ident),
}
impl fmt::Display for ReplicaIdentity {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ReplicaIdentity::None => f.write_str("NONE"),
ReplicaIdentity::Full => f.write_str("FULL"),
ReplicaIdentity::Default => f.write_str("DEFAULT"),
ReplicaIdentity::Index(idx) => write!(f, "USING INDEX {idx}"),
}
}
}
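// Sketch (illustrative, not part of this diff): combined with
// `AlterTableOperation::ReplicaIdentity` below, this renders the Postgres
// form, e.g.:
//
//     let op = AlterTableOperation::ReplicaIdentity {
//         identity: ReplicaIdentity::Index(Ident::new("users_pkey")),
//     };
//     assert_eq!(op.to_string(), "REPLICA IDENTITY USING INDEX users_pkey");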
/// An `ALTER TABLE` (`Statement::AlterTable`) operation
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum AlterTableOperation {
/// `ADD <table_constraint> [NOT VALID]`
AddConstraint {
constraint: TableConstraint,
not_valid: bool,
},
/// `ADD <table_constraint>`
AddConstraint(TableConstraint),
/// `ADD [COLUMN] [IF NOT EXISTS] <column_def>`
AddColumn {
/// `[COLUMN]`.
@ -92,6 +65,7 @@ pub enum AlterTableOperation {
name: Ident,
select: ProjectionSelect,
},
/// `DROP PROJECTION [IF EXISTS] name`
///
/// Note: this is a ClickHouse-specific operation.
@ -100,6 +74,7 @@ pub enum AlterTableOperation {
if_exists: bool,
name: Ident,
},
/// `MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]`
///
/// Note: this is a ClickHouse-specific operation.
@ -109,6 +84,7 @@ pub enum AlterTableOperation {
name: Ident,
partition: Option<Ident>,
},
/// `CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]`
///
/// Note: this is a ClickHouse-specific operation.
@ -118,6 +94,7 @@ pub enum AlterTableOperation {
name: Ident,
partition: Option<Ident>,
},
/// `DISABLE ROW LEVEL SECURITY`
///
/// Note: this is a PostgreSQL-specific operation.
@ -140,10 +117,9 @@ pub enum AlterTableOperation {
name: Ident,
drop_behavior: Option<DropBehavior>,
},
/// `DROP [ COLUMN ] [ IF EXISTS ] <column_name> [ , <column_name>, ... ] [ CASCADE ]`
/// `DROP [ COLUMN ] [ IF EXISTS ] <column_name> [ CASCADE ]`
DropColumn {
has_column_keyword: bool,
column_names: Vec<Ident>,
column_name: Ident,
if_exists: bool,
drop_behavior: Option<DropBehavior>,
},
@ -178,24 +154,8 @@ pub enum AlterTableOperation {
},
/// `DROP PRIMARY KEY`
///
/// Note: this is a [MySQL]-specific operation.
///
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/alter-table.html
/// Note: this is a MySQL-specific operation.
DropPrimaryKey,
/// `DROP FOREIGN KEY <fk_symbol>`
///
/// Note: this is a [MySQL]-specific operation.
///
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/alter-table.html
DropForeignKey {
name: Ident,
},
/// `DROP INDEX <index_name>`
///
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/alter-table.html
DropIndex {
name: Ident,
},
/// `ENABLE ALWAYS RULE rewrite_rule_name`
///
/// Note: this is a PostgreSQL-specific operation.
@ -241,13 +201,6 @@ pub enum AlterTableOperation {
old_partitions: Vec<Expr>,
new_partitions: Vec<Expr>,
},
/// REPLICA IDENTITY { DEFAULT | USING INDEX index_name | FULL | NOTHING }
///
/// Note: this is a PostgreSQL-specific operation.
/// Please refer to [PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-altertable.html)
ReplicaIdentity {
identity: ReplicaIdentity,
},
/// Add Partitions
AddPartitions {
if_not_exists: bool,
@ -319,38 +272,6 @@ pub enum AlterTableOperation {
DropClusteringKey,
SuspendRecluster,
ResumeRecluster,
/// `ALGORITHM [=] { DEFAULT | INSTANT | INPLACE | COPY }`
///
/// [MySQL]-specific table alter algorithm.
///
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/alter-table.html
Algorithm {
equals: bool,
algorithm: AlterTableAlgorithm,
},
/// `LOCK [=] { DEFAULT | NONE | SHARED | EXCLUSIVE }`
///
/// [MySQL]-specific table alter lock.
///
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/alter-table.html
Lock {
equals: bool,
lock: AlterTableLock,
},
/// `AUTO_INCREMENT [=] <value>`
///
/// [MySQL]-specific table option for raising current auto increment value.
///
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/alter-table.html
AutoIncrement {
equals: bool,
value: ValueWithSpan,
},
/// `VALIDATE CONSTRAINT <name>`
ValidateConstraint {
name: Ident,
},
}
/// An `ALTER Policy` (`Statement::AlterPolicy`) operation
@ -396,54 +317,6 @@ impl fmt::Display for AlterPolicyOperation {
}
}
/// [MySQL] `ALTER TABLE` algorithm.
///
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/alter-table.html
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum AlterTableAlgorithm {
Default,
Instant,
Inplace,
Copy,
}
impl fmt::Display for AlterTableAlgorithm {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match self {
Self::Default => "DEFAULT",
Self::Instant => "INSTANT",
Self::Inplace => "INPLACE",
Self::Copy => "COPY",
})
}
}
/// [MySQL] `ALTER TABLE` lock.
///
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/alter-table.html
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum AlterTableLock {
Default,
None,
Shared,
Exclusive,
}
impl fmt::Display for AlterTableLock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match self {
Self::Default => "DEFAULT",
Self::None => "NONE",
Self::Shared => "SHARED",
Self::Exclusive => "EXCLUSIVE",
})
}
}
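// Sketch (illustrative): both MySQL clauses render with an optional `=`
// controlled by the `equals` flag, e.g.:
//
//     let op = AlterTableOperation::Algorithm {
//         equals: true,
//         algorithm: AlterTableAlgorithm::Inplace,
//     };
//     assert_eq!(op.to_string(), "ALGORITHM = INPLACE");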
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
@ -457,7 +330,7 @@ pub enum Owner {
impl fmt::Display for Owner {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Owner::Ident(ident) => write!(f, "{ident}"),
Owner::Ident(ident) => write!(f, "{}", ident),
Owner::CurrentRole => write!(f, "CURRENT_ROLE"),
Owner::CurrentUser => write!(f, "CURRENT_USER"),
Owner::SessionUser => write!(f, "SESSION_USER"),
@ -465,23 +338,6 @@ impl fmt::Display for Owner {
}
}
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum AlterConnectorOwner {
User(Ident),
Role(Ident),
}
impl fmt::Display for AlterConnectorOwner {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
AlterConnectorOwner::User(ident) => write!(f, "USER {ident}"),
AlterConnectorOwner::Role(ident) => write!(f, "ROLE {ident}"),
}
}
}
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
@ -501,16 +357,7 @@ impl fmt::Display for AlterTableOperation {
display_separated(new_partitions, " "),
ine = if *if_not_exists { " IF NOT EXISTS" } else { "" }
),
AlterTableOperation::AddConstraint {
not_valid,
constraint,
} => {
write!(f, "ADD {constraint}")?;
if *not_valid {
write!(f, " NOT VALID")?;
}
Ok(())
}
AlterTableOperation::AddConstraint(c) => write!(f, "ADD {c}"),
AlterTableOperation::AddColumn {
column_keyword,
if_not_exists,
@ -541,22 +388,14 @@ impl fmt::Display for AlterTableOperation {
if *if_not_exists {
write!(f, " IF NOT EXISTS")?;
}
write!(f, " {name} ({query})")
}
AlterTableOperation::Algorithm { equals, algorithm } => {
write!(
f,
"ALGORITHM {}{}",
if *equals { "= " } else { "" },
algorithm
)
write!(f, " {} ({})", name, query)
}
AlterTableOperation::DropProjection { if_exists, name } => {
write!(f, "DROP PROJECTION")?;
if *if_exists {
write!(f, " IF EXISTS")?;
}
write!(f, " {name}")
write!(f, " {}", name)
}
AlterTableOperation::MaterializeProjection {
if_exists,
@ -567,9 +406,9 @@ impl fmt::Display for AlterTableOperation {
if *if_exists {
write!(f, " IF EXISTS")?;
}
write!(f, " {name}")?;
write!(f, " {}", name)?;
if let Some(partition) = partition {
write!(f, " IN PARTITION {partition}")?;
write!(f, " IN PARTITION {}", partition)?;
}
Ok(())
}
@ -582,9 +421,9 @@ impl fmt::Display for AlterTableOperation {
if *if_exists {
write!(f, " IF EXISTS")?;
}
write!(f, " {name}")?;
write!(f, " {}", name)?;
if let Some(partition) = partition {
write!(f, " IN PARTITION {partition}")?;
write!(f, " IN PARTITION {}", partition)?;
}
Ok(())
}
@ -627,19 +466,15 @@ impl fmt::Display for AlterTableOperation {
)
}
AlterTableOperation::DropPrimaryKey => write!(f, "DROP PRIMARY KEY"),
AlterTableOperation::DropForeignKey { name } => write!(f, "DROP FOREIGN KEY {name}"),
AlterTableOperation::DropIndex { name } => write!(f, "DROP INDEX {name}"),
AlterTableOperation::DropColumn {
has_column_keyword,
column_names: column_name,
column_name,
if_exists,
drop_behavior,
} => write!(
f,
"DROP {}{}{}{}",
if *has_column_keyword { "COLUMN " } else { "" },
"DROP COLUMN {}{}{}",
if *if_exists { "IF EXISTS " } else { "" },
display_comma_separated(column_name),
column_name,
match drop_behavior {
None => "",
Some(DropBehavior::Restrict) => " RESTRICT",
@ -774,23 +609,6 @@ impl fmt::Display for AlterTableOperation {
write!(f, "RESUME RECLUSTER")?;
Ok(())
}
AlterTableOperation::AutoIncrement { equals, value } => {
write!(
f,
"AUTO_INCREMENT {}{}",
if *equals { "= " } else { "" },
value
)
}
AlterTableOperation::Lock { equals, lock } => {
write!(f, "LOCK {}{}", if *equals { "= " } else { "" }, lock)
}
AlterTableOperation::ReplicaIdentity { identity } => {
write!(f, "REPLICA IDENTITY {identity}")
}
AlterTableOperation::ValidateConstraint { name } => {
write!(f, "VALIDATE CONSTRAINT {name}")
}
}
}
}
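// Sketch (illustrative, not part of this diff): the multi-column
// `DROP COLUMN` form introduced above renders via
// `display_comma_separated`, e.g.:
//
//     let op = AlterTableOperation::DropColumn {
//         has_column_keyword: true,
//         column_names: vec![Ident::new("a"), Ident::new("b")],
//         if_exists: true,
//         drop_behavior: None,
//     };
//     assert_eq!(op.to_string(), "DROP COLUMN IF EXISTS a, b");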
@ -805,95 +623,6 @@ impl fmt::Display for AlterIndexOperation {
}
}
/// An `ALTER TYPE` statement (`Statement::AlterType`)
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct AlterType {
pub name: ObjectName,
pub operation: AlterTypeOperation,
}
/// An [AlterType] operation
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum AlterTypeOperation {
Rename(AlterTypeRename),
AddValue(AlterTypeAddValue),
RenameValue(AlterTypeRenameValue),
}
/// See [AlterTypeOperation::Rename]
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct AlterTypeRename {
pub new_name: Ident,
}
/// See [AlterTypeOperation::AddValue]
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct AlterTypeAddValue {
pub if_not_exists: bool,
pub value: Ident,
pub position: Option<AlterTypeAddValuePosition>,
}
/// See [AlterTypeAddValue]
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum AlterTypeAddValuePosition {
Before(Ident),
After(Ident),
}
/// See [AlterTypeOperation::RenameValue]
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct AlterTypeRenameValue {
pub from: Ident,
pub to: Ident,
}
impl fmt::Display for AlterTypeOperation {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::Rename(AlterTypeRename { new_name }) => {
write!(f, "RENAME TO {new_name}")
}
Self::AddValue(AlterTypeAddValue {
if_not_exists,
value,
position,
}) => {
write!(f, "ADD VALUE")?;
if *if_not_exists {
write!(f, " IF NOT EXISTS")?;
}
write!(f, " {value}")?;
match position {
Some(AlterTypeAddValuePosition::Before(neighbor_value)) => {
write!(f, " BEFORE {neighbor_value}")?;
}
Some(AlterTypeAddValuePosition::After(neighbor_value)) => {
write!(f, " AFTER {neighbor_value}")?;
}
None => {}
};
Ok(())
}
Self::RenameValue(AlterTypeRenameValue { from, to }) => {
write!(f, "RENAME VALUE {from} TO {to}")
}
}
}
}
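// Sketch (illustrative): an `ALTER TYPE ... ADD VALUE` operation built by
// hand renders back to the Postgres syntax documented above, e.g.:
//
//     let op = AlterTypeOperation::AddValue(AlterTypeAddValue {
//         if_not_exists: true,
//         value: Ident::new("blue"),
//         position: Some(AlterTypeAddValuePosition::After(Ident::new("green"))),
//     });
//     assert_eq!(op.to_string(), "ADD VALUE IF NOT EXISTS blue AFTER green");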
/// An `ALTER COLUMN` (`Statement::AlterTable`) operation
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
@ -912,10 +641,7 @@ pub enum AlterColumnOperation {
data_type: DataType,
/// PostgreSQL specific
using: Option<Expr>,
/// Set to true if the statement includes the `SET DATA TYPE` keywords
had_set: bool,
},
/// `ADD GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY [ ( sequence_options ) ]`
///
/// Note: this is a PostgreSQL-specific operation.
@ -933,22 +659,15 @@ impl fmt::Display for AlterColumnOperation {
AlterColumnOperation::SetDefault { value } => {
write!(f, "SET DEFAULT {value}")
}
AlterColumnOperation::DropDefault => {
AlterColumnOperation::DropDefault {} => {
write!(f, "DROP DEFAULT")
}
AlterColumnOperation::SetDataType {
data_type,
using,
had_set,
} => {
if *had_set {
write!(f, "SET DATA ")?;
}
write!(f, "TYPE {data_type}")?;
AlterColumnOperation::SetDataType { data_type, using } => {
if let Some(expr) = using {
write!(f, " USING {expr}")?;
write!(f, "SET DATA TYPE {data_type} USING {expr}")
} else {
write!(f, "SET DATA TYPE {data_type}")
}
Ok(())
}
AlterColumnOperation::AddGenerated {
generated_as,
@ -1008,7 +727,7 @@ pub enum TableConstraint {
/// [1]: IndexType
index_type: Option<IndexType>,
/// Identifiers of the columns that are unique.
columns: Vec<IndexColumn>,
columns: Vec<Ident>,
index_options: Vec<IndexOption>,
characteristics: Option<ConstraintCharacteristics>,
/// Optional Postgres nulls handling: `[ NULLS [ NOT ] DISTINCT ]`
@ -1044,7 +763,7 @@ pub enum TableConstraint {
/// [1]: IndexType
index_type: Option<IndexType>,
/// Identifiers of the columns that form the primary key.
columns: Vec<IndexColumn>,
columns: Vec<Ident>,
index_options: Vec<IndexOption>,
characteristics: Option<ConstraintCharacteristics>,
},
@ -1055,9 +774,6 @@ pub enum TableConstraint {
/// }`).
ForeignKey {
name: Option<Ident>,
/// MySQL-specific field
/// <https://dev.mysql.com/doc/refman/8.4/en/create-table-foreign-keys.html>
index_name: Option<Ident>,
columns: Vec<Ident>,
foreign_table: ObjectName,
referred_columns: Vec<Ident>,
@ -1065,13 +781,10 @@ pub enum TableConstraint {
on_update: Option<ReferentialAction>,
characteristics: Option<ConstraintCharacteristics>,
},
/// `[ CONSTRAINT <name> ] CHECK (<expr>) [[NOT] ENFORCED]`
/// `[ CONSTRAINT <name> ] CHECK (<expr>)`
Check {
name: Option<Ident>,
expr: Box<Expr>,
/// MySQL-specific syntax
/// <https://dev.mysql.com/doc/refman/8.4/en/create-table.html>
enforced: Option<bool>,
},
/// MySQL's [index definition][1] for index creation. Not present in ANSI SQL, so for now its usage
/// is restricted to MySQL, as no other dialects supporting this syntax were found.
@ -1089,7 +802,7 @@ pub enum TableConstraint {
/// [1]: IndexType
index_type: Option<IndexType>,
/// Referred column identifier list.
columns: Vec<IndexColumn>,
columns: Vec<Ident>,
},
/// MySQL's [fulltext][1] definition. Since the [`SPATIAL`][2] definition is exactly the same,
/// and MySQL displays both the same way, it is part of this definition as well.
@ -1112,7 +825,7 @@ pub enum TableConstraint {
/// Optional index name.
opt_index_name: Option<Ident>,
/// Referred column identifier list.
columns: Vec<IndexColumn>,
columns: Vec<Ident>,
},
}
@ -1171,7 +884,6 @@ impl fmt::Display for TableConstraint {
}
TableConstraint::ForeignKey {
name,
index_name,
columns,
foreign_table,
referred_columns,
@ -1181,9 +893,8 @@ impl fmt::Display for TableConstraint {
} => {
write!(
f,
"{}FOREIGN KEY{} ({}) REFERENCES {}",
"{}FOREIGN KEY ({}) REFERENCES {}",
display_constraint_name(name),
display_option_spaced(index_name),
display_comma_separated(columns),
foreign_table,
)?;
@ -1197,21 +908,12 @@ impl fmt::Display for TableConstraint {
write!(f, " ON UPDATE {action}")?;
}
if let Some(characteristics) = characteristics {
write!(f, " {characteristics}")?;
write!(f, " {}", characteristics)?;
}
Ok(())
}
TableConstraint::Check {
name,
expr,
enforced,
} => {
write!(f, "{}CHECK ({})", display_constraint_name(name), expr)?;
if let Some(b) = enforced {
write!(f, " {}", if *b { "ENFORCED" } else { "NOT ENFORCED" })
} else {
Ok(())
}
TableConstraint::Check { name, expr } => {
write!(f, "{}CHECK ({})", display_constraint_name(name), expr)
}
TableConstraint::Index {
display_as_key,
@ -1311,20 +1013,13 @@ impl fmt::Display for KeyOrIndexDisplay {
/// [1]: https://dev.mysql.com/doc/refman/8.0/en/create-table.html
/// [2]: https://dev.mysql.com/doc/refman/8.0/en/create-index.html
/// [3]: https://www.postgresql.org/docs/14/sql-createindex.html
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum IndexType {
BTree,
Hash,
GIN,
GiST,
SPGiST,
BRIN,
Bloom,
/// Users may define their own index types, which would
/// not be covered by the above variants.
Custom(Ident),
// TODO add Postgresql's possible indexes
}
impl fmt::Display for IndexType {
@ -1332,12 +1027,6 @@ impl fmt::Display for IndexType {
match self {
Self::BTree => write!(f, "BTREE"),
Self::Hash => write!(f, "HASH"),
Self::GIN => write!(f, "GIN"),
Self::GiST => write!(f, "GIST"),
Self::SPGiST => write!(f, "SPGIST"),
Self::BRIN => write!(f, "BRIN"),
Self::Bloom => write!(f, "BLOOM"),
Self::Custom(name) => write!(f, "{name}"),
}
}
}
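// Sketch (illustrative; `vchordrq` is a made-up extension method): the
// built-in access methods print their keyword while `Custom` falls back
// to the user-supplied identifier:
//
//     assert_eq!(IndexType::SPGiST.to_string(), "SPGIST");
//     assert_eq!(IndexType::Custom(Ident::new("vchordrq")).to_string(), "vchordrq");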
@ -1365,9 +1054,9 @@ impl fmt::Display for IndexOption {
}
}
/// [PostgreSQL] unique index nulls handling option: `[ NULLS [ NOT ] DISTINCT ]`
/// [Postgres] unique index nulls handling option: `[ NULLS [ NOT ] DISTINCT ]`
///
/// [PostgreSQL]: https://www.postgresql.org/docs/17/sql-altertable.html
/// [Postgres]: https://www.postgresql.org/docs/17/sql-altertable.html
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
@ -1396,16 +1085,11 @@ impl fmt::Display for NullsDistinctOption {
pub struct ProcedureParam {
pub name: Ident,
pub data_type: DataType,
pub mode: Option<ArgMode>,
}
impl fmt::Display for ProcedureParam {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(mode) = &self.mode {
write!(f, "{mode} {} {}", self.name, self.data_type)
} else {
write!(f, "{} {}", self.name, self.data_type)
}
write!(f, "{} {}", self.name, self.data_type)
}
}
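// Sketch (illustrative; assumes `ArgMode`'s Display prints `IN`/`OUT`/`INOUT`):
//
//     let param = ProcedureParam {
//         name: Ident::new("total"),
//         data_type: DataType::Int(None),
//         mode: Some(ArgMode::Out),
//     };
//     assert_eq!(param.to_string(), "OUT total INT");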
@ -1416,6 +1100,7 @@ impl fmt::Display for ProcedureParam {
pub struct ColumnDef {
pub name: Ident,
pub data_type: DataType,
pub collation: Option<ObjectName>,
pub options: Vec<ColumnOptionDef>,
}
@ -1426,6 +1111,9 @@ impl fmt::Display for ColumnDef {
} else {
write!(f, "{} {}", self.name, self.data_type)?;
}
if let Some(collation) = &self.collation {
write!(f, " COLLATE {collation}")?;
}
for option in &self.options {
write!(f, " {option}")?;
}
@ -1455,41 +1143,17 @@ impl fmt::Display for ColumnDef {
pub struct ViewColumnDef {
pub name: Ident,
pub data_type: Option<DataType>,
pub options: Option<ColumnOptions>,
}
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum ColumnOptions {
CommaSeparated(Vec<ColumnOption>),
SpaceSeparated(Vec<ColumnOption>),
}
impl ColumnOptions {
pub fn as_slice(&self) -> &[ColumnOption] {
match self {
ColumnOptions::CommaSeparated(options) => options.as_slice(),
ColumnOptions::SpaceSeparated(options) => options.as_slice(),
}
}
pub options: Option<Vec<ColumnOption>>,
}
impl fmt::Display for ViewColumnDef {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.name)?;
if let Some(data_type) = self.data_type.as_ref() {
write!(f, " {data_type}")?;
write!(f, " {}", data_type)?;
}
if let Some(options) = self.options.as_ref() {
match options {
ColumnOptions::CommaSeparated(column_options) => {
write!(f, " {}", display_comma_separated(column_options.as_slice()))?;
}
ColumnOptions::SpaceSeparated(column_options) => {
write!(f, " {}", display_separated(column_options.as_slice(), " "))?
}
}
write!(f, " {}", display_comma_separated(options.as_slice()))?;
}
Ok(())
}
@ -1709,7 +1373,7 @@ pub struct ColumnPolicyProperty {
/// ```
/// [Snowflake]: https://docs.snowflake.com/en/sql-reference/sql/create-table
pub with: bool,
pub policy_name: ObjectName,
pub policy_name: Ident,
pub using_columns: Option<Vec<Ident>>,
}
@ -1796,7 +1460,6 @@ pub enum ColumnOption {
/// - ...
DialectSpecific(Vec<Token>),
CharacterSet(ObjectName),
Collation(ObjectName),
Comment(String),
OnUpdate(Expr),
/// `Generated`s are modifiers that follow a column definition in a `CREATE
@ -1843,13 +1506,6 @@ pub enum ColumnOption {
/// ```
/// [Snowflake]: https://docs.snowflake.com/en/sql-reference/sql/create-table
Tags(TagsColumnOption),
/// MySQL specific: Spatial reference identifier
/// Syntax:
/// ```sql
/// CREATE TABLE geom (g GEOMETRY NOT NULL SRID 4326);
/// ```
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/creating-spatial-indexes.html
Srid(Box<Expr>),
}
impl fmt::Display for ColumnOption {
@ -1874,7 +1530,7 @@ impl fmt::Display for ColumnOption {
} => {
write!(f, "{}", if *is_primary { "PRIMARY KEY" } else { "UNIQUE" })?;
if let Some(characteristics) = characteristics {
write!(f, " {characteristics}")?;
write!(f, " {}", characteristics)?;
}
Ok(())
}
@ -1896,14 +1552,13 @@ impl fmt::Display for ColumnOption {
write!(f, " ON UPDATE {action}")?;
}
if let Some(characteristics) = characteristics {
write!(f, " {characteristics}")?;
write!(f, " {}", characteristics)?;
}
Ok(())
}
Check(expr) => write!(f, "CHECK ({expr})"),
DialectSpecific(val) => write!(f, "{}", display_separated(val, " ")),
CharacterSet(n) => write!(f, "CHARACTER SET {n}"),
Collation(n) => write!(f, "COLLATE {n}"),
Comment(v) => write!(f, "COMMENT '{}'", escape_single_quote_string(v)),
OnUpdate(expr) => write!(f, "ON UPDATE {expr}"),
Generated {
@ -1956,7 +1611,7 @@ impl fmt::Display for ColumnOption {
write!(f, "{parameters}")
}
OnConflict(keyword) => {
write!(f, "ON CONFLICT {keyword:?}")?;
write!(f, "ON CONFLICT {:?}", keyword)?;
Ok(())
}
Policy(parameters) => {
@ -1965,9 +1620,6 @@ impl fmt::Display for ColumnOption {
Tags(tags) => {
write!(f, "{tags}")
}
Srid(srid) => {
write!(f, "SRID {srid}")
}
}
}
}
@ -2281,63 +1933,10 @@ impl fmt::Display for ClusteredBy {
}
}
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
/// ```sql
/// CREATE DOMAIN name [ AS ] data_type
/// [ COLLATE collation ]
/// [ DEFAULT expression ]
/// [ domain_constraint [ ... ] ]
///
/// where domain_constraint is:
///
/// [ CONSTRAINT constraint_name ]
/// { NOT NULL | NULL | CHECK (expression) }
/// ```
/// See [PostgreSQL](https://www.postgresql.org/docs/current/sql-createdomain.html)
pub struct CreateDomain {
/// The name of the domain to be created.
pub name: ObjectName,
/// The data type of the domain.
pub data_type: DataType,
/// The collation of the domain.
pub collation: Option<Ident>,
/// The default value of the domain.
pub default: Option<Expr>,
/// The constraints of the domain.
pub constraints: Vec<TableConstraint>,
}
impl fmt::Display for CreateDomain {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"CREATE DOMAIN {name} AS {data_type}",
name = self.name,
data_type = self.data_type
)?;
if let Some(collation) = &self.collation {
write!(f, " COLLATE {collation}")?;
}
if let Some(default) = &self.default {
write!(f, " DEFAULT {default}")?;
}
if !self.constraints.is_empty() {
write!(f, " {}", display_separated(&self.constraints, " "))?;
}
Ok(())
}
}
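// Sketch (illustrative; assumes the parser wires `CREATE DOMAIN` to this
// node and that the statement round-trips unchanged):
//
//     let sql = "CREATE DOMAIN positive_int AS INTEGER DEFAULT 1 CHECK (VALUE > 0)";
//     let stmts = Parser::parse_sql(&PostgreSqlDialect {}, sql)?;
//     assert_eq!(stmts[0].to_string(), sql);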
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct CreateFunction {
/// True if this is a `CREATE OR ALTER FUNCTION` statement
///
/// [MsSql](https://learn.microsoft.com/en-us/sql/t-sql/statements/create-function-transact-sql?view=sql-server-ver16#or-alter)
pub or_alter: bool,
pub or_replace: bool,
pub temporary: bool,
pub if_not_exists: bool,
@ -2356,15 +1955,15 @@ pub struct CreateFunction {
///
/// IMMUTABLE | STABLE | VOLATILE
///
/// [PostgreSQL](https://www.postgresql.org/docs/current/sql-createfunction.html)
/// [Postgres](https://www.postgresql.org/docs/current/sql-createfunction.html)
pub behavior: Option<FunctionBehavior>,
/// CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT
///
/// [PostgreSQL](https://www.postgresql.org/docs/current/sql-createfunction.html)
/// [Postgres](https://www.postgresql.org/docs/current/sql-createfunction.html)
pub called_on_null: Option<FunctionCalledOnNull>,
/// PARALLEL { UNSAFE | RESTRICTED | SAFE }
///
/// [PostgreSQL](https://www.postgresql.org/docs/current/sql-createfunction.html)
/// [Postgres](https://www.postgresql.org/docs/current/sql-createfunction.html)
pub parallel: Option<FunctionParallel>,
/// USING ... (Hive only)
pub using: Option<CreateFunctionUsing>,
@ -2400,10 +1999,9 @@ impl fmt::Display for CreateFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"CREATE {or_alter}{or_replace}{temp}FUNCTION {if_not_exists}{name}",
"CREATE {or_replace}{temp}FUNCTION {if_not_exists}{name}",
name = self.name,
temp = if self.temporary { "TEMPORARY " } else { "" },
or_alter = if self.or_alter { "OR ALTER " } else { "" },
or_replace = if self.or_replace { "OR REPLACE " } else { "" },
if_not_exists = if self.if_not_exists {
"IF NOT EXISTS "
@ -2441,12 +2039,6 @@ impl fmt::Display for CreateFunction {
if let Some(CreateFunctionBody::Return(function_body)) = &self.function_body {
write!(f, " RETURN {function_body}")?;
}
if let Some(CreateFunctionBody::AsReturnExpr(function_body)) = &self.function_body {
write!(f, " AS RETURN {function_body}")?;
}
if let Some(CreateFunctionBody::AsReturnSelect(function_body)) = &self.function_body {
write!(f, " AS RETURN {function_body}")?;
}
if let Some(using) = &self.using {
write!(f, " {using}")?;
}
@ -2460,67 +2052,6 @@ impl fmt::Display for CreateFunction {
if let Some(CreateFunctionBody::AsAfterOptions(function_body)) = &self.function_body {
write!(f, " AS {function_body}")?;
}
if let Some(CreateFunctionBody::AsBeginEnd(bes)) = &self.function_body {
write!(f, " AS {bes}")?;
}
Ok(())
}
}
/// ```sql
/// CREATE CONNECTOR [IF NOT EXISTS] connector_name
/// [TYPE datasource_type]
/// [URL datasource_url]
/// [COMMENT connector_comment]
/// [WITH DCPROPERTIES(property_name=property_value, ...)]
/// ```
///
/// [Hive](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=27362034#LanguageManualDDL-CreateDataConnectorCreateConnector)
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct CreateConnector {
pub name: Ident,
pub if_not_exists: bool,
pub connector_type: Option<String>,
pub url: Option<String>,
pub comment: Option<CommentDef>,
pub with_dcproperties: Option<Vec<SqlOption>>,
}
impl fmt::Display for CreateConnector {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"CREATE CONNECTOR {if_not_exists}{name}",
if_not_exists = if self.if_not_exists {
"IF NOT EXISTS "
} else {
""
},
name = self.name,
)?;
if let Some(connector_type) = &self.connector_type {
write!(f, " TYPE '{connector_type}'")?;
}
if let Some(url) = &self.url {
write!(f, " URL '{url}'")?;
}
if let Some(comment) = &self.comment {
write!(f, " COMMENT = '{comment}'")?;
}
if let Some(with_dcproperties) = &self.with_dcproperties {
write!(
f,
" WITH DCPROPERTIES({})",
display_comma_separated(with_dcproperties)
)?;
}
Ok(())
}
}
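// Sketch (illustrative, not part of this diff): building the Hive
// connector node by hand and rendering it with the Display impl above:
//
//     let connector = CreateConnector {
//         name: Ident::new("mysql_conn"),
//         if_not_exists: true,
//         connector_type: Some("mysql".to_string()),
//         url: Some("jdbc:mysql://localhost:3306".to_string()),
//         comment: None,
//         with_dcproperties: None,
//     };
//     assert_eq!(
//         connector.to_string(),
//         "CREATE CONNECTOR IF NOT EXISTS mysql_conn TYPE 'mysql' URL 'jdbc:mysql://localhost:3306'"
//     );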

View file

@ -29,38 +29,16 @@ use serde::{Deserialize, Serialize};
#[cfg(feature = "visitor")]
use sqlparser_derive::{Visit, VisitMut};
use crate::display_utils::{indented_list, DisplayCommaSeparated, Indent, NewLine, SpaceOrNewline};
pub use super::ddl::{ColumnDef, TableConstraint};
use super::{
display_comma_separated, display_separated, query::InputFormatClause, Assignment, ClusteredBy,
CommentDef, CreateTableOptions, Expr, FileFormat, FromTable, HiveDistributionStyle, HiveFormat,
HiveIOFormat, HiveRowFormat, Ident, IndexType, InsertAliases, MysqlInsertPriority, ObjectName,
OnCommit, OnInsert, OneOrManyWithParens, OrderByExpr, Query, RowAccessPolicy, SelectItem,
Setting, SqliteOnConflict, StorageSerializationPolicy, TableObject, TableWithJoins, Tag,
WrappedCollection,
CommentDef, Expr, FileFormat, FromTable, HiveDistributionStyle, HiveFormat, HiveIOFormat,
HiveRowFormat, Ident, InsertAliases, MysqlInsertPriority, ObjectName, OnCommit, OnInsert,
OneOrManyWithParens, OrderByExpr, Query, RowAccessPolicy, SelectItem, Setting, SqlOption,
SqliteOnConflict, TableEngine, TableObject, TableWithJoins, Tag, WrappedCollection,
};
/// Index column type.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct IndexColumn {
pub column: OrderByExpr,
pub operator_class: Option<Ident>,
}
impl Display for IndexColumn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.column)?;
if let Some(operator_class) = &self.operator_class {
write!(f, " {operator_class}")?;
}
Ok(())
}
}
/// CREATE INDEX statement.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
@ -70,8 +48,8 @@ pub struct CreateIndex {
pub name: Option<ObjectName>,
#[cfg_attr(feature = "visitor", visit(with = "visit_relation"))]
pub table_name: ObjectName,
pub using: Option<IndexType>,
pub columns: Vec<IndexColumn>,
pub using: Option<Ident>,
pub columns: Vec<OrderByExpr>,
pub unique: bool,
pub concurrently: bool,
pub if_not_exists: bool,
@ -139,7 +117,6 @@ pub struct CreateTable {
pub if_not_exists: bool,
pub transient: bool,
pub volatile: bool,
pub iceberg: bool,
/// Table name
#[cfg_attr(feature = "visitor", visit(with = "visit_relation"))]
pub name: ObjectName,
@ -148,17 +125,19 @@ pub struct CreateTable {
pub constraints: Vec<TableConstraint>,
pub hive_distribution: HiveDistributionStyle,
pub hive_formats: Option<HiveFormat>,
pub table_options: CreateTableOptions,
pub table_properties: Vec<SqlOption>,
pub with_options: Vec<SqlOption>,
pub file_format: Option<FileFormat>,
pub location: Option<String>,
pub query: Option<Box<Query>>,
pub without_rowid: bool,
pub like: Option<ObjectName>,
pub clone: Option<ObjectName>,
// In the Hive dialect, the table comment comes after the column definitions without `=`,
// so the `comment` field is optional and distinct from the comment field in the general options list.
// [Hive](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-CreateTable)
pub engine: Option<TableEngine>,
pub comment: Option<CommentDef>,
pub auto_increment_offset: Option<u32>,
pub default_charset: Option<String>,
pub collation: Option<String>,
pub on_commit: Option<OnCommit>,
/// ClickHouse "ON CLUSTER" clause:
/// <https://clickhouse.com/docs/en/sql-reference/distributed-ddl/>
@ -175,17 +154,13 @@ pub struct CreateTable {
pub partition_by: Option<Box<Expr>>,
/// BigQuery: Table clustering column list.
/// <https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#table_option_list>
/// Snowflake: Table clustering list, which contains base columns and expressions on base columns.
/// <https://docs.snowflake.com/en/user-guide/tables-clustering-keys#defining-a-clustering-key-for-a-table>
pub cluster_by: Option<WrappedCollection<Vec<Expr>>>,
pub cluster_by: Option<WrappedCollection<Vec<Ident>>>,
/// Hive: Table clustering column list.
/// <https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-CreateTable>
pub clustered_by: Option<ClusteredBy>,
/// Postgres `INHERITS` clause, which contains the list of tables from which
/// the new table inherits.
/// <https://www.postgresql.org/docs/current/ddl-inherit.html>
/// <https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-INHERITS>
pub inherits: Option<Vec<ObjectName>>,
/// BigQuery: Table options list.
/// <https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#table_option_list>
pub options: Option<Vec<SqlOption>>,
/// SQLite "STRICT" clause.
/// if the "STRICT" table-option keyword is added to the end, after the closing ")",
/// then strict typing rules apply to that table.
@ -217,21 +192,6 @@ pub struct CreateTable {
/// Snowflake "WITH TAG" clause
/// <https://docs.snowflake.com/en/sql-reference/sql/create-table>
pub with_tags: Option<Vec<Tag>>,
/// Snowflake "EXTERNAL_VOLUME" clause for Iceberg tables
/// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
pub external_volume: Option<String>,
/// Snowflake "BASE_LOCATION" clause for Iceberg tables
/// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
pub base_location: Option<String>,
/// Snowflake "CATALOG" clause for Iceberg tables
/// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
pub catalog: Option<String>,
/// Snowflake "CATALOG_SYNC" clause for Iceberg tables
/// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
pub catalog_sync: Option<String>,
/// Snowflake "STORAGE_SERIALIZATION_POLICY" clause for Iceberg tables
/// <https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table>
pub storage_serialization_policy: Option<StorageSerializationPolicy>,
}
impl Display for CreateTable {
@ -245,7 +205,7 @@ impl Display for CreateTable {
// `CREATE TABLE t (a INT) AS SELECT a from t2`
write!(
f,
"CREATE {or_replace}{external}{global}{temporary}{transient}{volatile}{iceberg}TABLE {if_not_exists}{name}",
"CREATE {or_replace}{external}{global}{temporary}{transient}{volatile}TABLE {if_not_exists}{name}",
or_replace = if self.or_replace { "OR REPLACE " } else { "" },
external = if self.external { "EXTERNAL " } else { "" },
global = self.global
@ -261,32 +221,25 @@ impl Display for CreateTable {
temporary = if self.temporary { "TEMPORARY " } else { "" },
transient = if self.transient { "TRANSIENT " } else { "" },
volatile = if self.volatile { "VOLATILE " } else { "" },
// Only for Snowflake
iceberg = if self.iceberg { "ICEBERG " } else { "" },
name = self.name,
)?;
if let Some(on_cluster) = &self.on_cluster {
write!(f, " ON CLUSTER {on_cluster}")?;
write!(f, " ON CLUSTER {}", on_cluster)?;
}
if !self.columns.is_empty() || !self.constraints.is_empty() {
f.write_str(" (")?;
NewLine.fmt(f)?;
Indent(DisplayCommaSeparated(&self.columns)).fmt(f)?;
write!(f, " ({}", display_comma_separated(&self.columns))?;
if !self.columns.is_empty() && !self.constraints.is_empty() {
f.write_str(",")?;
SpaceOrNewline.fmt(f)?;
write!(f, ", ")?;
}
Indent(DisplayCommaSeparated(&self.constraints)).fmt(f)?;
NewLine.fmt(f)?;
f.write_str(")")?;
write!(f, "{})", display_comma_separated(&self.constraints))?;
} else if self.query.is_none() && self.like.is_none() && self.clone.is_none() {
// PostgreSQL allows `CREATE TABLE t ();`, but requires empty parens
f.write_str(" ()")?;
write!(f, " ()")?;
}
// The Hive table comment should come after the column definitions; please refer to:
// [Hive](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-CreateTable)
if let Some(comment) = &self.comment {
if let Some(CommentDef::AfterColumnDefsWithoutEq(comment)) = &self.comment {
write!(f, " COMMENT '{comment}'")?;
}
@ -379,22 +332,40 @@ impl Display for CreateTable {
}
write!(f, " LOCATION '{}'", self.location.as_ref().unwrap())?;
}
match &self.table_options {
options @ CreateTableOptions::With(_)
| options @ CreateTableOptions::Plain(_)
| options @ CreateTableOptions::TableProperties(_) => write!(f, " {options}")?,
_ => (),
if !self.table_properties.is_empty() {
write!(
f,
" TBLPROPERTIES ({})",
display_comma_separated(&self.table_properties)
)?;
}
if !self.with_options.is_empty() {
write!(f, " WITH ({})", display_comma_separated(&self.with_options))?;
}
if let Some(engine) = &self.engine {
write!(f, " ENGINE={engine}")?;
}
if let Some(comment_def) = &self.comment {
match comment_def {
CommentDef::WithEq(comment) => {
write!(f, " COMMENT = '{comment}'")?;
}
CommentDef::WithoutEq(comment) => {
write!(f, " COMMENT '{comment}'")?;
}
// CommentDef::AfterColumnDefsWithoutEq is displayed after the column definitions
CommentDef::AfterColumnDefsWithoutEq(_) => (),
}
}
if let Some(auto_increment_offset) = self.auto_increment_offset {
write!(f, " AUTO_INCREMENT {auto_increment_offset}")?;
}
if let Some(primary_key) = &self.primary_key {
write!(f, " PRIMARY KEY {primary_key}")?;
write!(f, " PRIMARY KEY {}", primary_key)?;
}
if let Some(order_by) = &self.order_by {
write!(f, " ORDER BY {order_by}")?;
}
if let Some(inherits) = &self.inherits {
write!(f, " INHERITS ({})", display_comma_separated(inherits))?;
write!(f, " ORDER BY {}", order_by)?;
}
if let Some(partition_by) = self.partition_by.as_ref() {
write!(f, " PARTITION BY {partition_by}")?;
@ -402,31 +373,12 @@ impl Display for CreateTable {
if let Some(cluster_by) = self.cluster_by.as_ref() {
write!(f, " CLUSTER BY {cluster_by}")?;
}
if let options @ CreateTableOptions::Options(_) = &self.table_options {
write!(f, " {options}")?;
}
if let Some(external_volume) = self.external_volume.as_ref() {
write!(f, " EXTERNAL_VOLUME = '{external_volume}'")?;
}
if let Some(catalog) = self.catalog.as_ref() {
write!(f, " CATALOG = '{catalog}'")?;
}
if self.iceberg {
if let Some(base_location) = self.base_location.as_ref() {
write!(f, " BASE_LOCATION = '{base_location}'")?;
}
}
if let Some(catalog_sync) = self.catalog_sync.as_ref() {
write!(f, " CATALOG_SYNC = '{catalog_sync}'")?;
}
if let Some(storage_serialization_policy) = self.storage_serialization_policy.as_ref() {
if let Some(options) = self.options.as_ref() {
write!(
f,
" STORAGE_SERIALIZATION_POLICY = {storage_serialization_policy}"
" OPTIONS({})",
display_comma_separated(options.as_slice())
)?;
}
@ -480,6 +432,13 @@ impl Display for CreateTable {
write!(f, " WITH TAG ({})", display_comma_separated(tag.as_slice()))?;
}
if let Some(default_charset) = &self.default_charset {
write!(f, " DEFAULT CHARSET={default_charset}")?;
}
if let Some(collation) = &self.collation {
write!(f, " COLLATE={collation}")?;
}
if self.on_commit.is_some() {
let on_commit = match self.on_commit {
Some(OnCommit::DeleteRows) => "ON COMMIT DELETE ROWS",
@ -588,32 +547,28 @@ impl Display for Insert {
)?;
}
if !self.columns.is_empty() {
write!(f, "({})", display_comma_separated(&self.columns))?;
SpaceOrNewline.fmt(f)?;
write!(f, "({}) ", display_comma_separated(&self.columns))?;
}
if let Some(ref parts) = self.partitioned {
if !parts.is_empty() {
write!(f, "PARTITION ({})", display_comma_separated(parts))?;
SpaceOrNewline.fmt(f)?;
write!(f, "PARTITION ({}) ", display_comma_separated(parts))?;
}
}
if !self.after_columns.is_empty() {
write!(f, "({})", display_comma_separated(&self.after_columns))?;
SpaceOrNewline.fmt(f)?;
write!(f, "({}) ", display_comma_separated(&self.after_columns))?;
}
if let Some(settings) = &self.settings {
write!(f, "SETTINGS {}", display_comma_separated(settings))?;
SpaceOrNewline.fmt(f)?;
write!(f, "SETTINGS {} ", display_comma_separated(settings))?;
}
if let Some(source) = &self.source {
source.fmt(f)?;
write!(f, "{source}")?;
} else if !self.assignments.is_empty() {
write!(f, "SET")?;
indented_list(f, &self.assignments)?;
write!(f, "SET ")?;
write!(f, "{}", display_comma_separated(&self.assignments))?;
} else if let Some(format_clause) = &self.format_clause {
format_clause.fmt(f)?;
write!(f, "{format_clause}")?;
} else if self.columns.is_empty() {
write!(f, "DEFAULT VALUES")?;
}
@ -633,9 +588,7 @@ impl Display for Insert {
}
if let Some(returning) = &self.returning {
SpaceOrNewline.fmt(f)?;
f.write_str("RETURNING")?;
indented_list(f, returning)?;
write!(f, " RETURNING {}", display_comma_separated(returning))?;
}
Ok(())
}
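The `SpaceOrNewline`/`indented_list` calls above belong to the pretty-printing machinery: one `Display` impl serves both layouts, branching on the formatter's alternate flag. A minimal usage sketch, assuming the indented form is reached through `{:#}`:

    use sqlparser::dialect::GenericDialect;
    use sqlparser::parser::Parser;

    fn main() {
        let sql = "INSERT INTO t (a, b) VALUES (1, 2)";
        let stmt = Parser::new(&GenericDialect {})
            .try_with_sql(sql)
            .unwrap()
            .parse_statement()
            .unwrap();
        println!("{stmt}");   // compact, single-line SQL
        println!("{stmt:#}"); // indented form via SpaceOrNewline/indented_list
    }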
@ -664,45 +617,32 @@ pub struct Delete {
impl Display for Delete {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("DELETE")?;
write!(f, "DELETE ")?;
if !self.tables.is_empty() {
indented_list(f, &self.tables)?;
write!(f, "{} ", display_comma_separated(&self.tables))?;
}
match &self.from {
FromTable::WithFromKeyword(from) => {
f.write_str(" FROM")?;
indented_list(f, from)?;
write!(f, "FROM {}", display_comma_separated(from))?;
}
FromTable::WithoutKeyword(from) => {
indented_list(f, from)?;
write!(f, "{}", display_comma_separated(from))?;
}
}
if let Some(using) = &self.using {
SpaceOrNewline.fmt(f)?;
f.write_str("USING")?;
indented_list(f, using)?;
write!(f, " USING {}", display_comma_separated(using))?;
}
if let Some(selection) = &self.selection {
SpaceOrNewline.fmt(f)?;
f.write_str("WHERE")?;
SpaceOrNewline.fmt(f)?;
Indent(selection).fmt(f)?;
write!(f, " WHERE {selection}")?;
}
if let Some(returning) = &self.returning {
SpaceOrNewline.fmt(f)?;
f.write_str("RETURNING")?;
indented_list(f, returning)?;
write!(f, " RETURNING {}", display_comma_separated(returning))?;
}
if !self.order_by.is_empty() {
SpaceOrNewline.fmt(f)?;
f.write_str("ORDER BY")?;
indented_list(f, &self.order_by)?;
write!(f, " ORDER BY {}", display_comma_separated(&self.order_by))?;
}
if let Some(limit) = &self.limit {
SpaceOrNewline.fmt(f)?;
f.write_str("LIMIT")?;
SpaceOrNewline.fmt(f)?;
Indent(limit).fmt(f)?;
write!(f, " LIMIT {limit}")?;
}
Ok(())
}
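A minimal stand-in for that helper, assuming it keys off `Formatter::alternate()` (a sketch, not the crate's actual implementation):

    use core::fmt;

    // Stand-in for the pretty-printing helper: a space in `{}` mode,
    // a newline when the `{:#}` alternate flag is set.
    struct SpaceOrNewline;

    impl fmt::Display for SpaceOrNewline {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            if f.alternate() {
                f.write_str("\n")
            } else {
                f.write_str(" ")
            }
        }
    }

    // A toy statement that uses the helper the same way Delete::fmt does.
    struct Demo;

    impl fmt::Display for Demo {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("DELETE FROM t")?;
            SpaceOrNewline.fmt(f)?; // inherits the caller's flags
            f.write_str("WHERE a > 1")
        }
    }

    fn main() {
        let d = Demo;
        assert_eq!(format!("{d}"), "DELETE FROM t WHERE a > 1");
        assert_eq!(format!("{d:#}"), "DELETE FROM t\nWHERE a > 1");
    }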

View file

@ -1,89 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Key-value options for SQL statements.
//! See [this page](https://docs.snowflake.com/en/sql-reference/commands-data-loading) for more details.
#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use core::fmt;
use core::fmt::Formatter;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "visitor")]
use sqlparser_derive::{Visit, VisitMut};
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct KeyValueOptions {
pub options: Vec<KeyValueOption>,
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum KeyValueOptionType {
STRING,
BOOLEAN,
ENUM,
NUMBER,
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct KeyValueOption {
pub option_name: String,
pub option_type: KeyValueOptionType,
pub value: String,
}
impl fmt::Display for KeyValueOptions {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
if !self.options.is_empty() {
let mut first = false;
for option in &self.options {
if !first {
first = true;
} else {
f.write_str(" ")?;
}
write!(f, "{option}")?;
}
}
Ok(())
}
}
impl fmt::Display for KeyValueOption {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.option_type {
KeyValueOptionType::STRING => {
write!(f, "{}='{}'", self.option_name, self.value)?;
}
KeyValueOptionType::ENUM | KeyValueOptionType::BOOLEAN | KeyValueOptionType::NUMBER => {
write!(f, "{}={}", self.option_name, self.value)?;
}
}
Ok(())
}
}
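A short usage sketch for the module above, using the `sqlparser::ast::helpers::key_value_options` path declared on the newer side of this diff:

    use sqlparser::ast::helpers::key_value_options::{
        KeyValueOption, KeyValueOptionType, KeyValueOptions,
    };

    fn main() {
        let opts = KeyValueOptions {
            options: vec![
                KeyValueOption {
                    option_name: "TYPE".into(),
                    option_type: KeyValueOptionType::ENUM,
                    value: "CSV".into(),
                },
                KeyValueOption {
                    option_name: "COMPRESSION".into(),
                    option_type: KeyValueOptionType::STRING,
                    value: "GZIP".into(),
                },
            ],
        };
        // Options are space-separated; only STRING values are quoted.
        assert_eq!(opts.to_string(), "TYPE=CSV COMPRESSION='GZIP'");
    }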

View file

@ -15,6 +15,5 @@
// specific language governing permissions and limitations
// under the License.
pub mod attached_token;
pub mod key_value_options;
pub mod stmt_create_table;
pub mod stmt_data_loading;

View file

@ -26,12 +26,10 @@ use sqlparser_derive::{Visit, VisitMut};
use super::super::dml::CreateTable;
use crate::ast::{
ClusteredBy, ColumnDef, CommentDef, CreateTableOptions, Expr, FileFormat,
HiveDistributionStyle, HiveFormat, Ident, ObjectName, OnCommit, OneOrManyWithParens, Query,
RowAccessPolicy, Statement, StorageSerializationPolicy, TableConstraint, Tag,
WrappedCollection,
ClusteredBy, ColumnDef, CommentDef, Expr, FileFormat, HiveDistributionStyle, HiveFormat, Ident,
ObjectName, OnCommit, OneOrManyWithParens, Query, RowAccessPolicy, SqlOption, Statement,
TableConstraint, TableEngine, Tag, WrappedCollection,
};
use crate::parser::ParserError;
/// Builder for create table statement variant ([1]).
@ -44,11 +42,12 @@ use crate::parser::ParserError;
/// ```rust
/// use sqlparser::ast::helpers::stmt_create_table::CreateTableBuilder;
/// use sqlparser::ast::{ColumnDef, DataType, Ident, ObjectName};
/// let builder = CreateTableBuilder::new(ObjectName::from(vec![Ident::new("table_name")]))
/// let builder = CreateTableBuilder::new(ObjectName(vec![Ident::new("table_name")]))
/// .if_not_exists(true)
/// .columns(vec![ColumnDef {
/// name: Ident::new("c1"),
/// data_type: DataType::Int(None),
/// collation: None,
/// options: vec![],
/// }]);
/// // You can access internal elements with ease
@ -72,27 +71,32 @@ pub struct CreateTableBuilder {
pub if_not_exists: bool,
pub transient: bool,
pub volatile: bool,
pub iceberg: bool,
pub name: ObjectName,
pub columns: Vec<ColumnDef>,
pub constraints: Vec<TableConstraint>,
pub hive_distribution: HiveDistributionStyle,
pub hive_formats: Option<HiveFormat>,
pub table_properties: Vec<SqlOption>,
pub with_options: Vec<SqlOption>,
pub file_format: Option<FileFormat>,
pub location: Option<String>,
pub query: Option<Box<Query>>,
pub without_rowid: bool,
pub like: Option<ObjectName>,
pub clone: Option<ObjectName>,
pub engine: Option<TableEngine>,
pub comment: Option<CommentDef>,
pub auto_increment_offset: Option<u32>,
pub default_charset: Option<String>,
pub collation: Option<String>,
pub on_commit: Option<OnCommit>,
pub on_cluster: Option<Ident>,
pub primary_key: Option<Box<Expr>>,
pub order_by: Option<OneOrManyWithParens<Expr>>,
pub partition_by: Option<Box<Expr>>,
pub cluster_by: Option<WrappedCollection<Vec<Expr>>>,
pub cluster_by: Option<WrappedCollection<Vec<Ident>>>,
pub clustered_by: Option<ClusteredBy>,
pub inherits: Option<Vec<ObjectName>>,
pub options: Option<Vec<SqlOption>>,
pub strict: bool,
pub copy_grants: bool,
pub enable_schema_evolution: Option<bool>,
@ -103,12 +107,6 @@ pub struct CreateTableBuilder {
pub with_aggregation_policy: Option<ObjectName>,
pub with_row_access_policy: Option<RowAccessPolicy>,
pub with_tags: Option<Vec<Tag>>,
pub base_location: Option<String>,
pub external_volume: Option<String>,
pub catalog: Option<String>,
pub catalog_sync: Option<String>,
pub storage_serialization_policy: Option<StorageSerializationPolicy>,
pub table_options: CreateTableOptions,
}
impl CreateTableBuilder {
@ -121,19 +119,24 @@ impl CreateTableBuilder {
if_not_exists: false,
transient: false,
volatile: false,
iceberg: false,
name,
columns: vec![],
constraints: vec![],
hive_distribution: HiveDistributionStyle::NONE,
hive_formats: None,
table_properties: vec![],
with_options: vec![],
file_format: None,
location: None,
query: None,
without_rowid: false,
like: None,
clone: None,
engine: None,
comment: None,
auto_increment_offset: None,
default_charset: None,
collation: None,
on_commit: None,
on_cluster: None,
primary_key: None,
@ -141,7 +144,7 @@ impl CreateTableBuilder {
partition_by: None,
cluster_by: None,
clustered_by: None,
inherits: None,
options: None,
strict: false,
copy_grants: false,
enable_schema_evolution: None,
@ -152,12 +155,6 @@ impl CreateTableBuilder {
with_aggregation_policy: None,
with_row_access_policy: None,
with_tags: None,
base_location: None,
external_volume: None,
catalog: None,
catalog_sync: None,
storage_serialization_policy: None,
table_options: CreateTableOptions::None,
}
}
pub fn or_replace(mut self, or_replace: bool) -> Self {
@ -195,11 +192,6 @@ impl CreateTableBuilder {
self
}
pub fn iceberg(mut self, iceberg: bool) -> Self {
self.iceberg = iceberg;
self
}
pub fn columns(mut self, columns: Vec<ColumnDef>) -> Self {
self.columns = columns;
self
@ -220,6 +212,15 @@ impl CreateTableBuilder {
self
}
pub fn table_properties(mut self, table_properties: Vec<SqlOption>) -> Self {
self.table_properties = table_properties;
self
}
pub fn with_options(mut self, with_options: Vec<SqlOption>) -> Self {
self.with_options = with_options;
self
}
pub fn file_format(mut self, file_format: Option<FileFormat>) -> Self {
self.file_format = file_format;
self
@ -249,11 +250,31 @@ impl CreateTableBuilder {
self
}
pub fn comment_after_column_def(mut self, comment: Option<CommentDef>) -> Self {
pub fn engine(mut self, engine: Option<TableEngine>) -> Self {
self.engine = engine;
self
}
pub fn comment(mut self, comment: Option<CommentDef>) -> Self {
self.comment = comment;
self
}
pub fn auto_increment_offset(mut self, offset: Option<u32>) -> Self {
self.auto_increment_offset = offset;
self
}
pub fn default_charset(mut self, default_charset: Option<String>) -> Self {
self.default_charset = default_charset;
self
}
pub fn collation(mut self, collation: Option<String>) -> Self {
self.collation = collation;
self
}
pub fn on_commit(mut self, on_commit: Option<OnCommit>) -> Self {
self.on_commit = on_commit;
self
@ -279,7 +300,7 @@ impl CreateTableBuilder {
self
}
pub fn cluster_by(mut self, cluster_by: Option<WrappedCollection<Vec<Expr>>>) -> Self {
pub fn cluster_by(mut self, cluster_by: Option<WrappedCollection<Vec<Ident>>>) -> Self {
self.cluster_by = cluster_by;
self
}
@ -289,8 +310,8 @@ impl CreateTableBuilder {
self
}
pub fn inherits(mut self, inherits: Option<Vec<ObjectName>>) -> Self {
self.inherits = inherits;
pub fn options(mut self, options: Option<Vec<SqlOption>>) -> Self {
self.options = options;
self
}
@ -350,39 +371,6 @@ impl CreateTableBuilder {
self
}
pub fn base_location(mut self, base_location: Option<String>) -> Self {
self.base_location = base_location;
self
}
pub fn external_volume(mut self, external_volume: Option<String>) -> Self {
self.external_volume = external_volume;
self
}
pub fn catalog(mut self, catalog: Option<String>) -> Self {
self.catalog = catalog;
self
}
pub fn catalog_sync(mut self, catalog_sync: Option<String>) -> Self {
self.catalog_sync = catalog_sync;
self
}
pub fn storage_serialization_policy(
mut self,
storage_serialization_policy: Option<StorageSerializationPolicy>,
) -> Self {
self.storage_serialization_policy = storage_serialization_policy;
self
}
pub fn table_options(mut self, table_options: CreateTableOptions) -> Self {
self.table_options = table_options;
self
}
pub fn build(self) -> Statement {
Statement::CreateTable(CreateTable {
or_replace: self.or_replace,
@ -392,19 +380,24 @@ impl CreateTableBuilder {
if_not_exists: self.if_not_exists,
transient: self.transient,
volatile: self.volatile,
iceberg: self.iceberg,
name: self.name,
columns: self.columns,
constraints: self.constraints,
hive_distribution: self.hive_distribution,
hive_formats: self.hive_formats,
table_properties: self.table_properties,
with_options: self.with_options,
file_format: self.file_format,
location: self.location,
query: self.query,
without_rowid: self.without_rowid,
like: self.like,
clone: self.clone,
engine: self.engine,
comment: self.comment,
auto_increment_offset: self.auto_increment_offset,
default_charset: self.default_charset,
collation: self.collation,
on_commit: self.on_commit,
on_cluster: self.on_cluster,
primary_key: self.primary_key,
@ -412,7 +405,7 @@ impl CreateTableBuilder {
partition_by: self.partition_by,
cluster_by: self.cluster_by,
clustered_by: self.clustered_by,
inherits: self.inherits,
options: self.options,
strict: self.strict,
copy_grants: self.copy_grants,
enable_schema_evolution: self.enable_schema_evolution,
@ -423,12 +416,6 @@ impl CreateTableBuilder {
with_aggregation_policy: self.with_aggregation_policy,
with_row_access_policy: self.with_row_access_policy,
with_tags: self.with_tags,
base_location: self.base_location,
external_volume: self.external_volume,
catalog: self.catalog,
catalog_sync: self.catalog_sync,
storage_serialization_policy: self.storage_serialization_policy,
table_options: self.table_options,
})
}
}
@ -448,19 +435,24 @@ impl TryFrom<Statement> for CreateTableBuilder {
if_not_exists,
transient,
volatile,
iceberg,
name,
columns,
constraints,
hive_distribution,
hive_formats,
table_properties,
with_options,
file_format,
location,
query,
without_rowid,
like,
clone,
engine,
comment,
auto_increment_offset,
default_charset,
collation,
on_commit,
on_cluster,
primary_key,
@ -468,7 +460,7 @@ impl TryFrom<Statement> for CreateTableBuilder {
partition_by,
cluster_by,
clustered_by,
inherits,
options,
strict,
copy_grants,
enable_schema_evolution,
@ -479,12 +471,6 @@ impl TryFrom<Statement> for CreateTableBuilder {
with_aggregation_policy,
with_row_access_policy,
with_tags,
base_location,
external_volume,
catalog,
catalog_sync,
storage_serialization_policy,
table_options,
}) => Ok(Self {
or_replace,
temporary,
@ -497,13 +483,19 @@ impl TryFrom<Statement> for CreateTableBuilder {
constraints,
hive_distribution,
hive_formats,
table_properties,
with_options,
file_format,
location,
query,
without_rowid,
like,
clone,
engine,
comment,
auto_increment_offset,
default_charset,
collation,
on_commit,
on_cluster,
primary_key,
@ -511,9 +503,8 @@ impl TryFrom<Statement> for CreateTableBuilder {
partition_by,
cluster_by,
clustered_by,
inherits,
options,
strict,
iceberg,
copy_grants,
enable_schema_evolution,
change_tracking,
@ -524,12 +515,6 @@ impl TryFrom<Statement> for CreateTableBuilder {
with_row_access_policy,
with_tags,
volatile,
base_location,
external_volume,
catalog,
catalog_sync,
storage_serialization_policy,
table_options,
}),
_ => Err(ParserError::ParserError(format!(
"Expected create table statement, but received: {stmt}"
@ -542,9 +527,8 @@ impl TryFrom<Statement> for CreateTableBuilder {
#[derive(Default)]
pub(crate) struct CreateTableConfiguration {
pub partition_by: Option<Box<Expr>>,
pub cluster_by: Option<WrappedCollection<Vec<Expr>>>,
pub inherits: Option<Vec<ObjectName>>,
pub table_options: CreateTableOptions,
pub cluster_by: Option<WrappedCollection<Vec<Ident>>>,
pub options: Option<Vec<SqlOption>>,
}
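The builder and the `TryFrom<Statement>` impl above are designed to round-trip; a small sketch, using the `ObjectName(vec![...])` constructor as spelled on the older side of this diff:

    use sqlparser::ast::helpers::stmt_create_table::CreateTableBuilder;
    use sqlparser::ast::{Ident, ObjectName};

    fn main() {
        let builder =
            CreateTableBuilder::new(ObjectName(vec![Ident::new("t")])).if_not_exists(true);
        let stmt = builder.build();
        // Convert the built Statement back into a builder to tweak it further.
        let recovered = CreateTableBuilder::try_from(stmt).expect("a CREATE TABLE statement");
        assert!(recovered.if_not_exists);
        assert_eq!(recovered.name.to_string(), "t");
    }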
#[cfg(test)]
@ -555,7 +539,7 @@ mod tests {
#[test]
pub fn test_from_valid_statement() {
let builder = CreateTableBuilder::new(ObjectName::from(vec![Ident::new("table_name")]));
let builder = CreateTableBuilder::new(ObjectName(vec![Ident::new("table_name")]));
let stmt = builder.clone().build();

View file

@ -21,13 +21,15 @@
#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use core::fmt;
use core::fmt::Formatter;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::ast::helpers::key_value_options::KeyValueOptions;
use crate::ast::{Ident, ObjectName, SelectItem};
use crate::ast::{Ident, ObjectName};
#[cfg(feature = "visitor")]
use sqlparser_derive::{Visit, VisitMut};
@ -36,29 +38,35 @@ use sqlparser_derive::{Visit, VisitMut};
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct StageParamsObject {
pub url: Option<String>,
pub encryption: KeyValueOptions,
pub encryption: DataLoadingOptions,
pub endpoint: Option<String>,
pub storage_integration: Option<String>,
pub credentials: KeyValueOptions,
pub credentials: DataLoadingOptions,
}
/// This enum enables support for both standard SQL select item expressions
/// and Snowflake-specific ones for data loading.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum StageLoadSelectItemKind {
SelectItem(SelectItem),
StageLoadSelectItem(StageLoadSelectItem),
pub struct DataLoadingOptions {
pub options: Vec<DataLoadingOption>,
}
impl fmt::Display for StageLoadSelectItemKind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self {
StageLoadSelectItemKind::SelectItem(item) => write!(f, "{item}"),
StageLoadSelectItemKind::StageLoadSelectItem(item) => write!(f, "{item}"),
}
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum DataLoadingOptionType {
STRING,
BOOLEAN,
ENUM,
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct DataLoadingOption {
pub option_name: String,
pub option_type: DataLoadingOptionType,
pub value: String,
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
@ -97,6 +105,42 @@ impl fmt::Display for StageParamsObject {
}
}
impl fmt::Display for DataLoadingOptions {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
if !self.options.is_empty() {
let mut first = false;
for option in &self.options {
if !first {
first = true;
} else {
f.write_str(" ")?;
}
write!(f, "{}", option)?;
}
}
Ok(())
}
}
impl fmt::Display for DataLoadingOption {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.option_type {
DataLoadingOptionType::STRING => {
write!(f, "{}='{}'", self.option_name, self.value)?;
}
DataLoadingOptionType::ENUM | DataLoadingOptionType::BOOLEAN => {
// single quote is omitted for ENUM and BOOLEAN values
write!(f, "{}={}", self.option_name, self.value)?;
}
}
Ok(())
}
}
impl fmt::Display for StageLoadSelectItem {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.alias.is_some() {

File diff suppressed because it is too large

View file

@ -53,21 +53,6 @@ pub enum UnaryOperator {
PGAbs,
/// Unary logical not operator: e.g. `! false` (Hive-specific)
BangNot,
/// `#` Number of points in path or polygon (PostgreSQL/Redshift geometric operator)
/// see <https://www.postgresql.org/docs/9.5/functions-geometry.html>
Hash,
/// `@-@` Length or circumference (PostgreSQL/Redshift geometric operator)
/// see <https://www.postgresql.org/docs/9.5/functions-geometry.html>
AtDashAt,
/// `@@` Center (PostgreSQL/Redshift geometric operator)
/// see <https://www.postgresql.org/docs/9.5/functions-geometry.html>
DoubleAt,
/// `?-` Is horizontal? (PostgreSQL/Redshift geometric operator)
/// see <https://www.postgresql.org/docs/9.5/functions-geometry.html>
QuestionDash,
/// `?|` Is vertical? (PostgreSQL/Redshift geometric operator)
/// see <https://www.postgresql.org/docs/9.5/functions-geometry.html>
QuestionPipe,
}
impl fmt::Display for UnaryOperator {
@ -83,11 +68,6 @@ impl fmt::Display for UnaryOperator {
UnaryOperator::PGPrefixFactorial => "!!",
UnaryOperator::PGAbs => "@",
UnaryOperator::BangNot => "!",
UnaryOperator::Hash => "#",
UnaryOperator::AtDashAt => "@-@",
UnaryOperator::DoubleAt => "@@",
UnaryOperator::QuestionDash => "?-",
UnaryOperator::QuestionPipe => "?|",
})
}
}
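`Display` maps each variant back to its token text, so rendering an operator is just `to_string()` (variants and tokens as listed above):

    use sqlparser::ast::UnaryOperator;

    fn main() {
        assert_eq!(UnaryOperator::PGAbs.to_string(), "@");
        assert_eq!(UnaryOperator::BangNot.to_string(), "!");
    }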
@ -139,11 +119,6 @@ pub enum BinaryOperator {
DuckIntegerDivide,
/// MySQL [`DIV`](https://dev.mysql.com/doc/refman/8.0/en/arithmetic-functions.html) integer division
MyIntegerDivide,
/// MATCH operator, e.g. `a MATCH b` (SQLite-specific)
/// See <https://www.sqlite.org/lang_expr.html#the_like_glob_regexp_match_and_extract_operators>
Match,
/// REGEXP operator, e.g. `a REGEXP b` (SQLite-specific)
Regexp,
/// Support for custom operators (such as Postgres custom operators)
Custom(String),
/// Bitwise XOR, e.g. `a # b` (PostgreSQL-specific)
@ -278,57 +253,6 @@ pub enum BinaryOperator {
/// Specifies a test for an overlap between two datetime periods:
/// <https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#overlaps-predicate>
Overlaps,
/// `##` Point of closest proximity (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
DoubleHash,
/// `<->` Distance between (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
LtDashGt,
/// `&<` Overlaps to left? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
AndLt,
/// `&>` Overlaps to right? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
AndGt,
/// `<<|` Is strictly below? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
LtLtPipe,
/// `|>>` Is strictly above? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
PipeGtGt,
/// `&<|` Does not extend above? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
AndLtPipe,
/// `|&>` Does not extend below? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
PipeAndGt,
/// `<^` Is below? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
LtCaret,
/// `>^` Is above? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
GtCaret,
/// `?#` Intersects? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
QuestionHash,
/// `?-` Is horizontal? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
QuestionDash,
/// `?-|` Is perpendicular? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
QuestionDashPipe,
/// `?||` Are Parallel? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
QuestionDoublePipe,
/// `@` Contained or on? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
At,
/// `~=` Same as? (PostgreSQL/Redshift geometric operator)
/// See <https://www.postgresql.org/docs/9.5/functions-geometry.html>
TildeEq,
/// ':=' Assignment Operator
/// See <https://dev.mysql.com/doc/refman/8.4/en/assignment-operators.html#operator_assign-value>
Assignment,
}
impl fmt::Display for BinaryOperator {
@ -355,8 +279,6 @@ impl fmt::Display for BinaryOperator {
BinaryOperator::BitwiseXor => f.write_str("^"),
BinaryOperator::DuckIntegerDivide => f.write_str("//"),
BinaryOperator::MyIntegerDivide => f.write_str("DIV"),
BinaryOperator::Match => f.write_str("MATCH"),
BinaryOperator::Regexp => f.write_str("REGEXP"),
BinaryOperator::Custom(s) => f.write_str(s),
BinaryOperator::PGBitwiseXor => f.write_str("#"),
BinaryOperator::PGBitwiseShiftLeft => f.write_str("<<"),
@ -388,23 +310,6 @@ impl fmt::Display for BinaryOperator {
write!(f, "OPERATOR({})", display_separated(idents, "."))
}
BinaryOperator::Overlaps => f.write_str("OVERLAPS"),
BinaryOperator::DoubleHash => f.write_str("##"),
BinaryOperator::LtDashGt => f.write_str("<->"),
BinaryOperator::AndLt => f.write_str("&<"),
BinaryOperator::AndGt => f.write_str("&>"),
BinaryOperator::LtLtPipe => f.write_str("<<|"),
BinaryOperator::PipeGtGt => f.write_str("|>>"),
BinaryOperator::AndLtPipe => f.write_str("&<|"),
BinaryOperator::PipeAndGt => f.write_str("|&>"),
BinaryOperator::LtCaret => f.write_str("<^"),
BinaryOperator::GtCaret => f.write_str(">^"),
BinaryOperator::QuestionHash => f.write_str("?#"),
BinaryOperator::QuestionDash => f.write_str("?-"),
BinaryOperator::QuestionDashPipe => f.write_str("?-|"),
BinaryOperator::QuestionDoublePipe => f.write_str("?||"),
BinaryOperator::At => f.write_str("@"),
BinaryOperator::TildeEq => f.write_str("~="),
BinaryOperator::Assignment => f.write_str(":="),
}
}
}
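To check which variant a token parses to, run an expression through the parser; a sketch assuming the PostgreSQL dialect maps `#` to `PGBitwiseXor`, as that variant's doc comment states:

    use sqlparser::ast::{BinaryOperator, Expr};
    use sqlparser::dialect::PostgreSqlDialect;
    use sqlparser::parser::Parser;

    fn main() {
        let expr = Parser::new(&PostgreSqlDialect {})
            .try_with_sql("a # b")
            .unwrap()
            .parse_expr()
            .unwrap();
        match expr {
            Expr::BinaryOp { op, .. } => assert_eq!(op, BinaryOperator::PGBitwiseXor),
            other => panic!("expected a binary op, got {other:?}"),
        }
    }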

File diff suppressed because it is too large

View file

@ -15,30 +15,26 @@
// specific language governing permissions and limitations
// under the License.
use crate::ast::{query::SelectItemQualifiedWildcardKind, ColumnOptions};
use core::iter;
use crate::tokenizer::Span;
use super::{
dcl::SecondaryRoles, value::ValueWithSpan, AccessExpr, AlterColumnOperation,
AlterIndexOperation, AlterTableOperation, Array, Assignment, AssignmentTarget, AttachedToken,
BeginEndStatements, CaseStatement, CloseCursor, ClusteredIndex, ColumnDef, ColumnOption,
ColumnOptionDef, ConditionalStatementBlock, ConditionalStatements, ConflictTarget, ConnectBy,
ConstraintCharacteristics, CopySource, CreateIndex, CreateTable, CreateTableOptions, Cte,
Delete, DoUpdate, ExceptSelectItem, ExcludeSelectItem, Expr, ExprWithAlias, Fetch, FromTable,
Function, FunctionArg, FunctionArgExpr, FunctionArgumentClause, FunctionArgumentList,
FunctionArguments, GroupByExpr, HavingBound, IfStatement, IlikeSelectItem, IndexColumn, Insert,
Interpolate, InterpolateExpr, Join, JoinConstraint, JoinOperator, JsonPath, JsonPathElem,
LateralView, LimitClause, MatchRecognizePattern, Measure, NamedParenthesizedList,
NamedWindowDefinition, ObjectName, ObjectNamePart, Offset, OnConflict, OnConflictAction,
OnInsert, OpenStatement, OrderBy, OrderByExpr, OrderByKind, Partition, PivotValueSource,
ProjectionSelect, Query, RaiseStatement, RaiseStatementValue, ReferentialAction,
dcl::SecondaryRoles, AccessExpr, AlterColumnOperation, AlterIndexOperation,
AlterTableOperation, Array, Assignment, AssignmentTarget, CloseCursor, ClusteredIndex,
ColumnDef, ColumnOption, ColumnOptionDef, ConflictTarget, ConnectBy, ConstraintCharacteristics,
CopySource, CreateIndex, CreateTable, CreateTableOptions, Cte, Delete, DoUpdate,
ExceptSelectItem, ExcludeSelectItem, Expr, ExprWithAlias, Fetch, FromTable, Function,
FunctionArg, FunctionArgExpr, FunctionArgumentClause, FunctionArgumentList, FunctionArguments,
GroupByExpr, HavingBound, IlikeSelectItem, Insert, Interpolate, InterpolateExpr, Join,
JoinConstraint, JoinOperator, JsonPath, JsonPathElem, LateralView, MatchRecognizePattern,
Measure, NamedWindowDefinition, ObjectName, Offset, OnConflict, OnConflictAction, OnInsert,
OrderBy, OrderByExpr, Partition, PivotValueSource, ProjectionSelect, Query, ReferentialAction,
RenameSelectItem, ReplaceSelectElement, ReplaceSelectItem, Select, SelectInto, SelectItem,
SetExpr, SqlOption, Statement, Subscript, SymbolDefinition, TableAlias, TableAliasColumnDef,
TableConstraint, TableFactor, TableObject, TableOptionsClustered, TableWithJoins,
UpdateTableFromKind, Use, Value, Values, ViewColumnDef, WhileStatement,
WildcardAdditionalOptions, With, WithFill,
UpdateTableFromKind, Use, Value, Values, ViewColumnDef, WildcardAdditionalOptions, With,
WithFill,
};
/// Given an iterator of spans, return the [Span::union] of all spans.
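`Span::union` acts like a bounding box over source locations, for example (constructors as shown in the `ValueWithSpan` docs later in this diff):

    use sqlparser::tokenizer::{Location, Span};

    fn main() {
        let a = Span::new(Location::new(1, 1), Location::new(1, 5));
        let b = Span::new(Location::new(2, 3), Location::new(2, 9));
        // The union covers the earliest start through the latest end.
        assert_eq!(a.union(&b), Span::new(Location::new(1, 1), Location::new(2, 9)));
    }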
@ -97,13 +93,14 @@ impl Spanned for Query {
with,
body,
order_by,
limit_clause,
limit,
limit_by,
offset,
fetch,
locks: _, // todo
for_clause: _, // todo, mssql specific
settings: _, // todo, clickhouse specific
format_clause: _, // todo, clickhouse specific
pipe_operators: _, // todo, bigquery specific
locks: _, // todo
for_clause: _, // todo, mssql specific
settings: _, // todo, clickhouse specific
format_clause: _, // todo, clickhouse specific
} = self;
union_spans(
@ -111,31 +108,14 @@ impl Spanned for Query {
.map(|i| i.span())
.chain(core::iter::once(body.span()))
.chain(order_by.as_ref().map(|i| i.span()))
.chain(limit_clause.as_ref().map(|i| i.span()))
.chain(limit.as_ref().map(|i| i.span()))
.chain(limit_by.iter().map(|i| i.span()))
.chain(offset.as_ref().map(|i| i.span()))
.chain(fetch.as_ref().map(|i| i.span())),
)
}
}
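Putting `Spanned` to use: parse a statement and read back where it sits in the source (a sketch, assuming `Spanned` is re-exported from `sqlparser::ast` alongside the impls here):

    use sqlparser::ast::Spanned;
    use sqlparser::dialect::GenericDialect;
    use sqlparser::parser::Parser;

    fn main() {
        let sql = "SELECT a FROM t WHERE a > 1";
        let stmt = Parser::new(&GenericDialect {})
            .try_with_sql(sql)
            .unwrap()
            .parse_statement()
            .unwrap();
        let span = stmt.span();
        // Locations are 1-based line/column pairs into the original SQL.
        println!("{:?}..{:?}", span.start, span.end);
    }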
impl Spanned for LimitClause {
fn span(&self) -> Span {
match self {
LimitClause::LimitOffset {
limit,
offset,
limit_by,
} => union_spans(
limit
.iter()
.map(|i| i.span())
.chain(offset.as_ref().map(|i| i.span()))
.chain(limit_by.iter().map(|i| i.span())),
),
LimitClause::OffsetCommaLimit { offset, limit } => offset.span().union(&limit.span()),
}
}
}
impl Spanned for Offset {
fn span(&self) -> Span {
let Offset {
@ -210,7 +190,6 @@ impl Spanned for SetExpr {
SetExpr::Insert(statement) => statement.span(),
SetExpr::Table(_) => Span::empty(),
SetExpr::Update(statement) => statement.span(),
SetExpr::Delete(statement) => statement.span(),
}
}
}
@ -235,7 +214,6 @@ impl Spanned for Values {
/// - [Statement::CopyIntoSnowflake]
/// - [Statement::CreateSecret]
/// - [Statement::CreateRole]
/// - [Statement::AlterType]
/// - [Statement::AlterRole]
/// - [Statement::AttachDatabase]
/// - [Statement::AttachDuckDBDatabase]
@ -249,7 +227,11 @@ impl Spanned for Values {
/// - [Statement::Fetch]
/// - [Statement::Flush]
/// - [Statement::Discard]
/// - [Statement::Set]
/// - [Statement::SetRole]
/// - [Statement::SetVariable]
/// - [Statement::SetTimeZone]
/// - [Statement::SetNames]
/// - [Statement::SetNamesDefault]
/// - [Statement::ShowFunctions]
/// - [Statement::ShowVariable]
/// - [Statement::ShowStatus]
@ -259,6 +241,7 @@ impl Spanned for Values {
/// - [Statement::ShowTables]
/// - [Statement::ShowCollation]
/// - [Statement::StartTransaction]
/// - [Statement::SetTransaction]
/// - [Statement::Comment]
/// - [Statement::Commit]
/// - [Statement::Rollback]
@ -312,6 +295,7 @@ impl Spanned for Statement {
table_names,
partitions,
table: _,
only: _,
identity: _,
cascade: _,
on_cluster: _,
@ -337,10 +321,6 @@ impl Spanned for Statement {
file_format: _,
source,
} => source.span(),
Statement::Case(stmt) => stmt.span(),
Statement::If(stmt) => stmt.span(),
Statement::While(stmt) => stmt.span(),
Statement::Raise(stmt) => stmt.span(),
Statement::Call(function) => function.span(),
Statement::Copy {
source,
@ -352,9 +332,8 @@ impl Spanned for Statement {
} => source.span(),
Statement::CopyIntoSnowflake {
into: _,
into_columns: _,
from_obj: _,
from_obj_alias: _,
from_stage: _,
from_stage_alias: _,
stage_params: _,
from_transformations: _,
files: _,
@ -362,11 +341,7 @@ impl Spanned for Statement {
file_format: _,
copy_options: _,
validation_mode: _,
kind: _,
from_query: _,
partition: _,
} => Span::empty(),
Statement::Open(open) => open.span(),
Statement::Close { cursor } => match cursor {
CloseCursor::All => Span::empty(),
CloseCursor::Specific { name } => name.span,
@ -387,7 +362,6 @@ impl Spanned for Statement {
),
Statement::Delete(delete) => delete.span(),
Statement::CreateView {
or_alter: _,
or_replace: _,
materialized: _,
name,
@ -423,8 +397,6 @@ impl Spanned for Statement {
Statement::CreateIndex(create_index) => create_index.span(),
Statement::CreateRole { .. } => Span::empty(),
Statement::CreateSecret { .. } => Span::empty(),
Statement::CreateServer { .. } => Span::empty(),
Statement::CreateConnector { .. } => Span::empty(),
Statement::AlterTable {
name,
if_exists: _,
@ -432,7 +404,6 @@ impl Spanned for Statement {
operations,
location: _,
on_cluster,
iceberg: _,
} => union_spans(
core::iter::once(name.span())
.chain(operations.iter().map(|i| i.span()))
@ -451,15 +422,12 @@ impl Spanned for Statement {
.chain(with_options.iter().map(|i| i.span())),
),
// These statements need to be implemented
Statement::AlterType { .. } => Span::empty(),
Statement::AlterRole { .. } => Span::empty(),
Statement::AlterSession { .. } => Span::empty(),
Statement::AttachDatabase { .. } => Span::empty(),
Statement::AttachDuckDBDatabase { .. } => Span::empty(),
Statement::DetachDuckDBDatabase { .. } => Span::empty(),
Statement::Drop { .. } => Span::empty(),
Statement::DropFunction { .. } => Span::empty(),
Statement::DropDomain { .. } => Span::empty(),
Statement::DropProcedure { .. } => Span::empty(),
Statement::DropSecret { .. } => Span::empty(),
Statement::Declare { .. } => Span::empty(),
@ -468,7 +436,11 @@ impl Spanned for Statement {
Statement::Fetch { .. } => Span::empty(),
Statement::Flush { .. } => Span::empty(),
Statement::Discard { .. } => Span::empty(),
Statement::Set(_) => Span::empty(),
Statement::SetRole { .. } => Span::empty(),
Statement::SetVariable { .. } => Span::empty(),
Statement::SetTimeZone { .. } => Span::empty(),
Statement::SetNames { .. } => Span::empty(),
Statement::SetNamesDefault {} => Span::empty(),
Statement::ShowFunctions { .. } => Span::empty(),
Statement::ShowVariable { .. } => Span::empty(),
Statement::ShowStatus { .. } => Span::empty(),
@ -479,13 +451,13 @@ impl Spanned for Statement {
Statement::ShowCollation { .. } => Span::empty(),
Statement::Use(u) => u.span(),
Statement::StartTransaction { .. } => Span::empty(),
Statement::SetTransaction { .. } => Span::empty(),
Statement::Comment { .. } => Span::empty(),
Statement::Commit { .. } => Span::empty(),
Statement::Rollback { .. } => Span::empty(),
Statement::CreateSchema { .. } => Span::empty(),
Statement::CreateDatabase { .. } => Span::empty(),
Statement::CreateFunction { .. } => Span::empty(),
Statement::CreateDomain { .. } => Span::empty(),
Statement::CreateTrigger { .. } => Span::empty(),
Statement::DropTrigger { .. } => Span::empty(),
Statement::CreateProcedure { .. } => Span::empty(),
@ -493,7 +465,6 @@ impl Spanned for Statement {
Statement::CreateStage { .. } => Span::empty(),
Statement::Assert { .. } => Span::empty(),
Statement::Grant { .. } => Span::empty(),
Statement::Deny { .. } => Span::empty(),
Statement::Revoke { .. } => Span::empty(),
Statement::Deallocate { .. } => Span::empty(),
Statement::Execute { .. } => Span::empty(),
@ -515,12 +486,9 @@ impl Spanned for Statement {
Statement::OptimizeTable { .. } => Span::empty(),
Statement::CreatePolicy { .. } => Span::empty(),
Statement::AlterPolicy { .. } => Span::empty(),
Statement::AlterConnector { .. } => Span::empty(),
Statement::DropPolicy { .. } => Span::empty(),
Statement::DropConnector { .. } => Span::empty(),
Statement::ShowDatabases { .. } => Span::empty(),
Statement::ShowSchemas { .. } => Span::empty(),
Statement::ShowObjects { .. } => Span::empty(),
Statement::ShowViews { .. } => Span::empty(),
Statement::LISTEN { .. } => Span::empty(),
Statement::NOTIFY { .. } => Span::empty(),
@ -528,9 +496,8 @@ impl Spanned for Statement {
Statement::UNLISTEN { .. } => Span::empty(),
Statement::RenameTable { .. } => Span::empty(),
Statement::RaisError { .. } => Span::empty(),
Statement::Print { .. } => Span::empty(),
Statement::Return { .. } => Span::empty(),
Statement::List(..) | Statement::Remove(..) => Span::empty(),
Statement::SetSessionParam { .. } => Span::empty(),
}
}
}
@ -565,27 +532,32 @@ impl Spanned for CreateTable {
if_not_exists: _, // bool
transient: _, // bool
volatile: _, // bool
iceberg: _, // bool, Snowflake specific
name,
columns,
constraints,
hive_distribution: _, // hive specific
hive_formats: _, // hive specific
file_format: _, // enum
location: _, // string, no span
table_properties,
with_options,
file_format: _, // enum
location: _, // string, no span
query,
without_rowid: _, // bool
like,
clone,
comment: _, // todo, no span
on_commit: _,
engine: _, // todo
comment: _, // todo, no span
auto_increment_offset: _, // u32, no span
default_charset: _, // string, no span
collation: _, // string, no span
on_commit: _, // enum
on_cluster: _, // todo, clickhouse specific
primary_key: _, // todo, clickhouse specific
order_by: _, // todo, clickhouse specific
partition_by: _, // todo, BigQuery specific
cluster_by: _, // todo, BigQuery specific
clustered_by: _, // todo, Hive specific
inherits: _, // todo, PostgreSQL specific
options: _, // todo, BigQuery specific
strict: _, // bool
copy_grants: _, // bool
enable_schema_evolution: _, // bool
@ -596,19 +568,14 @@ impl Spanned for CreateTable {
with_aggregation_policy: _, // todo, Snowflake specific
with_row_access_policy: _, // todo, Snowflake specific
with_tags: _, // todo, Snowflake specific
external_volume: _, // todo, Snowflake specific
base_location: _, // todo, Snowflake specific
catalog: _, // todo, Snowflake specific
catalog_sync: _, // todo, Snowflake specific
storage_serialization_policy: _,
table_options,
} = self;
union_spans(
core::iter::once(name.span())
.chain(core::iter::once(table_options.span()))
.chain(columns.iter().map(|i| i.span()))
.chain(constraints.iter().map(|i| i.span()))
.chain(table_properties.iter().map(|i| i.span()))
.chain(with_options.iter().map(|i| i.span()))
.chain(query.iter().map(|i| i.span()))
.chain(like.iter().map(|i| i.span()))
.chain(clone.iter().map(|i| i.span())),
@ -621,10 +588,15 @@ impl Spanned for ColumnDef {
let ColumnDef {
name,
data_type: _, // enum
collation,
options,
} = self;
union_spans(core::iter::once(name.span).chain(options.iter().map(|i| i.span())))
union_spans(
core::iter::once(name.span)
.chain(collation.iter().map(|i| i.span()))
.chain(options.iter().map(|i| i.span())),
)
}
}
@ -652,7 +624,7 @@ impl Spanned for TableConstraint {
name.iter()
.map(|i| i.span)
.chain(index_name.iter().map(|i| i.span))
.chain(columns.iter().map(|i| i.span()))
.chain(columns.iter().map(|i| i.span))
.chain(characteristics.iter().map(|i| i.span())),
),
TableConstraint::PrimaryKey {
@ -666,13 +638,12 @@ impl Spanned for TableConstraint {
name.iter()
.map(|i| i.span)
.chain(index_name.iter().map(|i| i.span))
.chain(columns.iter().map(|i| i.span()))
.chain(columns.iter().map(|i| i.span))
.chain(characteristics.iter().map(|i| i.span())),
),
TableConstraint::ForeignKey {
name,
columns,
index_name,
foreign_table,
referred_columns,
on_delete,
@ -681,7 +652,6 @@ impl Spanned for TableConstraint {
} => union_spans(
name.iter()
.map(|i| i.span)
.chain(index_name.iter().map(|i| i.span))
.chain(columns.iter().map(|i| i.span))
.chain(core::iter::once(foreign_table.span()))
.chain(referred_columns.iter().map(|i| i.span))
@ -689,11 +659,9 @@ impl Spanned for TableConstraint {
.chain(on_update.iter().map(|i| i.span()))
.chain(characteristics.iter().map(|i| i.span())),
),
TableConstraint::Check {
name,
expr,
enforced: _,
} => expr.span().union_opt(&name.as_ref().map(|i| i.span)),
TableConstraint::Check { name, expr } => {
expr.span().union_opt(&name.as_ref().map(|i| i.span))
}
TableConstraint::Index {
display_as_key: _,
name,
@ -702,7 +670,7 @@ impl Spanned for TableConstraint {
} => union_spans(
name.iter()
.map(|i| i.span)
.chain(columns.iter().map(|i| i.span())),
.chain(columns.iter().map(|i| i.span)),
),
TableConstraint::FulltextOrSpatial {
fulltext: _,
@ -713,7 +681,7 @@ impl Spanned for TableConstraint {
opt_index_name
.iter()
.map(|i| i.span)
.chain(columns.iter().map(|i| i.span())),
.chain(columns.iter().map(|i| i.span)),
),
}
}
@ -724,7 +692,7 @@ impl Spanned for CreateIndex {
let CreateIndex {
name,
table_name,
using: _,
using,
columns,
unique: _, // bool
concurrently: _, // bool
@ -739,7 +707,8 @@ impl Spanned for CreateIndex {
name.iter()
.map(|i| i.span())
.chain(core::iter::once(table_name.span()))
.chain(columns.iter().map(|i| i.column.span()))
.chain(using.iter().map(|i| i.span))
.chain(columns.iter().map(|i| i.span()))
.chain(include.iter().map(|i| i.span))
.chain(with.iter().map(|i| i.span()))
.chain(predicate.iter().map(|i| i.span())),
@ -747,98 +716,6 @@ impl Spanned for CreateIndex {
}
}
impl Spanned for IndexColumn {
fn span(&self) -> Span {
self.column.span()
}
}
impl Spanned for CaseStatement {
fn span(&self) -> Span {
let CaseStatement {
case_token: AttachedToken(start),
match_expr: _,
when_blocks: _,
else_block: _,
end_case_token: AttachedToken(end),
} = self;
union_spans([start.span, end.span].into_iter())
}
}
impl Spanned for IfStatement {
fn span(&self) -> Span {
let IfStatement {
if_block,
elseif_blocks,
else_block,
end_token,
} = self;
union_spans(
iter::once(if_block.span())
.chain(elseif_blocks.iter().map(|b| b.span()))
.chain(else_block.as_ref().map(|b| b.span()))
.chain(end_token.as_ref().map(|AttachedToken(t)| t.span)),
)
}
}
impl Spanned for WhileStatement {
fn span(&self) -> Span {
let WhileStatement { while_block } = self;
while_block.span()
}
}
impl Spanned for ConditionalStatements {
fn span(&self) -> Span {
match self {
ConditionalStatements::Sequence { statements } => {
union_spans(statements.iter().map(|s| s.span()))
}
ConditionalStatements::BeginEnd(bes) => bes.span(),
}
}
}
impl Spanned for ConditionalStatementBlock {
fn span(&self) -> Span {
let ConditionalStatementBlock {
start_token: AttachedToken(start_token),
condition,
then_token,
conditional_statements,
} = self;
union_spans(
iter::once(start_token.span)
.chain(condition.as_ref().map(|c| c.span()))
.chain(then_token.as_ref().map(|AttachedToken(t)| t.span))
.chain(iter::once(conditional_statements.span())),
)
}
}
impl Spanned for RaiseStatement {
fn span(&self) -> Span {
let RaiseStatement { value } = self;
union_spans(value.iter().map(|value| value.span()))
}
}
impl Spanned for RaiseStatementValue {
fn span(&self) -> Span {
match self {
RaiseStatementValue::UsingMessage(expr) => expr.span(),
RaiseStatementValue::Expr(expr) => expr.span(),
}
}
}
/// # partial span
///
/// Missing spans:
@ -874,7 +751,6 @@ impl Spanned for ColumnOption {
ColumnOption::Check(expr) => expr.span(),
ColumnOption::DialectSpecific(_) => Span::empty(),
ColumnOption::CharacterSet(object_name) => object_name.span(),
ColumnOption::Collation(object_name) => object_name.span(),
ColumnOption::Comment(_) => Span::empty(),
ColumnOption::OnUpdate(expr) => expr.span(),
ColumnOption::Generated { .. } => Span::empty(),
@ -883,7 +759,6 @@ impl Spanned for ColumnOption {
ColumnOption::OnConflict(..) => Span::empty(),
ColumnOption::Policy(..) => Span::empty(),
ColumnOption::Tags(..) => Span::empty(),
ColumnOption::Srid(..) => Span::empty(),
}
}
}
@ -925,7 +800,6 @@ impl Spanned for AlterColumnOperation {
AlterColumnOperation::SetDataType {
data_type: _,
using,
had_set: _,
} => using.as_ref().map_or(Span::empty(), |u| u.span()),
AlterColumnOperation::AddGenerated { .. } => Span::empty(),
}
@ -993,13 +867,10 @@ impl Spanned for ViewColumnDef {
options,
} = self;
name.span.union_opt(&options.as_ref().map(|o| o.span()))
}
}
impl Spanned for ColumnOptions {
fn span(&self) -> Span {
union_spans(self.as_slice().iter().map(|i| i.span()))
union_spans(
core::iter::once(name.span)
.chain(options.iter().flat_map(|i| i.iter().map(|k| k.span()))),
)
}
}
@ -1016,14 +887,6 @@ impl Spanned for SqlOption {
} => union_spans(
core::iter::once(column_name.span).chain(for_values.iter().map(|i| i.span())),
),
SqlOption::TableSpace(_) => Span::empty(),
SqlOption::Comment(_) => Span::empty(),
SqlOption::NamedParenthesizedList(NamedParenthesizedList {
key: name,
name: value,
values,
}) => union_spans(core::iter::once(name.span).chain(values.iter().map(|i| i.span)))
.union_opt(&value.as_ref().map(|i| i.span)),
}
}
}
@ -1060,11 +923,7 @@ impl Spanned for CreateTableOptions {
match self {
CreateTableOptions::None => Span::empty(),
CreateTableOptions::With(vec) => union_spans(vec.iter().map(|i| i.span())),
CreateTableOptions::Options(vec) => {
union_spans(vec.as_slice().iter().map(|i| i.span()))
}
CreateTableOptions::Plain(vec) => union_spans(vec.iter().map(|i| i.span())),
CreateTableOptions::TableProperties(vec) => union_spans(vec.iter().map(|i| i.span())),
CreateTableOptions::Options(vec) => union_spans(vec.iter().map(|i| i.span())),
}
}
}
@ -1076,10 +935,7 @@ impl Spanned for CreateTableOptions {
impl Spanned for AlterTableOperation {
fn span(&self) -> Span {
match self {
AlterTableOperation::AddConstraint {
constraint,
not_valid: _,
} => constraint.span(),
AlterTableOperation::AddConstraint(table_constraint) => table_constraint.span(),
AlterTableOperation::AddColumn {
column_keyword: _,
if_not_exists: _,
@ -1111,11 +967,10 @@ impl Spanned for AlterTableOperation {
drop_behavior: _,
} => name.span,
AlterTableOperation::DropColumn {
has_column_keyword: _,
column_names,
column_name,
if_exists: _,
drop_behavior: _,
} => union_spans(column_names.iter().map(|i| i.span)),
} => column_name.span,
AlterTableOperation::AttachPartition { partition } => partition.span(),
AlterTableOperation::DetachPartition { partition } => partition.span(),
AlterTableOperation::FreezePartition {
@ -1131,8 +986,6 @@ impl Spanned for AlterTableOperation {
.span()
.union_opt(&with_name.as_ref().map(|n| n.span)),
AlterTableOperation::DropPrimaryKey => Span::empty(),
AlterTableOperation::DropForeignKey { name } => name.span,
AlterTableOperation::DropIndex { name } => name.span,
AlterTableOperation::EnableAlwaysRule { name } => name.span,
AlterTableOperation::EnableAlwaysTrigger { name } => name.span,
AlterTableOperation::EnableReplicaRule { name } => name.span,
@ -1196,11 +1049,6 @@ impl Spanned for AlterTableOperation {
AlterTableOperation::DropClusteringKey => Span::empty(),
AlterTableOperation::SuspendRecluster => Span::empty(),
AlterTableOperation::ResumeRecluster => Span::empty(),
AlterTableOperation::Algorithm { .. } => Span::empty(),
AlterTableOperation::AutoIncrement { value, .. } => value.span(),
AlterTableOperation::Lock { .. } => Span::empty(),
AlterTableOperation::ReplicaIdentity { .. } => Span::empty(),
AlterTableOperation::ValidateConstraint { name } => name.span,
}
}
}
@ -1234,21 +1082,16 @@ impl Spanned for ProjectionSelect {
}
}
/// # partial span
///
/// Missing spans:
/// - [OrderByKind::All]
impl Spanned for OrderBy {
fn span(&self) -> Span {
match &self.kind {
OrderByKind::All(_) => Span::empty(),
OrderByKind::Expressions(exprs) => union_spans(
exprs
.iter()
.map(|i| i.span())
.chain(self.interpolate.iter().map(|i| i.span())),
),
}
let OrderBy { exprs, interpolate } = self;
union_spans(
exprs
.iter()
.map(|i| i.span())
.chain(interpolate.iter().map(|i| i.span())),
)
}
}
@ -1416,6 +1259,7 @@ impl Spanned for AssignmentTarget {
/// e.g. `IS NULL <expr>` reports as `<expr>::span`.
///
/// Missing spans:
/// - [Expr::TypedString]
/// - [Expr::MatchAgainst] # MySQL specific
/// - [Expr::RLike] # MySQL specific
/// - [Expr::Struct] # BigQuery specific
@ -1428,6 +1272,7 @@ impl Spanned for Expr {
match self {
Expr::Identifier(ident) => ident.span,
Expr::CompoundIdentifier(vec) => union_spans(vec.iter().map(|i| i.span)),
Expr::CompositeAccess { expr, key } => expr.span().union(&key.span),
Expr::CompoundFieldAccess { root, access_chain } => {
union_spans(iter::once(root.span()).chain(access_chain.iter().map(|i| i.span())))
}
@ -1507,10 +1352,10 @@ impl Spanned for Expr {
.union_opt(&overlay_for.as_ref().map(|i| i.span())),
Expr::Collate { expr, collation } => expr
.span()
.union(&union_spans(collation.0.iter().map(|i| i.span()))),
.union(&union_spans(collation.0.iter().map(|i| i.span))),
Expr::Nested(expr) => expr.span(),
Expr::Value(value) => value.span(),
Expr::TypedString { value, .. } => value.span(),
Expr::TypedString { .. } => Span::empty(),
Expr::Function(function) => function.span(),
Expr::GroupingSets(vec) => {
union_spans(vec.iter().flat_map(|i| i.iter().map(|k| k.span())))
@ -1565,7 +1410,6 @@ impl Spanned for Expr {
substring_from,
substring_for,
special: _,
shorthand: _,
} => union_spans(
core::iter::once(expr.span())
.chain(substring_from.as_ref().map(|i| i.span()))
@ -1585,26 +1429,20 @@ impl Spanned for Expr {
.map(|items| union_spans(items.iter().map(|i| i.span()))),
),
),
Expr::Prefixed { value, .. } => value.span(),
Expr::IntroducedString { value, .. } => value.span(),
Expr::Case {
case_token,
end_token,
operand,
conditions,
results,
else_result,
} => union_spans(
iter::once(case_token.0.span)
.chain(
operand
.as_ref()
.map(|i| i.span())
.into_iter()
.chain(conditions.iter().flat_map(|case_when| {
[case_when.condition.span(), case_when.result.span()]
}))
.chain(else_result.as_ref().map(|i| i.span())),
)
.chain(iter::once(end_token.0.span)),
operand
.as_ref()
.map(|i| i.span())
.into_iter()
.chain(conditions.iter().map(|i| i.span()))
.chain(results.iter().map(|i| i.span()))
.chain(else_result.as_ref().map(|i| i.span())),
),
Expr::Exists { subquery, .. } => subquery.span(),
Expr::Subquery(query) => query.span(),
@ -1618,13 +1456,13 @@ impl Spanned for Expr {
object_name
.0
.iter()
.map(|i| i.span())
.map(|i| i.span)
.chain(iter::once(token.0.span)),
),
Expr::OuterJoin(expr) => expr.span(),
Expr::Prior(expr) => expr.span(),
Expr::Lambda(_) => Span::empty(),
Expr::MemberOf(member_of) => member_of.value.span().union(&member_of.array.span()),
Expr::Method(_) => Span::empty(),
}
}
}
@ -1663,19 +1501,7 @@ impl Spanned for ObjectName {
fn span(&self) -> Span {
let ObjectName(segments) = self;
union_spans(segments.iter().map(|i| i.span()))
}
}
impl Spanned for ObjectNamePart {
fn span(&self) -> Span {
match self {
ObjectNamePart::Identifier(ident) => ident.span,
ObjectNamePart::Function(func) => func
.name
.span
.union(&union_spans(func.args.iter().map(|i| i.span()))),
}
union_spans(segments.iter().map(|i| i.span))
}
}
@ -1706,7 +1532,7 @@ impl Spanned for Function {
union_spans(
name.0
.iter()
.map(|i| i.span())
.map(|i| i.span)
.chain(iter::once(args.span()))
.chain(iter::once(parameters.span()))
.chain(filter.iter().map(|i| i.span()))
@ -1783,23 +1609,16 @@ impl Spanned for JsonPathElem {
}
}
impl Spanned for SelectItemQualifiedWildcardKind {
fn span(&self) -> Span {
match self {
SelectItemQualifiedWildcardKind::ObjectName(object_name) => object_name.span(),
SelectItemQualifiedWildcardKind::Expr(expr) => expr.span(),
}
}
}
impl Spanned for SelectItem {
fn span(&self) -> Span {
match self {
SelectItem::UnnamedExpr(expr) => expr.span(),
SelectItem::ExprWithAlias { expr, alias } => expr.span().union(&alias.span),
SelectItem::QualifiedWildcard(kind, wildcard_additional_options) => union_spans(
[kind.span()]
.into_iter()
SelectItem::QualifiedWildcard(object_name, wildcard_additional_options) => union_spans(
object_name
.0
.iter()
.map(|i| i.span)
.chain(iter::once(wildcard_additional_options.span())),
),
SelectItem::Wildcard(wildcard_additional_options) => wildcard_additional_options.span(),
@ -1906,11 +1725,10 @@ impl Spanned for TableFactor {
partitions: _,
json_path: _,
sample: _,
index_hints: _,
} => union_spans(
name.0
.iter()
.map(|i| i.span())
.map(|i| i.span)
.chain(alias.as_ref().map(|alias| {
union_spans(
iter::once(alias.name.span)
@ -1955,12 +1773,11 @@ impl Spanned for TableFactor {
} => union_spans(
name.0
.iter()
.map(|i| i.span())
.map(|i| i.span)
.chain(args.iter().map(|i| i.span()))
.chain(alias.as_ref().map(|alias| alias.span())),
),
TableFactor::JsonTable { .. } => Span::empty(),
TableFactor::XmlTable { .. } => Span::empty(),
TableFactor::Pivot {
table,
aggregate_functions,
@ -1979,7 +1796,6 @@ impl Spanned for TableFactor {
TableFactor::Unpivot {
table,
value,
null_inclusion: _,
name,
columns,
alias,
@ -2059,7 +1875,8 @@ impl Spanned for OrderByExpr {
fn span(&self) -> Span {
let OrderByExpr {
expr,
options: _,
asc: _, // bool
nulls_first: _, // bool
with_fill,
} = self;
@ -2107,7 +1924,7 @@ impl Spanned for FunctionArgExpr {
match self {
FunctionArgExpr::Expr(expr) => expr.span(),
FunctionArgExpr::QualifiedWildcard(object_name) => {
union_spans(object_name.0.iter().map(|i| i.span()))
union_spans(object_name.0.iter().map(|i| i.span))
}
FunctionArgExpr::Wildcard => Span::empty(),
}
@ -2130,13 +1947,10 @@ impl Spanned for TableAliasColumnDef {
}
}
impl Spanned for ValueWithSpan {
fn span(&self) -> Span {
self.span
}
}
/// The span is stored in the `ValueWithSpan` struct
/// # missing span
///
/// The span of a `Value` is currently not implemented, as doing so
/// requires a breaking change, which may be done in a future release.
impl Spanned for Value {
fn span(&self) -> Span {
Span::empty() // # todo: Value needs to store spans before this is possible
@ -2164,11 +1978,8 @@ impl Spanned for Join {
impl Spanned for JoinOperator {
fn span(&self) -> Span {
match self {
JoinOperator::Join(join_constraint) => join_constraint.span(),
JoinOperator::Inner(join_constraint) => join_constraint.span(),
JoinOperator::Left(join_constraint) => join_constraint.span(),
JoinOperator::LeftOuter(join_constraint) => join_constraint.span(),
JoinOperator::Right(join_constraint) => join_constraint.span(),
JoinOperator::RightOuter(join_constraint) => join_constraint.span(),
JoinOperator::FullOuter(join_constraint) => join_constraint.span(),
JoinOperator::CrossJoin => Span::empty(),
@ -2184,7 +1995,6 @@ impl Spanned for JoinOperator {
} => match_condition.span().union(&constraint.span()),
JoinOperator::Anti(join_constraint) => join_constraint.span(),
JoinOperator::Semi(join_constraint) => join_constraint.span(),
JoinOperator::StraightJoin(join_constraint) => join_constraint.span(),
}
}
}
@ -2220,7 +2030,6 @@ impl Spanned for Select {
distinct: _, // todo
top: _, // todo, mysql specific
projection,
exclude: _,
into,
from,
lateral_views,
@ -2237,7 +2046,6 @@ impl Spanned for Select {
value_table_mode: _, // todo, BigQuery specific
connect_by,
top_before_distinct: _,
flavor: _,
} = self;
union_spans(
@ -2316,11 +2124,10 @@ impl Spanned for SelectInto {
impl Spanned for UpdateTableFromKind {
fn span(&self) -> Span {
let from = match self {
UpdateTableFromKind::BeforeSet(from) => from,
UpdateTableFromKind::AfterSet(from) => from,
};
union_spans(from.iter().map(|t| t.span()))
match self {
UpdateTableFromKind::BeforeSet(from) => from.span(),
UpdateTableFromKind::AfterSet(from) => from.span(),
}
}
}
@ -2328,35 +2135,13 @@ impl Spanned for TableObject {
fn span(&self) -> Span {
match self {
TableObject::TableName(ObjectName(segments)) => {
union_spans(segments.iter().map(|i| i.span()))
union_spans(segments.iter().map(|i| i.span))
}
TableObject::TableFunction(func) => func.span(),
}
}
}
impl Spanned for BeginEndStatements {
fn span(&self) -> Span {
let BeginEndStatements {
begin_token,
statements,
end_token,
} = self;
union_spans(
core::iter::once(begin_token.0.span)
.chain(statements.iter().map(|i| i.span()))
.chain(core::iter::once(end_token.0.span)),
)
}
}
impl Spanned for OpenStatement {
fn span(&self) -> Span {
let OpenStatement { cursor_name } = self;
cursor_name.span
}
}
#[cfg(test)]
pub mod tests {
use crate::dialect::{Dialect, GenericDialect, SnowflakeDialect};
@ -2496,16 +2281,4 @@ pub mod tests {
assert_eq!(test.get_source(body_span), "SELECT cte.* FROM cte");
}
#[test]
fn test_case_expr_span() {
let dialect = &GenericDialect;
let mut test = SpanTest::new(dialect, "CASE 1 WHEN 2 THEN 3 ELSE 4 END");
let expr = test.0.parse_expr().unwrap();
let expr_span = expr.span();
assert_eq!(
test.get_source(expr_span),
"CASE 1 WHEN 2 THEN 3 ELSE 4 END"
);
}
}

View file

@ -110,7 +110,6 @@ impl fmt::Display for TriggerEvent {
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum TriggerPeriod {
For,
After,
Before,
InsteadOf,
@ -119,7 +118,6 @@ pub enum TriggerPeriod {
impl fmt::Display for TriggerPeriod {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
TriggerPeriod::For => write!(f, "FOR"),
TriggerPeriod::After => write!(f, "AFTER"),
TriggerPeriod::Before => write!(f, "BEFORE"),
TriggerPeriod::InsteadOf => write!(f, "INSTEAD OF"),

View file

@ -26,96 +26,14 @@ use bigdecimal::BigDecimal;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::{ast::Ident, tokenizer::Span};
use crate::ast::Ident;
#[cfg(feature = "visitor")]
use sqlparser_derive::{Visit, VisitMut};
/// Wraps a primitive SQL [`Value`] with its [`Span`] location
///
/// # Example: create a `ValueWithSpan` from a `Value`
/// ```
/// # use sqlparser::ast::{Value, ValueWithSpan};
/// # use sqlparser::tokenizer::{Location, Span};
/// let value = Value::SingleQuotedString(String::from("endpoint"));
/// // from line 1, column 1 to line 1, column 7
/// let span = Span::new(Location::new(1, 1), Location::new(1, 7));
/// let value_with_span = value.with_span(span);
/// ```
///
/// # Example: create a `ValueWithSpan` from a `Value` with an empty span
///
/// You can call [`Value::with_empty_span`] to create a `ValueWithSpan` with an empty span
/// ```
/// # use sqlparser::ast::{Value, ValueWithSpan};
/// # use sqlparser::tokenizer::{Location, Span};
/// let value = Value::SingleQuotedString(String::from("endpoint"));
/// let value_with_span = value.with_empty_span();
/// assert_eq!(value_with_span.span, Span::empty());
/// ```
///
/// You can also use the [`From`] trait to convert `ValueWithSpan` to/from `Value`s
/// ```
/// # use sqlparser::ast::{Value, ValueWithSpan};
/// # use sqlparser::tokenizer::{Location, Span};
/// let value = Value::SingleQuotedString(String::from("endpoint"));
/// // converting `Value` to `ValueWithSpan` results in an empty span
/// let value_with_span: ValueWithSpan = value.into();
/// assert_eq!(value_with_span.span, Span::empty());
/// // convert back to `Value`
/// let value: Value = value_with_span.into();
/// ```
#[derive(Debug, Clone, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub struct ValueWithSpan {
pub value: Value,
pub span: Span,
}
impl PartialEq for ValueWithSpan {
fn eq(&self, other: &Self) -> bool {
self.value == other.value
}
}
impl Ord for ValueWithSpan {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.value.cmp(&other.value)
}
}
impl PartialOrd for ValueWithSpan {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
Some(Ord::cmp(self, other))
}
}
impl core::hash::Hash for ValueWithSpan {
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
self.value.hash(state);
}
}
impl From<Value> for ValueWithSpan {
fn from(value: Value) -> Self {
value.with_empty_span()
}
}
impl From<ValueWithSpan> for Value {
fn from(value: ValueWithSpan) -> Self {
value.value
}
}
/// Primitive SQL values such as number and string
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "visitor",
derive(Visit, VisitMut),
visit(with = "visit_value")
)]
#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))]
pub enum Value {
/// Numeric literal
#[cfg(not(feature = "bigdecimal"))]
@ -179,53 +97,6 @@ pub enum Value {
Placeholder(String),
}
impl ValueWithSpan {
/// If the underlying literal is a string, regardless of quote style, returns the associated string value
pub fn into_string(self) -> Option<String> {
self.value.into_string()
}
}
impl Value {
/// If the underlying literal is a string, regardless of quote style, returns the associated string value
pub fn into_string(self) -> Option<String> {
match self {
Value::SingleQuotedString(s)
| Value::DoubleQuotedString(s)
| Value::TripleSingleQuotedString(s)
| Value::TripleDoubleQuotedString(s)
| Value::SingleQuotedByteStringLiteral(s)
| Value::DoubleQuotedByteStringLiteral(s)
| Value::TripleSingleQuotedByteStringLiteral(s)
| Value::TripleDoubleQuotedByteStringLiteral(s)
| Value::SingleQuotedRawStringLiteral(s)
| Value::DoubleQuotedRawStringLiteral(s)
| Value::TripleSingleQuotedRawStringLiteral(s)
| Value::TripleDoubleQuotedRawStringLiteral(s)
| Value::EscapedStringLiteral(s)
| Value::UnicodeStringLiteral(s)
| Value::NationalStringLiteral(s)
| Value::HexStringLiteral(s) => Some(s),
Value::DollarQuotedString(s) => Some(s.value),
_ => None,
}
}
pub fn with_span(self, span: Span) -> ValueWithSpan {
ValueWithSpan { value: self, span }
}
pub fn with_empty_span(self) -> ValueWithSpan {
self.with_span(Span::empty())
}
}
impl fmt::Display for ValueWithSpan {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.value)
}
}
impl fmt::Display for Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
@ -455,38 +326,30 @@ impl fmt::Display for EscapeQuotedString<'_> {
// | `"A\"B\"A"` | default | `DoubleQuotedString(String::from("A\"B\"A"))` | `"A""B""A"` |
let quote = self.quote;
let mut previous_char = char::default();
let mut start_idx = 0;
let mut peekable_chars = self.string.char_indices().peekable();
while let Some(&(idx, ch)) = peekable_chars.peek() {
let mut peekable_chars = self.string.chars().peekable();
while let Some(&ch) = peekable_chars.peek() {
match ch {
char if char == quote => {
if previous_char == '\\' {
// the quote is already escaped with a backslash, skip
write!(f, "{char}")?;
peekable_chars.next();
continue;
}
peekable_chars.next();
match peekable_chars.peek() {
Some((_, c)) if *c == quote => {
// the quote is already escaped with another quote, skip
peekable_chars.next();
}
_ => {
// The quote is not escaped.
// Including idx in the range, so the quote at idx will be printed twice:
// in this call to write_str() and in the next one.
f.write_str(&self.string[start_idx..=idx])?;
start_idx = idx;
}
if peekable_chars.peek().map(|c| *c == quote).unwrap_or(false) {
write!(f, "{char}{char}")?;
peekable_chars.next();
} else {
write!(f, "{char}{char}")?;
}
}
_ => {
write!(f, "{ch}")?;
peekable_chars.next();
}
}
previous_char = ch;
}
f.write_str(&self.string[start_idx..])?;
Ok(())
}
}
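Both the old and new loops implement the same contract, quote doubling; a sketch of that observable behavior via the public `Value` type (assumption: `Value`'s `Display` routes through `EscapeQuotedString`):

```rust
use sqlparser::ast::Value;

fn main() {
    // Embedded quotes are doubled when the literal is displayed.
    let v = Value::SingleQuotedString("A'B'A".to_string());
    assert_eq!(v.to_string(), "'A''B''A'");
}
```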
@ -550,16 +413,16 @@ impl fmt::Display for EscapeUnicodeStringLiteral<'_> {
write!(f, r#"\\"#)?;
}
x if x.is_ascii() => {
write!(f, "{c}")?;
write!(f, "{}", c)?;
}
_ => {
let codepoint = c as u32;
// if the character fits in 32 bits, we can use the \XXXX format
// otherwise, we need to use the \+XXXXXX format
if codepoint <= 0xFFFF {
write!(f, "\\{codepoint:04X}")?;
write!(f, "\\{:04X}", codepoint)?;
} else {
write!(f, "\\+{codepoint:06X}")?;
write!(f, "\\+{:06X}", codepoint)?;
}
}
}

View file

@ -17,7 +17,7 @@
//! Recursive visitors for ast Nodes. See [`Visitor`] for more details.
use crate::ast::{Expr, ObjectName, Query, Statement, TableFactor, Value};
use crate::ast::{Expr, ObjectName, Query, Statement, TableFactor};
use core::ops::ControlFlow;
/// A type that can be visited by a [`Visitor`]. See [`Visitor`] for
@ -233,16 +233,6 @@ pub trait Visitor {
fn post_visit_statement(&mut self, _statement: &Statement) -> ControlFlow<Self::Break> {
ControlFlow::Continue(())
}
/// Invoked for any Value that appear in the AST before visiting children
fn pre_visit_value(&mut self, _value: &Value) -> ControlFlow<Self::Break> {
ControlFlow::Continue(())
}
/// Invoked for any Value that appear in the AST after visiting children
fn post_visit_value(&mut self, _value: &Value) -> ControlFlow<Self::Break> {
ControlFlow::Continue(())
}
}
/// A visitor that can be used to mutate an AST tree.
@ -347,16 +337,6 @@ pub trait VisitorMut {
fn post_visit_statement(&mut self, _statement: &mut Statement) -> ControlFlow<Self::Break> {
ControlFlow::Continue(())
}
/// Invoked for any value that appear in the AST before visiting children
fn pre_visit_value(&mut self, _value: &mut Value) -> ControlFlow<Self::Break> {
ControlFlow::Continue(())
}
/// Invoked for any statements that appear in the AST after visiting children
fn post_visit_value(&mut self, _value: &mut Value) -> ControlFlow<Self::Break> {
ControlFlow::Continue(())
}
}
struct RelationVisitor<F>(F);
@ -423,7 +403,7 @@ where
/// ```
/// # use sqlparser::parser::Parser;
/// # use sqlparser::dialect::GenericDialect;
/// # use sqlparser::ast::{ObjectName, ObjectNamePart, Ident, visit_relations_mut};
/// # use sqlparser::ast::{ObjectName, visit_relations_mut};
/// # use core::ops::ControlFlow;
/// let sql = "SELECT a FROM foo";
/// let mut statements = Parser::parse_sql(&GenericDialect{}, sql)
@ -431,7 +411,7 @@ where
///
/// // visit statements, renaming table foo to bar
/// visit_relations_mut(&mut statements, |table| {
/// table.0[0] = ObjectNamePart::Identifier(Ident::new("bar"));
/// table.0[0].value = table.0[0].value.replace("foo", "bar");
/// ControlFlow::<()>::Continue(())
/// });
///
@ -523,7 +503,7 @@ where
/// // Remove all select limits in sub-queries
/// visit_expressions_mut(&mut statements, |expr| {
/// if let Expr::Subquery(q) = expr {
/// q.limit_clause = None;
/// q.limit = None
/// }
/// ControlFlow::<()>::Continue(())
/// });
@ -547,9 +527,9 @@ where
///
/// visit_expressions_mut(&mut statements, |expr| {
/// if matches!(expr, Expr::Identifier(col_name) if col_name.value == "x") {
/// let old_expr = std::mem::replace(expr, Expr::value(Value::Null));
/// let old_expr = std::mem::replace(expr, Expr::Value(Value::Null));
/// *expr = Expr::Function(Function {
/// name: ObjectName::from(vec![Ident::new("f")]),
/// name: ObjectName(vec![Ident::new("f")]),
/// uses_odbc_syntax: false,
/// args: FunctionArguments::List(FunctionArgumentList {
/// duplicate_treatment: None,
@ -647,7 +627,7 @@ where
/// // Remove all select limits in outer statements (not in sub-queries)
/// visit_statements_mut(&mut statements, |stmt| {
/// if let Statement::Query(q) = stmt {
/// q.limit_clause = None;
/// q.limit = None
/// }
/// ControlFlow::<()>::Continue(())
/// });
@ -667,7 +647,6 @@ where
#[cfg(test)]
mod tests {
use super::*;
use crate::ast::Statement;
use crate::dialect::GenericDialect;
use crate::parser::Parser;
use crate::tokenizer::Tokenizer;
@ -741,7 +720,7 @@ mod tests {
}
}
fn do_visit<V: Visitor<Break = ()>>(sql: &str, visitor: &mut V) -> Statement {
fn do_visit(sql: &str) -> Vec<String> {
let dialect = GenericDialect {};
let tokens = Tokenizer::new(&dialect, sql).tokenize().unwrap();
let s = Parser::new(&dialect)
@ -749,9 +728,9 @@ mod tests {
.parse_statement()
.unwrap();
let flow = s.visit(visitor);
assert_eq!(flow, ControlFlow::Continue(()));
s
let mut visitor = TestVisitor::default();
s.visit(&mut visitor);
visitor.visited
}
#[test]
@ -910,9 +889,8 @@ mod tests {
),
];
for (sql, expected) in tests {
let mut visitor = TestVisitor::default();
let _ = do_visit(sql, &mut visitor);
let actual: Vec<_> = visitor.visited.iter().map(|x| x.as_str()).collect();
let actual = do_visit(sql);
let actual: Vec<_> = actual.iter().map(|x| x.as_str()).collect();
assert_eq!(actual, expected)
}
}
@ -926,10 +904,10 @@ mod tests {
#[test]
fn overflow() {
let cond = (0..1000)
.map(|n| format!("X = {n}"))
.map(|n| format!("X = {}", n))
.collect::<Vec<_>>()
.join(" OR ");
let sql = format!("SELECT x where {cond}");
let sql = format!("SELECT x where {0}", cond);
let dialect = GenericDialect {};
let tokens = Tokenizer::new(&dialect, sql.as_str()).tokenize().unwrap();
@ -939,72 +917,6 @@ mod tests {
.unwrap();
let mut visitor = QuickVisitor {};
let flow = s.visit(&mut visitor);
assert_eq!(flow, ControlFlow::Continue(()));
}
}
#[cfg(test)]
mod visit_mut_tests {
use crate::ast::{Statement, Value, VisitMut, VisitorMut};
use crate::dialect::GenericDialect;
use crate::parser::Parser;
use crate::tokenizer::Tokenizer;
use core::ops::ControlFlow;
#[derive(Default)]
struct MutatorVisitor {
index: u64,
}
impl VisitorMut for MutatorVisitor {
type Break = ();
fn pre_visit_value(&mut self, value: &mut Value) -> ControlFlow<Self::Break> {
self.index += 1;
*value = Value::SingleQuotedString(format!("REDACTED_{}", self.index));
ControlFlow::Continue(())
}
fn post_visit_value(&mut self, _value: &mut Value) -> ControlFlow<Self::Break> {
ControlFlow::Continue(())
}
}
fn do_visit_mut<V: VisitorMut<Break = ()>>(sql: &str, visitor: &mut V) -> Statement {
let dialect = GenericDialect {};
let tokens = Tokenizer::new(&dialect, sql).tokenize().unwrap();
let mut s = Parser::new(&dialect)
.with_tokens(tokens)
.parse_statement()
.unwrap();
let flow = s.visit(visitor);
assert_eq!(flow, ControlFlow::Continue(()));
s
}
#[test]
fn test_value_redact() {
let tests = vec![
(
concat!(
"SELECT * FROM monthly_sales ",
"PIVOT(SUM(a.amount) FOR a.MONTH IN ('JAN', 'FEB', 'MAR', 'APR')) AS p (c, d) ",
"ORDER BY EMPID"
),
concat!(
"SELECT * FROM monthly_sales ",
"PIVOT(SUM(a.amount) FOR a.MONTH IN ('REDACTED_1', 'REDACTED_2', 'REDACTED_3', 'REDACTED_4')) AS p (c, d) ",
"ORDER BY EMPID"
),
),
];
for (sql, expected) in tests {
let mut visitor = MutatorVisitor::default();
let mutated = do_visit_mut(sql, &mut visitor);
assert_eq!(mutated.to_string(), expected)
}
s.visit(&mut visitor);
}
}
View file
@ -15,45 +15,14 @@
// specific language governing permissions and limitations
// under the License.
use crate::ast::Statement;
use crate::dialect::Dialect;
use crate::keywords::Keyword;
use crate::parser::{Parser, ParserError};
/// These keywords are disallowed as column identifiers, so that
/// `SELECT 5 AS <col> FROM T` is rejected by BigQuery.
const RESERVED_FOR_COLUMN_ALIAS: &[Keyword] = &[
Keyword::WITH,
Keyword::SELECT,
Keyword::WHERE,
Keyword::GROUP,
Keyword::HAVING,
Keyword::ORDER,
Keyword::LATERAL,
Keyword::LIMIT,
Keyword::FETCH,
Keyword::UNION,
Keyword::EXCEPT,
Keyword::INTERSECT,
Keyword::FROM,
Keyword::INTO,
Keyword::END,
];
/// A [`Dialect`] for [Google Bigquery](https://cloud.google.com/bigquery/)
#[derive(Debug, Default)]
pub struct BigQueryDialect;
impl Dialect for BigQueryDialect {
fn parse_statement(&self, parser: &mut Parser) -> Option<Result<Statement, ParserError>> {
if parser.parse_keyword(Keyword::BEGIN) {
return Some(parser.parse_begin_exception_end());
}
None
}
/// See <https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#identifiers>
// See https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#identifiers
fn is_delimited_identifier_start(&self, ch: char) -> bool {
ch == '`'
}
@ -62,16 +31,8 @@ impl Dialect for BigQueryDialect {
true
}
/// See <https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement>
fn supports_column_definition_trailing_commas(&self) -> bool {
true
}
fn is_identifier_start(&self, ch: char) -> bool {
ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch == '_'
// BigQuery supports `@@foo.bar` variable syntax in its procedural language.
// https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#beginexceptionend
|| ch == '@'
}
fn is_identifier_part(&self, ch: char) -> bool {
@ -117,31 +78,8 @@ impl Dialect for BigQueryDialect {
true
}
/// See <https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_expression_star>
fn supports_select_expr_star(&self) -> bool {
true
}
/// See <https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#execute_immediate>
fn supports_execute_immediate(&self) -> bool {
true
}
// See <https://cloud.google.com/bigquery/docs/access-historical-data>
fn supports_timestamp_versioning(&self) -> bool {
true
}
// See <https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#group_by_clause>
fn supports_group_by_expr(&self) -> bool {
true
}
fn is_column_alias(&self, kw: &Keyword, _parser: &mut Parser) -> bool {
!RESERVED_FOR_COLUMN_ALIAS.contains(kw)
}
fn supports_pipe_operator(&self) -> bool {
true
}
}
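A usage sketch for the `parse_statement` override above, assuming the newer side of this diff (the exact `BEGIN ... EXCEPTION ... END` grammar accepted may vary by release):

```rust
use sqlparser::dialect::BigQueryDialect;
use sqlparser::parser::Parser;

fn main() {
    // Routed through parse_begin_exception_end by the override above.
    let sql = "BEGIN SELECT 1; EXCEPTION WHEN ERROR THEN SELECT 2; END";
    let stmts = Parser::parse_sql(&BigQueryDialect {}, sql).unwrap();
    assert_eq!(stmts.len(), 1);
}
```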
View file
@ -59,10 +59,6 @@ impl Dialect for ClickHouseDialect {
true
}
fn supports_numeric_literal_underscores(&self) -> bool {
true
}
// ClickHouse uses this for some FORMAT expressions in `INSERT` context, e.g. when inserting
// with FORMAT JSONEachRow a raw JSON key-value expression is valid and expected.
//
@ -70,28 +66,4 @@ impl Dialect for ClickHouseDialect {
fn supports_dictionary_syntax(&self) -> bool {
true
}
/// See <https://clickhouse.com/docs/en/sql-reference/functions#higher-order-functions---operator-and-lambdaparams-expr-function>
fn supports_lambda_functions(&self) -> bool {
true
}
fn supports_from_first_select(&self) -> bool {
true
}
/// See <https://clickhouse.com/docs/en/sql-reference/statements/select/order-by>
fn supports_order_by_all(&self) -> bool {
true
}
// See <https://clickhouse.com/docs/en/sql-reference/aggregate-functions/grouping_function#grouping-sets>
fn supports_group_by_expr(&self) -> bool {
true
}
/// See <https://clickhouse.com/docs/en/sql-reference/statements/select/group-by#rollup-modifier>
fn supports_group_by_with_modifier(&self) -> bool {
true
}
}
View file
@ -65,11 +65,6 @@ impl Dialect for DuckDbDialect {
true
}
/// See <https://duckdb.org/docs/sql/functions/lambda.html>
fn supports_lambda_functions(&self) -> bool {
true
}
// DuckDB is compatible with PostgreSQL syntax for this statement,
// although not all features may be implemented.
fn supports_explain_with_utility_options(&self) -> bool {
@ -80,22 +75,4 @@ impl Dialect for DuckDbDialect {
fn supports_load_extension(&self) -> bool {
true
}
// See DuckDB <https://duckdb.org/docs/sql/data_types/array.html#defining-an-array-field>
fn supports_array_typedef_with_brackets(&self) -> bool {
true
}
fn supports_from_first_select(&self) -> bool {
true
}
/// See DuckDB <https://duckdb.org/docs/sql/query_syntax/orderby.html#order-by-all-examples>
fn supports_order_by_all(&self) -> bool {
true
}
fn supports_select_wildcard_exclude(&self) -> bool {
true
}
}
View file
@ -48,14 +48,6 @@ impl Dialect for GenericDialect {
true
}
fn supports_group_by_with_modifier(&self) -> bool {
true
}
fn supports_left_associative_joins_without_parens(&self) -> bool {
true
}
fn supports_connect_by(&self) -> bool {
true
}
@ -112,14 +104,6 @@ impl Dialect for GenericDialect {
true
}
fn supports_from_first_select(&self) -> bool {
true
}
fn supports_projection_trailing_commas(&self) -> bool {
true
}
fn supports_asc_desc_in_column_definition(&self) -> bool {
true
}
@ -155,32 +139,4 @@ impl Dialect for GenericDialect {
fn supports_user_host_grantee(&self) -> bool {
true
}
fn supports_string_escape_constant(&self) -> bool {
true
}
fn supports_array_typedef_with_brackets(&self) -> bool {
true
}
fn supports_match_against(&self) -> bool {
true
}
fn supports_set_names(&self) -> bool {
true
}
fn supports_comma_separated_set_assignments(&self) -> bool {
true
}
fn supports_filter_during_aggregation(&self) -> bool {
true
}
fn supports_select_wildcard_exclude(&self) -> bool {
true
}
}
View file
@ -52,23 +52,18 @@ impl Dialect for HiveDialect {
true
}
/// See <https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=27362061#Tutorial-BuiltInOperators>
/// See Hive <https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=27362061#Tutorial-BuiltInOperators>
fn supports_bang_not_operator(&self) -> bool {
true
}
/// See <https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=27362036#LanguageManualDML-Loadingfilesintotables>
/// See Hive <https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=27362036#LanguageManualDML-Loadingfilesintotables>
fn supports_load_data(&self) -> bool {
true
}
/// See <https://cwiki.apache.org/confluence/display/hive/languagemanual+sampling>
/// See Hive <https://cwiki.apache.org/confluence/display/hive/languagemanual+sampling>
fn supports_table_sample_before_alias(&self) -> bool {
true
}
/// See <https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=30151323#EnhancedAggregation,Cube,GroupingandRollup-CubesandRollupsr>
fn supports_group_by_with_modifier(&self) -> bool {
true
}
}
View file
@ -49,7 +49,7 @@ pub use self::postgresql::PostgreSqlDialect;
pub use self::redshift::RedshiftSqlDialect;
pub use self::snowflake::SnowflakeDialect;
pub use self::sqlite::SQLiteDialect;
use crate::ast::{ColumnOption, Expr, GranteesType, Ident, ObjectNamePart, Statement};
use crate::ast::{ColumnOption, Expr, Statement};
pub use crate::keywords;
use crate::keywords::Keyword;
use crate::parser::{Parser, ParserError};
@ -201,33 +201,6 @@ pub trait Dialect: Debug + Any {
false
}
/// Determine whether the dialect strips the backslash when escaping LIKE wildcards (%, _).
///
/// [MySQL] has a special case when escaping single quoted strings which leaves these unescaped
/// so they can be used in LIKE patterns without double-escaping (as is necessary in other
/// escaping dialects, such as [Snowflake]). Generally, special characters have escaping rules
/// causing them to be replaced with different byte sequences (e.g. `'\0'` becoming the zero
/// byte), and the default if an escaped character does not have a specific escaping rule is to
/// strip the backslash (e.g. there is no rule for `h`, so `'\h' = 'h'`). MySQL's special case
/// for ignoring LIKE wildcard escapes is to *not* strip the backslash, so that `'\%' = '\\%'`.
/// This applies to all string literals though, not just those used in LIKE patterns.
///
/// ```text
/// mysql> select '\_', hex('\\'), hex('_'), hex('\_');
/// +----+-----------+----------+-----------+
/// | \_ | hex('\\') | hex('_') | hex('\_') |
/// +----+-----------+----------+-----------+
/// | \_ | 5C | 5F | 5C5F |
/// +----+-----------+----------+-----------+
/// 1 row in set (0.00 sec)
/// ```
///
/// [MySQL]: https://dev.mysql.com/doc/refman/8.4/en/string-literals.html
/// [Snowflake]: https://docs.snowflake.com/en/sql-reference/functions/like#usage-notes
fn ignores_wildcard_escapes(&self) -> bool {
false
}
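A tokenizer-level sketch of what this flag changes, assuming the newer side of this diff where `MySqlDialect` opts in (see the MySQL hunk below):

```rust
use sqlparser::dialect::MySqlDialect;
use sqlparser::tokenizer::{Token, Tokenizer};

fn main() {
    // With ignores_wildcard_escapes, the backslash before a LIKE wildcard
    // is kept, so '\%' tokenizes to the two characters \% rather than %.
    let tokens = Tokenizer::new(&MySqlDialect {}, r"SELECT '\%'")
        .tokenize()
        .unwrap();
    assert!(tokens
        .iter()
        .any(|t| matches!(t, Token::SingleQuotedString(s) if s.as_str() == r"\%")));
}
```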
/// Determine if the dialect supports string literals with `U&` prefix.
/// This is used to specify Unicode code points in string literals.
/// For example, in PostgreSQL, the following is a valid string literal:
@ -272,55 +245,11 @@ pub trait Dialect: Debug + Any {
false
}
/// Returns true if the dialect supports `GROUP BY` modifiers prefixed by a `WITH` keyword.
/// Example: `GROUP BY value WITH ROLLUP`.
fn supports_group_by_with_modifier(&self) -> bool {
false
}
/// Indicates whether the dialect supports left-associative join parsing
/// by default when parentheses are omitted in nested joins.
///
/// Most dialects (like MySQL or Postgres) assume **left-associative** precedence,
/// so a query like:
///
/// ```sql
/// SELECT * FROM t1 NATURAL JOIN t5 INNER JOIN t0 ON ...
/// ```
/// is interpreted as:
/// ```sql
/// ((t1 NATURAL JOIN t5) INNER JOIN t0 ON ...)
/// ```
/// and internally represented as a **flat list** of joins.
///
/// In contrast, some dialects (e.g. **Snowflake**) assume **right-associative**
/// precedence and interpret the same query as:
/// ```sql
/// (t1 NATURAL JOIN (t5 INNER JOIN t0 ON ...))
/// ```
/// which results in a **nested join** structure in the AST.
///
/// If this method returns `false`, the parser must build nested join trees
/// even in the absence of parentheses to reflect the correct associativity
fn supports_left_associative_joins_without_parens(&self) -> bool {
true
}
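Concretely, the flat reading shows up in the AST as one relation plus sibling joins; a sketch using `GenericDialect` (assumed here to keep the default of `true`):

```rust
use sqlparser::ast::{SetExpr, Statement};
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;

fn main() {
    let sql = "SELECT * FROM t1 NATURAL JOIN t5 INNER JOIN t0 ON t0.v = t5.v";
    let stmt = Parser::parse_sql(&GenericDialect {}, sql).unwrap().remove(0);
    if let Statement::Query(query) = stmt {
        if let SetExpr::Select(select) = *query.body {
            // Left-associative: a flat list of two joins, no nested join tree.
            assert_eq!(select.from[0].joins.len(), 2);
        }
    }
}
```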
/// Returns true if the dialect supports the `(+)` syntax for OUTER JOIN.
fn supports_outer_join_operator(&self) -> bool {
false
}
/// Returns true if the dialect supports CONNECT BY.
fn supports_connect_by(&self) -> bool {
false
}
/// Returns true if the dialect supports `EXECUTE IMMEDIATE` statements.
fn supports_execute_immediate(&self) -> bool {
false
}
/// Returns true if the dialect supports the MATCH_RECOGNIZE operation.
fn supports_match_recognize(&self) -> bool {
false
@ -375,11 +304,6 @@ pub trait Dialect: Debug + Any {
false
}
/// Returns true if the dialect supports numbers containing underscores, e.g. `10_000_000`
fn supports_numeric_literal_underscores(&self) -> bool {
false
}
/// Returns true if the dialect supports specifying null treatment
/// as part of a window function's parameter list as opposed
/// to after the parameter list.
@ -417,6 +341,15 @@ pub trait Dialect: Debug + Any {
false
}
/// Returns true if the dialect supports method calls, for example:
///
/// ```sql
/// SELECT (SELECT ',' + name FROM sys.objects FOR XML PATH(''), TYPE).value('.','NVARCHAR(MAX)')
/// ```
fn supports_methods(&self) -> bool {
false
}
/// Returns true if the dialect supports multiple variable assignment
/// using parentheses in a `SET` variable declaration.
///
@ -427,16 +360,6 @@ pub trait Dialect: Debug + Any {
false
}
/// Returns true if the dialect supports multiple `SET` statements
/// in a single statement.
///
/// ```sql
/// SET variable = expression [, variable = expression];
/// ```
fn supports_comma_separated_set_assignments(&self) -> bool {
false
}
/// Returns true if the dialect supports an `EXCEPT` clause following a
/// wildcard in a select list.
///
@ -482,18 +405,11 @@ pub trait Dialect: Debug + Any {
}
/// Returns true if the dialect supports trailing commas in the `FROM` clause of a `SELECT` statement.
/// Example: `SELECT 1 FROM T, U, LIMIT 1`
fn supports_from_trailing_commas(&self) -> bool {
false
}
/// Returns true if the dialect supports trailing commas in the
/// column definitions list of a `CREATE` statement.
/// Example: `CREATE TABLE T (x INT, y TEXT,)`
fn supports_column_definition_trailing_commas(&self) -> bool {
false
}
/// Returns true if the dialect supports double dot notation for object names
///
/// Example
@ -524,72 +440,11 @@ pub trait Dialect: Debug + Any {
false
}
/// Return true if the dialect supports wildcard expansion on
/// arbitrary expressions in projections.
///
/// Example:
/// ```sql
/// SELECT STRUCT<STRING>('foo').* FROM T
/// ```
fn supports_select_expr_star(&self) -> bool {
false
}
/// Return true if the dialect supports "FROM-first" selects.
///
/// Example:
/// ```sql
/// FROM table
/// SELECT *
/// ```
fn supports_from_first_select(&self) -> bool {
false
}
/// Return true if the dialect supports pipe operator.
///
/// Example:
/// ```sql
/// SELECT *
/// FROM table
/// |> limit 1
/// ```
///
/// See <https://cloud.google.com/bigquery/docs/pipe-syntax-guide#basic_syntax>
fn supports_pipe_operator(&self) -> bool {
false
}
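A sketch of the pipe syntax from the doc comment above, parsed with a dialect that opts in (BigQuery per this diff); treat the exact accepted grammar as release-dependent:

```rust
use sqlparser::dialect::BigQueryDialect;
use sqlparser::parser::Parser;

fn main() {
    // Pipe syntax parses only where supports_pipe_operator() is true.
    let sql = "SELECT * FROM tbl |> LIMIT 1";
    assert!(Parser::parse_sql(&BigQueryDialect {}, sql).is_ok());
}
```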
/// Does the dialect support MySQL-style `'user'@'host'` grantee syntax?
fn supports_user_host_grantee(&self) -> bool {
false
}
/// Does the dialect support the `MATCH() AGAINST()` syntax?
fn supports_match_against(&self) -> bool {
false
}
/// Returns true if the dialect supports an exclude option
/// following a wildcard in the projection section. For example:
/// `SELECT * EXCLUDE col1 FROM tbl`.
///
/// [Redshift](https://docs.aws.amazon.com/redshift/latest/dg/r_EXCLUDE_list.html)
/// [Snowflake](https://docs.snowflake.com/en/sql-reference/sql/select)
fn supports_select_wildcard_exclude(&self) -> bool {
false
}
/// Returns true if the dialect supports an exclude option
/// as the last item in the projection section, not necessarily
/// after a wildcard. For example:
/// `SELECT *, c1, c2 EXCLUDE c3 FROM tbl`
///
/// [Redshift](https://docs.aws.amazon.com/redshift/latest/dg/r_EXCLUDE_list.html)
fn supports_select_exclude(&self) -> bool {
false
}
/// Dialect-specific infix parser override
///
/// This method is called to parse the next infix expression.
@ -635,7 +490,7 @@ pub trait Dialect: Debug + Any {
}
let token = parser.peek_token();
debug!("get_next_precedence_full() {token:?}");
debug!("get_next_precedence_full() {:?}", token);
match token.token {
Token::Word(w) if w.keyword == Keyword::OR => Ok(p!(Or)),
Token::Word(w) if w.keyword == Keyword::AND => Ok(p!(And)),
@ -667,9 +522,7 @@ pub trait Dialect: Debug + Any {
Token::Word(w) if w.keyword == Keyword::ILIKE => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::RLIKE => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::REGEXP => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::MATCH => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::SIMILAR => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::MEMBER => Ok(p!(Like)),
_ => Ok(self.prec_unknown()),
},
Token::Word(w) if w.keyword == Keyword::IS => Ok(p!(Is)),
@ -680,14 +533,10 @@ pub trait Dialect: Debug + Any {
Token::Word(w) if w.keyword == Keyword::ILIKE => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::RLIKE => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::REGEXP => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::MATCH => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::SIMILAR => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::MEMBER => Ok(p!(Like)),
Token::Word(w) if w.keyword == Keyword::OPERATOR => Ok(p!(Between)),
Token::Word(w) if w.keyword == Keyword::DIV => Ok(p!(MulDivModOp)),
Token::Period => Ok(p!(Period)),
Token::Assignment
| Token::Eq
Token::Eq
| Token::Lt
| Token::LtEq
| Token::Neq
@ -703,34 +552,18 @@ pub trait Dialect: Debug + Any {
| Token::ExclamationMarkDoubleTilde
| Token::ExclamationMarkDoubleTildeAsterisk
| Token::Spaceship => Ok(p!(Eq)),
Token::Pipe
| Token::QuestionMarkDash
| Token::DoubleSharp
| Token::Overlap
| Token::AmpersandLeftAngleBracket
| Token::AmpersandRightAngleBracket
| Token::QuestionMarkDashVerticalBar
| Token::AmpersandLeftAngleBracketVerticalBar
| Token::VerticalBarAmpersandRightAngleBracket
| Token::TwoWayArrow
| Token::LeftAngleBracketCaret
| Token::RightAngleBracketCaret
| Token::QuestionMarkSharp
| Token::QuestionMarkDoubleVerticalBar
| Token::QuestionPipe
| Token::TildeEqual
| Token::AtSign
| Token::ShiftLeftVerticalBar
| Token::VerticalBarShiftRight => Ok(p!(Pipe)),
Token::Pipe => Ok(p!(Pipe)),
Token::Caret | Token::Sharp | Token::ShiftRight | Token::ShiftLeft => Ok(p!(Caret)),
Token::Ampersand => Ok(p!(Ampersand)),
Token::Plus | Token::Minus => Ok(p!(PlusMinus)),
Token::Mul | Token::Div | Token::DuckIntDiv | Token::Mod | Token::StringConcat => {
Ok(p!(MulDivModOp))
}
Token::DoubleColon | Token::ExclamationMark | Token::LBracket | Token::CaretAt => {
Ok(p!(DoubleColon))
}
Token::DoubleColon
| Token::ExclamationMark
| Token::LBracket
| Token::Overlap
| Token::CaretAt => Ok(p!(DoubleColon)),
Token::Arrow
| Token::LongArrow
| Token::HashArrow
@ -742,6 +575,7 @@ pub trait Dialect: Debug + Any {
| Token::AtAt
| Token::Question
| Token::QuestionAnd
| Token::QuestionPipe
| Token::CustomBinaryOperator(_) => Ok(p!(PgOther)),
_ => Ok(self.prec_unknown()),
}
@ -775,7 +609,6 @@ pub trait Dialect: Debug + Any {
/// Uses (APPROXIMATELY) <https://www.postgresql.org/docs/7.0/operators.htm#AEN2026> as a reference
fn prec_value(&self, prec: Precedence) -> u8 {
match prec {
Precedence::Period => 100,
Precedence::DoubleColon => 50,
Precedence::AtTz => 41,
Precedence::MulDivModOp => 40,
@ -948,23 +781,12 @@ pub trait Dialect: Debug + Any {
keywords::RESERVED_FOR_IDENTIFIER.contains(&kw)
}
/// Returns reserved keywords when looking to parse a `TableFactor`.
// Returns reserved keywords when looking to parse a [TableFactor].
/// See [Self::supports_from_trailing_commas]
fn get_reserved_keywords_for_table_factor(&self) -> &[Keyword] {
keywords::RESERVED_FOR_TABLE_FACTOR
}
/// Returns reserved keywords that may prefix a select item expression
/// e.g. `SELECT CONNECT_BY_ROOT name FROM Tbl2` (Snowflake)
fn get_reserved_keywords_for_select_item_operator(&self) -> &[Keyword] {
&[]
}
/// Returns grantee types that should be treated as identifiers
fn get_reserved_grantees_types(&self) -> &[GranteesType] {
&[]
}
/// Returns true if this dialect supports the `TABLESAMPLE` option
/// before the table alias option. For example:
///
@ -999,30 +821,18 @@ pub trait Dialect: Debug + Any {
false
}
/// Returns true if the specified keyword should be parsed as a column identifier.
/// See [keywords::RESERVED_FOR_COLUMN_ALIAS]
fn is_column_alias(&self, kw: &Keyword, _parser: &mut Parser) -> bool {
!keywords::RESERVED_FOR_COLUMN_ALIAS.contains(kw)
}
/// Returns true if the specified keyword should be parsed as a select item alias.
/// When explicit is true, the keyword is preceded by an `AS` word. Parser is provided
/// to enable looking ahead if needed.
fn is_select_item_alias(&self, explicit: bool, kw: &Keyword, parser: &mut Parser) -> bool {
explicit || self.is_column_alias(kw, parser)
}
/// Returns true if the specified keyword should be parsed as a table identifier.
/// See [keywords::RESERVED_FOR_TABLE_ALIAS]
fn is_table_alias(&self, kw: &Keyword, _parser: &mut Parser) -> bool {
!keywords::RESERVED_FOR_TABLE_ALIAS.contains(kw)
fn is_select_item_alias(&self, explicit: bool, kw: &Keyword, _parser: &mut Parser) -> bool {
explicit || !keywords::RESERVED_FOR_COLUMN_ALIAS.contains(kw)
}
/// Returns true if the specified keyword should be parsed as a table factor alias.
/// When explicit is true, the keyword is preceded by an `AS` word. Parser is provided
/// to enable looking ahead if needed.
fn is_table_factor_alias(&self, explicit: bool, kw: &Keyword, parser: &mut Parser) -> bool {
explicit || self.is_table_alias(kw, parser)
fn is_table_factor_alias(&self, explicit: bool, kw: &Keyword, _parser: &mut Parser) -> bool {
explicit || !keywords::RESERVED_FOR_TABLE_ALIAS.contains(kw)
}
/// Returns true if this dialect supports querying historical table data
@ -1030,91 +840,6 @@ pub trait Dialect: Debug + Any {
fn supports_timestamp_versioning(&self) -> bool {
false
}
/// Returns true if this dialect supports the E'...' syntax for string literals
///
/// Postgres: <https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-STRINGS-ESCAPE>
fn supports_string_escape_constant(&self) -> bool {
false
}
/// Returns true if the dialect supports the table hints in the `FROM` clause.
fn supports_table_hints(&self) -> bool {
false
}
/// Returns true if this dialect requires a whitespace character after `--` to start a single line comment.
///
/// MySQL: <https://dev.mysql.com/doc/refman/8.4/en/ansi-diff-comments.html>
/// e.g. UPDATE account SET balance=balance--1
/// WHERE account_id=5752 ^^^ will be interpreted as two minus signs instead of a comment
fn requires_single_line_comment_whitespace(&self) -> bool {
false
}
/// Returns true if the dialect supports array type definition with brackets with
/// an optional size. For example:
/// ```CREATE TABLE my_table (arr1 INT[], arr2 INT[3])```
/// ```SELECT x::INT[]```
fn supports_array_typedef_with_brackets(&self) -> bool {
false
}
/// Returns true if the dialect supports geometric types.
///
/// Postgres: <https://www.postgresql.org/docs/9.5/functions-geometry.html>
/// e.g. @@ circle '((0,0),10)'
fn supports_geometric_types(&self) -> bool {
false
}
/// Returns true if the dialect supports `ORDER BY ALL`.
/// `ALL` means all columns of the SELECT clause.
///
/// For example: ```SELECT * FROM addresses ORDER BY ALL;```.
fn supports_order_by_all(&self) -> bool {
false
}
/// Returns true if the dialect supports `SET NAMES <charset_name> [COLLATE <collation_name>]`.
///
/// - [MySQL](https://dev.mysql.com/doc/refman/8.4/en/set-names.html)
/// - [PostgreSQL](https://www.postgresql.org/docs/17/sql-set.html)
///
/// Note: Postgres doesn't support the `COLLATE` clause, but we permissively parse it anyway.
fn supports_set_names(&self) -> bool {
false
}
fn supports_space_separated_column_options(&self) -> bool {
false
}
/// Returns true if the dialect supports the `USING` clause in an `ALTER COLUMN` statement.
/// Example:
/// ```sql
/// ALTER TABLE tbl ALTER COLUMN col SET DATA TYPE <type> USING <exp>`
/// ```
fn supports_alter_column_type_using(&self) -> bool {
false
}
/// Returns true if the dialect supports `ALTER TABLE tbl DROP COLUMN c1, ..., cn`
fn supports_comma_separated_drop_column_list(&self) -> bool {
false
}
/// Returns true if the dialect considers the specified ident as a function
/// that returns an identifier. Typically used to generate identifiers
/// programmatically.
///
/// - [Snowflake](https://docs.snowflake.com/en/sql-reference/identifier-literal)
fn is_identifier_generating_function_name(
&self,
_ident: &Ident,
_name_parts: &[ObjectNamePart],
) -> bool {
false
}
}
/// This represents the operators for which precedence must be defined
@ -1122,7 +847,6 @@ pub trait Dialect: Debug + Any {
/// higher number -> higher precedence
#[derive(Debug, Clone, Copy)]
pub enum Precedence {
Period,
DoubleColon,
AtTz,
MulDivModOp,
View file
@ -15,19 +15,7 @@
// specific language governing permissions and limitations
// under the License.
use crate::ast::helpers::attached_token::AttachedToken;
use crate::ast::{
BeginEndStatements, ConditionalStatementBlock, ConditionalStatements, GranteesType,
IfStatement, Statement, TriggerObject,
};
use crate::dialect::Dialect;
use crate::keywords::{self, Keyword};
use crate::parser::{Parser, ParserError};
use crate::tokenizer::Token;
#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
const RESERVED_FOR_COLUMN_ALIAS: &[Keyword] = &[Keyword::IF, Keyword::ELSE];
/// A [`Dialect`] for [Microsoft SQL Server](https://www.microsoft.com/en-us/sql-server/)
#[derive(Debug)]
@ -52,20 +40,12 @@ impl Dialect for MsSqlDialect {
|| ch == '_'
}
fn identifier_quote_style(&self, _identifier: &str) -> Option<char> {
Some('[')
}
/// SQL Server has `CONVERT(type, value)` instead of `CONVERT(value, type)`
/// <https://learn.microsoft.com/en-us/sql/t-sql/functions/cast-and-convert-transact-sql?view=sql-server-ver16>
fn convert_type_before_value(&self) -> bool {
true
}
fn supports_outer_join_operator(&self) -> bool {
true
}
fn supports_connect_by(&self) -> bool {
true
}
@ -83,6 +63,10 @@ impl Dialect for MsSqlDialect {
false
}
fn supports_methods(&self) -> bool {
true
}
fn supports_named_fn_args_with_colon_operator(&self) -> bool {
true
}
@ -98,7 +82,6 @@ impl Dialect for MsSqlDialect {
fn supports_start_transaction_modifier(&self) -> bool {
true
}
fn supports_end_transaction_modifier(&self) -> bool {
true
}
@ -112,187 +95,4 @@ impl Dialect for MsSqlDialect {
fn supports_timestamp_versioning(&self) -> bool {
true
}
/// See <https://learn.microsoft.com/en-us/sql/t-sql/language-elements/slash-star-comment-transact-sql?view=sql-server-ver16>
fn supports_nested_comments(&self) -> bool {
true
}
/// See <https://learn.microsoft.com/en-us/sql/t-sql/queries/from-transact-sql>
fn supports_object_name_double_dot_notation(&self) -> bool {
true
}
/// See <https://learn.microsoft.com/en-us/sql/relational-databases/security/authentication-access/server-level-roles>
fn get_reserved_grantees_types(&self) -> &[GranteesType] {
&[GranteesType::Public]
}
fn is_column_alias(&self, kw: &Keyword, _parser: &mut Parser) -> bool {
!keywords::RESERVED_FOR_COLUMN_ALIAS.contains(kw) && !RESERVED_FOR_COLUMN_ALIAS.contains(kw)
}
fn parse_statement(&self, parser: &mut Parser) -> Option<Result<Statement, ParserError>> {
if parser.peek_keyword(Keyword::IF) {
Some(self.parse_if_stmt(parser))
} else if parser.parse_keywords(&[Keyword::CREATE, Keyword::TRIGGER]) {
Some(self.parse_create_trigger(parser, false))
} else if parser.parse_keywords(&[
Keyword::CREATE,
Keyword::OR,
Keyword::ALTER,
Keyword::TRIGGER,
]) {
Some(self.parse_create_trigger(parser, true))
} else {
None
}
}
}
impl MsSqlDialect {
/// ```sql
/// IF boolean_expression
/// { sql_statement | statement_block }
/// [ ELSE
/// { sql_statement | statement_block } ]
/// ```
fn parse_if_stmt(&self, parser: &mut Parser) -> Result<Statement, ParserError> {
let if_token = parser.expect_keyword(Keyword::IF)?;
let condition = parser.parse_expr()?;
let if_block = if parser.peek_keyword(Keyword::BEGIN) {
let begin_token = parser.expect_keyword(Keyword::BEGIN)?;
let statements = self.parse_statement_list(parser, Some(Keyword::END))?;
let end_token = parser.expect_keyword(Keyword::END)?;
ConditionalStatementBlock {
start_token: AttachedToken(if_token),
condition: Some(condition),
then_token: None,
conditional_statements: ConditionalStatements::BeginEnd(BeginEndStatements {
begin_token: AttachedToken(begin_token),
statements,
end_token: AttachedToken(end_token),
}),
}
} else {
let stmt = parser.parse_statement()?;
ConditionalStatementBlock {
start_token: AttachedToken(if_token),
condition: Some(condition),
then_token: None,
conditional_statements: ConditionalStatements::Sequence {
statements: vec![stmt],
},
}
};
let mut prior_statement_ended_with_semi_colon = false;
while let Token::SemiColon = parser.peek_token_ref().token {
parser.advance_token();
prior_statement_ended_with_semi_colon = true;
}
let mut else_block = None;
if parser.peek_keyword(Keyword::ELSE) {
let else_token = parser.expect_keyword(Keyword::ELSE)?;
if parser.peek_keyword(Keyword::BEGIN) {
let begin_token = parser.expect_keyword(Keyword::BEGIN)?;
let statements = self.parse_statement_list(parser, Some(Keyword::END))?;
let end_token = parser.expect_keyword(Keyword::END)?;
else_block = Some(ConditionalStatementBlock {
start_token: AttachedToken(else_token),
condition: None,
then_token: None,
conditional_statements: ConditionalStatements::BeginEnd(BeginEndStatements {
begin_token: AttachedToken(begin_token),
statements,
end_token: AttachedToken(end_token),
}),
});
} else {
let stmt = parser.parse_statement()?;
else_block = Some(ConditionalStatementBlock {
start_token: AttachedToken(else_token),
condition: None,
then_token: None,
conditional_statements: ConditionalStatements::Sequence {
statements: vec![stmt],
},
});
}
} else if prior_statement_ended_with_semi_colon {
parser.prev_token();
}
Ok(Statement::If(IfStatement {
if_block,
else_block,
elseif_blocks: Vec::new(),
end_token: None,
}))
}
/// Parse `CREATE TRIGGER` for [MsSql]
///
/// [MsSql]: https://learn.microsoft.com/en-us/sql/t-sql/statements/create-trigger-transact-sql
fn parse_create_trigger(
&self,
parser: &mut Parser,
or_alter: bool,
) -> Result<Statement, ParserError> {
let name = parser.parse_object_name(false)?;
parser.expect_keyword_is(Keyword::ON)?;
let table_name = parser.parse_object_name(false)?;
let period = parser.parse_trigger_period()?;
let events = parser.parse_comma_separated(Parser::parse_trigger_event)?;
parser.expect_keyword_is(Keyword::AS)?;
let statements = Some(parser.parse_conditional_statements(&[Keyword::END])?);
Ok(Statement::CreateTrigger {
or_alter,
or_replace: false,
is_constraint: false,
name,
period,
events,
table_name,
referenced_table_name: None,
referencing: Vec::new(),
trigger_object: TriggerObject::Statement,
include_each: false,
condition: None,
exec_body: None,
statements,
characteristics: None,
})
}
/// Parse a sequence of statements, optionally separated by semicolon.
///
/// Stops parsing when reaching EOF or the given keyword.
fn parse_statement_list(
&self,
parser: &mut Parser,
terminal_keyword: Option<Keyword>,
) -> Result<Vec<Statement>, ParserError> {
let mut stmts = Vec::new();
loop {
if let Token::EOF = parser.peek_token_ref().token {
break;
}
if let Some(term) = terminal_keyword {
if parser.peek_keyword(term) {
break;
}
}
stmts.push(parser.parse_statement()?);
while let Token::SemiColon = parser.peek_token_ref().token {
parser.advance_token();
}
}
Ok(stmts)
}
}
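A usage sketch for the T-SQL `IF`/`ELSE` parsing added above (not part of the diff; requires the newer side of this comparison, where the override exists):

```rust
use sqlparser::dialect::MsSqlDialect;
use sqlparser::parser::Parser;

fn main() {
    // parse_if_stmt turns this into a single Statement::If with an ELSE block.
    let sql = "IF 1 = 1 BEGIN SELECT 1; END ELSE SELECT 2";
    let stmts = Parser::parse_sql(&MsSqlDialect {}, sql).unwrap();
    assert_eq!(stmts.len(), 1);
}
```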
View file
@ -25,15 +25,6 @@ use crate::{
parser::{Parser, ParserError},
};
use super::keywords;
const RESERVED_FOR_TABLE_ALIAS_MYSQL: &[Keyword] = &[
Keyword::USE,
Keyword::IGNORE,
Keyword::FORCE,
Keyword::STRAIGHT_JOIN,
];
/// A [`Dialect`] for [MySQL](https://www.mysql.com/)
#[derive(Debug)]
pub struct MySqlDialect {}
@ -67,10 +58,6 @@ impl Dialect for MySqlDialect {
true
}
fn ignores_wildcard_escapes(&self) -> bool {
true
}
fn supports_numeric_prefix(&self) -> bool {
true
}
@ -124,32 +111,6 @@ impl Dialect for MySqlDialect {
fn supports_user_host_grantee(&self) -> bool {
true
}
fn is_table_factor_alias(&self, explicit: bool, kw: &Keyword, _parser: &mut Parser) -> bool {
explicit
|| (!keywords::RESERVED_FOR_TABLE_ALIAS.contains(kw)
&& !RESERVED_FOR_TABLE_ALIAS_MYSQL.contains(kw))
}
fn supports_table_hints(&self) -> bool {
true
}
fn requires_single_line_comment_whitespace(&self) -> bool {
true
}
fn supports_match_against(&self) -> bool {
true
}
fn supports_set_names(&self) -> bool {
true
}
fn supports_comma_separated_set_assignments(&self) -> bool {
true
}
}
/// `LOCK TABLES`
View file
@ -28,6 +28,7 @@
// limitations under the License.
use log::debug;
use crate::ast::{ObjectName, Statement, UserDefinedTypeRepresentation};
use crate::dialect::{Dialect, Precedence};
use crate::keywords::Keyword;
use crate::parser::{Parser, ParserError};
@ -37,7 +38,6 @@ use crate::tokenizer::Token;
#[derive(Debug)]
pub struct PostgreSqlDialect {}
const PERIOD_PREC: u8 = 200;
const DOUBLE_COLON_PREC: u8 = 140;
const BRACKET_PREC: u8 = 130;
const COLLATE_PREC: u8 = 120;
@ -104,7 +104,7 @@ impl Dialect for PostgreSqlDialect {
fn get_next_precedence(&self, parser: &Parser) -> Option<Result<u8, ParserError>> {
let token = parser.peek_token();
debug!("get_next_precedence() {token:?}");
debug!("get_next_precedence() {:?}", token);
// we only return some custom value here when the behaviour (not merely the numeric value) differs
// from the default implementation
@ -135,6 +135,15 @@ impl Dialect for PostgreSqlDialect {
}
}
fn parse_statement(&self, parser: &mut Parser) -> Option<Result<Statement, ParserError>> {
if parser.parse_keyword(Keyword::CREATE) {
parser.prev_token(); // unconsume the CREATE in case we don't end up parsing anything
parse_create(parser)
} else {
None
}
}
fn supports_filter_during_aggregation(&self) -> bool {
true
}
@ -145,7 +154,6 @@ impl Dialect for PostgreSqlDialect {
fn prec_value(&self, prec: Precedence) -> u8 {
match prec {
Precedence::Period => PERIOD_PREC,
Precedence::DoubleColon => DOUBLE_COLON_PREC,
Precedence::AtTz => AT_TZ_PREC,
Precedence::MulDivModOp => MUL_DIV_MOD_OP_PREC,
@ -237,29 +245,38 @@ impl Dialect for PostgreSqlDialect {
fn supports_nested_comments(&self) -> bool {
true
}
}
fn supports_string_escape_constant(&self) -> bool {
true
}
pub fn parse_create(parser: &mut Parser) -> Option<Result<Statement, ParserError>> {
let name = parser.maybe_parse(|parser| -> Result<ObjectName, ParserError> {
parser.expect_keyword_is(Keyword::CREATE)?;
parser.expect_keyword_is(Keyword::TYPE)?;
let name = parser.parse_object_name(false)?;
parser.expect_keyword_is(Keyword::AS)?;
parser.expect_keyword_is(Keyword::ENUM)?;
Ok(name)
});
fn supports_numeric_literal_underscores(&self) -> bool {
true
}
/// See: <https://www.postgresql.org/docs/current/arrays.html#ARRAYS-DECLARATION>
fn supports_array_typedef_with_brackets(&self) -> bool {
true
}
fn supports_geometric_types(&self) -> bool {
true
}
fn supports_set_names(&self) -> bool {
true
}
fn supports_alter_column_type_using(&self) -> bool {
true
match name {
Ok(name) => name.map(|name| parse_create_type_as_enum(parser, name)),
Err(e) => Some(Err(e)),
}
}
// https://www.postgresql.org/docs/current/sql-createtype.html
pub fn parse_create_type_as_enum(
parser: &mut Parser,
name: ObjectName,
) -> Result<Statement, ParserError> {
if !parser.consume_token(&Token::LParen) {
return parser.expected("'(' after CREATE TYPE AS ENUM", parser.peek_token());
}
let labels = parser.parse_comma_separated0(|p| p.parse_identifier(), Token::RParen)?;
parser.expect_token(&Token::RParen)?;
Ok(Statement::CreateType {
name,
representation: UserDefinedTypeRepresentation::Enum { labels },
})
}
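This code path is exercised like so (a sketch; `CREATE TYPE ... AS ENUM` parses with the PostgreSQL dialect on both sides of this diff):

```rust
use sqlparser::ast::Statement;
use sqlparser::dialect::PostgreSqlDialect;
use sqlparser::parser::Parser;

fn main() {
    let sql = "CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')";
    let stmt = Parser::parse_sql(&PostgreSqlDialect {}, sql).unwrap().remove(0);
    // Expect a CreateType statement carrying the three enum labels.
    assert!(matches!(stmt, Statement::CreateType { .. }));
}
```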
View file
@ -80,15 +80,13 @@ impl Dialect for RedshiftSqlDialect {
}
fn is_identifier_start(&self, ch: char) -> bool {
// Extends Postgres dialect with sharp and UTF-8 multibyte chars
// https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
PostgreSqlDialect {}.is_identifier_start(ch) || ch == '#' || !ch.is_ascii()
// Extends Postgres dialect with sharp
PostgreSqlDialect {}.is_identifier_start(ch) || ch == '#'
}
fn is_identifier_part(&self, ch: char) -> bool {
// Extends Postgres dialect with sharp and UTF-8 multibyte chars
// https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
PostgreSqlDialect {}.is_identifier_part(ch) || ch == '#' || !ch.is_ascii()
// Extends Postgres dialect with sharp
PostgreSqlDialect {}.is_identifier_part(ch) || ch == '#'
}
/// redshift has `CONVERT(type, value)` instead of `CONVERT(value, type)`
@ -111,32 +109,4 @@ impl Dialect for RedshiftSqlDialect {
fn supports_partiql(&self) -> bool {
true
}
fn supports_string_escape_constant(&self) -> bool {
true
}
fn supports_geometric_types(&self) -> bool {
true
}
fn supports_array_typedef_with_brackets(&self) -> bool {
true
}
fn allow_extract_single_quotes(&self) -> bool {
true
}
fn supports_string_literal_backslash_escape(&self) -> bool {
true
}
fn supports_select_wildcard_exclude(&self) -> bool {
true
}
fn supports_select_exclude(&self) -> bool {
true
}
}
File diff suppressed because it is too large

View file
@ -15,11 +15,7 @@
// specific language governing permissions and limitations
// under the License.
#[cfg(not(feature = "std"))]
use alloc::boxed::Box;
use crate::ast::BinaryOperator;
use crate::ast::{Expr, Statement};
use crate::ast::Statement;
use crate::dialect::Dialect;
use crate::keywords::Keyword;
use crate::parser::{Parser, ParserError};
@ -74,27 +70,6 @@ impl Dialect for SQLiteDialect {
}
}
fn parse_infix(
&self,
parser: &mut crate::parser::Parser,
expr: &crate::ast::Expr,
_precedence: u8,
) -> Option<Result<crate::ast::Expr, ParserError>> {
// Parse MATCH and REGEXP as operators
// See <https://www.sqlite.org/lang_expr.html#the_like_glob_regexp_match_and_extract_operators>
for (keyword, op) in [
(Keyword::REGEXP, BinaryOperator::Regexp),
(Keyword::MATCH, BinaryOperator::Match),
] {
if parser.parse_keyword(keyword) {
let left = Box::new(expr.clone());
let right = Box::new(parser.parse_expr().unwrap());
return Some(Ok(Expr::BinaryOp { left, op, right }));
}
}
None
}
fn supports_in_empty_list(&self) -> bool {
true
}
View file
@ -1,135 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Utilities for formatting SQL AST nodes with pretty printing support.
//!
//! The module provides formatters that implement the `Display` trait with support
//! for both regular (`{}`) and pretty (`{:#}`) formatting modes. Pretty printing
//! adds proper indentation and line breaks to make SQL statements more readable.
use core::fmt::{self, Display, Write};
/// A wrapper around a value that adds an indent to the value when displayed with {:#}.
pub(crate) struct Indent<T>(pub T);
const INDENT: &str = " ";
impl<T> Display for Indent<T>
where
T: Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.alternate() {
f.write_str(INDENT)?;
write!(Indent(f), "{:#}", self.0)
} else {
self.0.fmt(f)
}
}
}
/// Adds an indent to the inner writer
impl<T> Write for Indent<T>
where
T: Write,
{
fn write_str(&mut self, s: &str) -> fmt::Result {
self.0.write_str(s)?;
// Our NewLine and SpaceOrNewline utils always print individual newlines as a single-character string.
if s == "\n" {
self.0.write_str(INDENT)?;
}
Ok(())
}
}
/// A value that inserts a newline when displayed with {:#}, but not when displayed with {}.
pub(crate) struct NewLine;
impl Display for NewLine {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.alternate() {
f.write_char('\n')
} else {
Ok(())
}
}
}
/// A value that inserts a space when displayed with {}, but a newline when displayed with {:#}.
pub(crate) struct SpaceOrNewline;
impl Display for SpaceOrNewline {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.alternate() {
f.write_char('\n')
} else {
f.write_char(' ')
}
}
}
/// A value that displays a comma-separated list of values.
/// When pretty-printed (using {:#}), it displays each value on a new line.
pub(crate) struct DisplayCommaSeparated<'a, T: fmt::Display>(pub(crate) &'a [T]);
impl<T: fmt::Display> fmt::Display for DisplayCommaSeparated<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut first = true;
for t in self.0 {
if !first {
f.write_char(',')?;
SpaceOrNewline.fmt(f)?;
}
first = false;
t.fmt(f)?;
}
Ok(())
}
}
/// Displays a whitespace, followed by a comma-separated list that is indented when pretty-printed.
pub(crate) fn indented_list<T: fmt::Display>(f: &mut fmt::Formatter, items: &[T]) -> fmt::Result {
SpaceOrNewline.fmt(f)?;
Indent(DisplayCommaSeparated(items)).fmt(f)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_indent() {
struct TwoLines;
impl Display for TwoLines {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("line 1")?;
SpaceOrNewline.fmt(f)?;
f.write_str("line 2")
}
}
let indent = Indent(TwoLines);
assert_eq!(
indent.to_string(),
TwoLines.to_string(),
"Only the alternate form should be indented"
);
assert_eq!(format!("{:#}", indent), " line 1\n line 2");
}
}
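The user-visible effect of this module is alternate-form formatting on AST nodes; a sketch assuming the newer side of this diff, where `Statement`'s `Display` honors `{:#}`:

```rust
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;

fn main() {
    let sql = "SELECT a, b FROM t WHERE a > 1";
    let stmt = &Parser::parse_sql(&GenericDialect {}, sql).unwrap()[0];
    println!("{stmt}");   // compact, single-line SQL
    println!("{stmt:#}"); // indented, multi-line SQL via Indent/NewLine above
}
```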
View file
@ -18,14 +18,14 @@
//! This module defines
//! 1) a list of constants for every keyword
//! 2) an `ALL_KEYWORDS` array with every keyword in it
//! This is not a list of *reserved* keywords: some of these can be
//! parsed as identifiers if the parser decides so. This means that
//! new keywords can be added here without affecting the parse result.
//! This is not a list of *reserved* keywords: some of these can be
//! parsed as identifiers if the parser decides so. This means that
//! new keywords can be added here without affecting the parse result.
//!
//! As a matter of fact, most of these keywords are not used at all
//! and could be removed.
//! As a matter of fact, most of these keywords are not used at all
//! and could be removed.
//! 3) a `RESERVED_FOR_TABLE_ALIAS` array with keywords reserved in a
//! "table alias" context.
//! "table alias" context.
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
@ -83,9 +83,7 @@ define_keywords!(
ADMIN,
AFTER,
AGAINST,
AGGREGATE,
AGGREGATION,
ALERT,
ALGORITHM,
ALIAS,
ALL,
@ -98,7 +96,6 @@ define_keywords!(
ANY,
APPLICATION,
APPLY,
APPLYBUDGET,
ARCHIVE,
ARE,
ARRAY,
@ -112,19 +109,14 @@ define_keywords!(
AT,
ATOMIC,
ATTACH,
AUDIT,
AUTHENTICATION,
AUTHORIZATION,
AUTO,
AUTOEXTEND_SIZE,
AUTOINCREMENT,
AUTO_INCREMENT,
AVG,
AVG_ROW_LENGTH,
AVRO,
BACKWARD,
BASE64,
BASE_LOCATION,
BEFORE,
BEGIN,
BEGIN_FRAME,
@ -135,18 +127,14 @@ define_keywords!(
BIGINT,
BIGNUMERIC,
BINARY,
BIND,
BINDING,
BIT,
BLOB,
BLOCK,
BLOOM,
BLOOMFILTER,
BOOL,
BOOLEAN,
BOTH,
BOX,
BRIN,
BROWSE,
BTREE,
BUCKET,
@ -162,10 +150,8 @@ define_keywords!(
CASCADE,
CASCADED,
CASE,
CASES,
CAST,
CATALOG,
CATALOG_SYNC,
CATCH,
CEIL,
CEILING,
@ -176,14 +162,11 @@ define_keywords!(
CHANNEL,
CHAR,
CHARACTER,
CHARACTERISTICS,
CHARACTERS,
CHARACTER_LENGTH,
CHARSET,
CHAR_LENGTH,
CHECK,
CHECKSUM,
CIRCLE,
CLEAR,
CLOB,
CLONE,
@ -202,7 +185,6 @@ define_keywords!(
COMMENT,
COMMIT,
COMMITTED,
COMPATIBLE,
COMPRESSION,
COMPUTE,
CONCURRENTLY,
@ -210,8 +192,6 @@ define_keywords!(
CONFLICT,
CONNECT,
CONNECTION,
CONNECTOR,
CONNECT_BY_ROOT,
CONSTRAINT,
CONTAINS,
CONTINUE,
@ -257,7 +237,6 @@ define_keywords!(
DAYOFWEEK,
DAYOFYEAR,
DAYS,
DCPROPERTIES,
DEALLOCATE,
DEC,
DECADE,
@ -272,13 +251,11 @@ define_keywords!(
DEFINED,
DEFINER,
DELAYED,
DELAY_KEY_WRITE,
DELETE,
DELIMITED,
DELIMITER,
DELTA,
DENSE_RANK,
DENY,
DEREF,
DESC,
DESCRIBE,
@ -293,7 +270,6 @@ define_keywords!(
DISTRIBUTE,
DIV,
DO,
DOMAIN,
DOUBLE,
DOW,
DOY,
@ -305,7 +281,6 @@ define_keywords!(
ELEMENT,
ELEMENTS,
ELSE,
ELSEIF,
EMPTY,
ENABLE,
ENABLE_SCHEMA_EVOLUTION,
@ -318,7 +293,6 @@ define_keywords!(
END_PARTITION,
ENFORCED,
ENGINE,
ENGINE_ATTRIBUTE,
ENUM,
ENUM16,
ENUM8,
@ -331,29 +305,23 @@ define_keywords!(
ESTIMATE,
EVENT,
EVERY,
EVOLVE,
EXCEPT,
EXCEPTION,
EXCHANGE,
EXCLUDE,
EXCLUSIVE,
EXEC,
EXECUTE,
EXECUTION,
EXISTS,
EXP,
EXPANSION,
EXPLAIN,
EXPLICIT,
EXPORT,
EXTEND,
EXTENDED,
EXTENSION,
EXTERNAL,
EXTERNAL_VOLUME,
EXTRACT,
FAIL,
FAILOVER,
FALSE,
FETCH,
FIELDS,
@ -389,20 +357,16 @@ define_keywords!(
FREEZE,
FROM,
FSCK,
FULFILLMENT,
FULL,
FULLTEXT,
FUNCTION,
FUNCTIONS,
FUSION,
FUTURE,
GENERAL,
GENERATE,
GENERATED,
GEOGRAPHY,
GET,
GIN,
GIST,
GLOBAL,
GRANT,
GRANTED,
@ -422,8 +386,6 @@ define_keywords!(
HOSTS,
HOUR,
HOURS,
HUGEINT,
ICEBERG,
ID,
IDENTITY,
IDENTITY_INSERT,
@ -432,8 +394,6 @@ define_keywords!(
ILIKE,
IMMEDIATE,
IMMUTABLE,
IMPORT,
IMPORTED,
IN,
INCLUDE,
INCLUDE_NULL_VALUES,
@ -441,19 +401,15 @@ define_keywords!(
INDEX,
INDICATOR,
INHERIT,
INHERITS,
INITIALLY,
INNER,
INOUT,
INPATH,
INPLACE,
INPUT,
INPUTFORMAT,
INSENSITIVE,
INSERT,
INSERT_METHOD,
INSTALL,
INSTANT,
INSTEAD,
INT,
INT128,
@ -465,7 +421,6 @@ define_keywords!(
INT64,
INT8,
INTEGER,
INTEGRATION,
INTERPOLATE,
INTERSECT,
INTERSECTION,
@ -488,7 +443,6 @@ define_keywords!(
JULIAN,
KEY,
KEYS,
KEY_BLOCK_SIZE,
KILL,
LAG,
LANGUAGE,
@ -503,11 +457,9 @@ define_keywords!(
LIKE,
LIKE_REGEX,
LIMIT,
LINE,
LINES,
LIST,
LISTEN,
LISTING,
LN,
LOAD,
LOCAL,
@ -525,10 +477,7 @@ define_keywords!(
LOWER,
LOW_PRIORITY,
LS,
LSEG,
MACRO,
MANAGE,
MANAGED,
MANAGEDLOCATION,
MAP,
MASKING,
@ -542,17 +491,14 @@ define_keywords!(
MAX,
MAXVALUE,
MAX_DATA_EXTENSION_TIME_IN_DAYS,
MAX_ROWS,
MEASURES,
MEDIUMBLOB,
MEDIUMINT,
MEDIUMTEXT,
MEMBER,
MERGE,
MESSAGE,
METADATA,
METHOD,
METRIC,
MICROSECOND,
MICROSECONDS,
MILLENIUM,
@ -564,20 +510,17 @@ define_keywords!(
MINUTE,
MINUTES,
MINVALUE,
MIN_ROWS,
MOD,
MODE,
MODIFIES,
MODIFY,
MODULE,
MONITOR,
MONTH,
MONTHS,
MSCK,
MULTISET,
MUTATION,
NAME,
NAMES,
NANOSECOND,
NANOSECONDS,
NATIONAL,
@ -585,7 +528,6 @@ define_keywords!(
NCHAR,
NCLOB,
NESTED,
NETWORK,
NEW,
NEXT,
NFC,
@ -619,7 +561,6 @@ define_keywords!(
NUMERIC,
NVARCHAR,
OBJECT,
OBJECTS,
OCCURRENCES_REGEX,
OCTETS,
OCTET_LENGTH,
@ -634,11 +575,8 @@ define_keywords!(
ONLY,
OPEN,
OPENJSON,
OPERATE,
OPERATOR,
OPTIMIZATION,
OPTIMIZE,
OPTIMIZED,
OPTIMIZER_COSTS,
OPTION,
OPTIONS,
@ -646,24 +584,16 @@ define_keywords!(
ORC,
ORDER,
ORDINALITY,
ORGANIZATION,
OTHER,
OUT,
OUTER,
OUTPUT,
OUTPUTFORMAT,
OVER,
OVERFLOW,
OVERLAPS,
OVERLAY,
OVERRIDE,
OVERWRITE,
OWNED,
OWNER,
OWNERSHIP,
PACKAGE,
PACKAGES,
PACK_KEYS,
PARALLEL,
PARAMETER,
PARQUET,
@ -671,7 +601,6 @@ define_keywords!(
PARTITION,
PARTITIONED,
PARTITIONS,
PASSING,
PASSWORD,
PAST,
PATH,
@ -688,10 +617,7 @@ define_keywords!(
PLACING,
PLAN,
PLANS,
POINT,
POLICY,
POLYGON,
POOL,
PORTION,
POSITION,
POSITION_REGEX,
@ -704,7 +630,6 @@ define_keywords!(
PRESERVE,
PREWHERE,
PRIMARY,
PRINT,
PRIOR,
PRIVILEGES,
PROCEDURE,
@ -712,13 +637,11 @@ define_keywords!(
PROGRAM,
PROJECTION,
PUBLIC,
PURCHASE,
PURGE,
QUALIFY,
QUARTER,
QUERY,
QUOTE,
RAISE,
RAISERROR,
RANGE,
RANK,
@ -747,7 +670,6 @@ define_keywords!(
RELATIVE,
RELAY,
RELEASE,
RELEASES,
REMOTE,
REMOVE,
RENAME,
@ -756,16 +678,12 @@ define_keywords!(
REPEATABLE,
REPLACE,
REPLICA,
REPLICATE,
REPLICATION,
RESET,
RESOLVE,
RESOURCE,
RESPECT,
RESTART,
RESTRICT,
RESTRICTED,
RESTRICTIONS,
RESTRICTIVE,
RESULT,
RESULTSET,
@ -786,7 +704,6 @@ define_keywords!(
ROW,
ROWID,
ROWS,
ROW_FORMAT,
ROW_NUMBER,
RULE,
RUN,
@ -801,7 +718,6 @@ define_keywords!(
SEARCH,
SECOND,
SECONDARY,
SECONDARY_ENGINE_ATTRIBUTE,
SECONDS,
SECRET,
SECURITY,
@ -816,8 +732,6 @@ define_keywords!(
SERDE,
SERDEPROPERTIES,
SERIALIZABLE,
SERVER,
SERVICE,
SESSION,
SESSION_USER,
SET,
@ -825,10 +739,7 @@ define_keywords!(
SETS,
SETTINGS,
SHARE,
SHARED,
SHARING,
SHOW,
SIGNED,
SIMILAR,
SKIP,
SLOW,
@ -841,13 +752,11 @@ define_keywords!(
SPATIAL,
SPECIFIC,
SPECIFICTYPE,
SPGIST,
SQL,
SQLEXCEPTION,
SQLSTATE,
SQLWARNING,
SQRT,
SRID,
STABLE,
STAGE,
START,
@ -855,32 +764,24 @@ define_keywords!(
STATEMENT,
STATIC,
STATISTICS,
STATS_AUTO_RECALC,
STATS_PERSISTENT,
STATS_SAMPLE_PAGES,
STATUS,
STDDEV_POP,
STDDEV_SAMP,
STDIN,
STDOUT,
STEP,
STORAGE,
STORAGE_INTEGRATION,
STORAGE_SERIALIZATION_POLICY,
STORED,
STRAIGHT_JOIN,
STRICT,
STRING,
STRUCT,
SUBMULTISET,
SUBSTR,
SUBSTRING,
SUBSTRING_REGEX,
SUCCEEDS,
SUM,
SUPER,
SUPERUSER,
SUPPORT,
SUSPEND,
SWAP,
SYMMETRIC,
@ -891,10 +792,8 @@ define_keywords!(
TABLE,
TABLES,
TABLESAMPLE,
TABLESPACE,
TAG,
TARGET,
TASK,
TBLPROPERTIES,
TEMP,
TEMPORARY,
@ -908,7 +807,6 @@ define_keywords!(
TIME,
TIMESTAMP,
TIMESTAMPTZ,
TIMESTAMP_NTZ,
TIMETZ,
TIMEZONE,
TIMEZONE_ABBR,
@ -921,7 +819,6 @@ define_keywords!(
TO,
TOP,
TOTALS,
TRACE,
TRAILING,
TRANSACTION,
TRANSIENT,
@ -937,13 +834,9 @@ define_keywords!(
TRY,
TRY_CAST,
TRY_CONVERT,
TSQUERY,
TSVECTOR,
TUPLE,
TYPE,
UBIGINT,
UESCAPE,
UHUGEINT,
UINT128,
UINT16,
UINT256,
@ -966,7 +859,6 @@ define_keywords!(
UNNEST,
UNPIVOT,
UNSAFE,
UNSET,
UNSIGNED,
UNTIL,
UPDATE,
@ -977,18 +869,14 @@ define_keywords!(
USER,
USER_RESOURCES,
USING,
USMALLINT,
UTINYINT,
UUID,
VACUUM,
VALID,
VALIDATE,
VALIDATION_MODE,
VALUE,
VALUES,
VALUE_OF,
VARBINARY,
VARBIT,
VARCHAR,
VARIABLES,
VARYING,
@ -997,20 +885,16 @@ define_keywords!(
VERBOSE,
VERSION,
VERSIONING,
VERSIONS,
VIEW,
VIEWS,
VIRTUAL,
VOLATILE,
VOLUME,
WAREHOUSE,
WAREHOUSES,
WEEK,
WEEKS,
WHEN,
WHENEVER,
WHERE,
WHILE,
WIDTH_BUCKET,
WINDOW,
WITH,
@ -1018,11 +902,8 @@ define_keywords!(
WITHOUT,
WITHOUT_ARRAY_WRAPPER,
WORK,
WRAPPER,
WRITE,
XML,
XMLNAMESPACES,
XMLTABLE,
XOR,
YEAR,
YEARS,
@ -1071,8 +952,6 @@ pub const RESERVED_FOR_TABLE_ALIAS: &[Keyword] = &[
Keyword::ANTI,
Keyword::SEMI,
Keyword::RETURNING,
Keyword::ASOF,
Keyword::MATCH_CONDITION,
// for MSSQL-specific OUTER APPLY (seems reserved in most dialects)
Keyword::OUTER,
Keyword::SET,
@ -1095,7 +974,6 @@ pub const RESERVED_FOR_TABLE_ALIAS: &[Keyword] = &[
Keyword::SAMPLE,
Keyword::TABLESAMPLE,
Keyword::FROM,
Keyword::OPEN,
];
/// Can't be used as a column alias, so that `SELECT <expr> alias`
@ -1119,7 +997,6 @@ pub const RESERVED_FOR_COLUMN_ALIAS: &[Keyword] = &[
Keyword::FETCH,
Keyword::UNION,
Keyword::EXCEPT,
Keyword::EXCLUDE,
Keyword::INTERSECT,
Keyword::MINUS,
Keyword::CLUSTER,
@ -1131,7 +1008,7 @@ pub const RESERVED_FOR_COLUMN_ALIAS: &[Keyword] = &[
Keyword::END,
];
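A short illustration (not from this diff, and using only the public parsing API) of why keywords like `FROM` belong on this list: if `FROM` could serve as an implicit column alias, the query below would parse as `SELECT x AS FROM ...` instead of a table scan.

```rust
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;

fn main() {
    // Because FROM is reserved as a column alias, the parser treats it as
    // the start of the FROM clause rather than as an alias for `x`.
    let stmts = Parser::parse_sql(&GenericDialect {}, "SELECT x FROM t").unwrap();
    assert_eq!(stmts[0].to_string(), "SELECT x FROM t");
}
```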
// Global list of reserved keywords allowed after FROM.
// Parser should call Dialect::get_reserved_keyword_after_from
// to allow for each dialect to customize the list.
pub const RESERVED_FOR_TABLE_FACTOR: &[Keyword] = &[


@ -64,27 +64,6 @@
//! // The original SQL text can be generated from the AST
//! assert_eq!(ast[0].to_string(), sql);
//! ```
//!
//! # Pretty Printing
//!
//! SQL statements can be pretty-printed with proper indentation and line breaks using the alternate flag (`{:#}`):
//!
//! ```
//! # use sqlparser::dialect::GenericDialect;
//! # use sqlparser::parser::Parser;
//! let sql = "SELECT a, b FROM table_1";
//! let ast = Parser::parse_sql(&GenericDialect, sql).unwrap();
//!
//! // Pretty print with indentation and line breaks
//! let pretty_sql = format!("{:#}", ast[0]);
//! assert_eq!(pretty_sql, r#"
//! SELECT
//! a,
//! b
//! FROM
//! table_1
//! "#.trim());
//! ```
//! [sqlparser crates.io page]: https://crates.io/crates/sqlparser
//! [`Parser::parse_sql`]: crate::parser::Parser::parse_sql
//! [`Parser::new`]: crate::parser::Parser::new
@ -149,10 +128,6 @@
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(clippy::upper_case_acronyms)]
// Permit large enum variants to keep a unified, expressive AST.
// Splitting complex nodes (expressions, statements, types) into separate types
// would bloat the API and hide intent. Extra memory is a worthwhile tradeoff.
#![allow(clippy::large_enum_variant)]
// Allow proc-macros to find this crate
extern crate self as sqlparser;
@ -167,7 +142,6 @@ extern crate pretty_assertions;
pub mod ast;
#[macro_use]
pub mod dialect;
mod display_utils;
pub mod keywords;
pub mod parser;
pub mod tokenizer;


@ -18,8 +18,8 @@ use alloc::vec;
use super::{Parser, ParserError};
use crate::{
ast::{
AlterConnectorOwner, AlterPolicyOperation, AlterRoleOperation, Expr, Password, ResetConfig,
RoleOption, SetConfigValue, Statement,
AlterPolicyOperation, AlterRoleOperation, Expr, Password, ResetConfig, RoleOption,
SetConfigValue, Statement,
},
dialect::{MsSqlDialect, PostgreSqlDialect},
keywords::Keyword,
@ -99,47 +99,6 @@ impl Parser<'_> {
}
}
/// Parse an `ALTER CONNECTOR` statement
/// ```sql
/// ALTER CONNECTOR connector_name SET DCPROPERTIES(property_name=property_value, ...);
///
/// ALTER CONNECTOR connector_name SET URL new_url;
///
/// ALTER CONNECTOR connector_name SET OWNER [USER|ROLE] user_or_role;
/// ```
pub fn parse_alter_connector(&mut self) -> Result<Statement, ParserError> {
let name = self.parse_identifier()?;
self.expect_keyword_is(Keyword::SET)?;
let properties = match self.parse_options_with_keywords(&[Keyword::DCPROPERTIES])? {
properties if !properties.is_empty() => Some(properties),
_ => None,
};
let url = if self.parse_keyword(Keyword::URL) {
Some(self.parse_literal_string()?)
} else {
None
};
let owner = if self.parse_keywords(&[Keyword::OWNER, Keyword::USER]) {
let owner = self.parse_identifier()?;
Some(AlterConnectorOwner::User(owner))
} else if self.parse_keywords(&[Keyword::OWNER, Keyword::ROLE]) {
let owner = self.parse_identifier()?;
Some(AlterConnectorOwner::Role(owner))
} else {
None
};
Ok(Statement::AlterConnector {
name,
properties,
url,
owner,
})
}
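Outside the parser itself, a minimal sketch of how such a statement reaches this code path (the connector name and property below are made up; `ALTER CONNECTOR` is Hive syntax, so the Hive dialect is assumed):

```rust
use sqlparser::dialect::HiveDialect;
use sqlparser::parser::Parser;

fn main() {
    // Hypothetical connector name and DCPROPERTIES pair.
    let sql = "ALTER CONNECTOR my_conn SET DCPROPERTIES('url' = 'jdbc:derby://host/db')";
    match Parser::parse_sql(&HiveDialect {}, sql) {
        Ok(stmts) => println!("parsed: {:?}", stmts[0]),
        Err(e) => eprintln!("parse error: {e}"),
    }
}
```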
fn parse_mssql_alter_role(&mut self) -> Result<Statement, ParserError> {
let role_name = self.parse_identifier()?;

File diff suppressed because it is too large


@ -33,7 +33,7 @@ use core::fmt::Debug;
use crate::dialect::*;
use crate::parser::{Parser, ParserError};
use crate::tokenizer::{Token, Tokenizer};
use crate::tokenizer::Tokenizer;
use crate::{ast::*, parser::ParserOptions};
#[cfg(test)]
@ -151,8 +151,6 @@ impl TestedDialects {
///
/// 2. re-serializing the result of parsing `sql` produces the same
/// `canonical` sql string
///
/// For multiple statements, use [`statements_parse_to`].
pub fn one_statement_parses_to(&self, sql: &str, canonical: &str) -> Statement {
let mut statements = self.parse_sql_statements(sql).expect(sql);
assert_eq!(statements.len(), 1);
@ -168,24 +166,6 @@ impl TestedDialects {
only_statement
}
/// The same as [`one_statement_parses_to`] but works for multiple statements.
pub fn statements_parse_to(&self, sql: &str, canonical: &str) -> Vec<Statement> {
let statements = self.parse_sql_statements(sql).expect(sql);
if !canonical.is_empty() && sql != canonical {
assert_eq!(self.parse_sql_statements(canonical).unwrap(), statements);
} else {
assert_eq!(
sql,
statements
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>()
.join("; ")
);
}
statements
}
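A hypothetical call showing the intended contract: when `sql` differs from `canonical`, both strings are parsed and the resulting statement lists must be equal.

```rust
// Hypothetical test: extra whitespace between statements is not canonical,
// so both inputs are parsed and their ASTs are compared for equality.
all_dialects().statements_parse_to("SELECT 1;   SELECT 2", "SELECT 1; SELECT 2");
```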
/// Ensures that `sql` parses as an [`Expr`], and that
/// re-serializing the parse result produces canonical
pub fn expr_parses_to(&self, sql: &str, canonical: &str) -> Expr {
@ -257,22 +237,6 @@ impl TestedDialects {
pub fn verified_expr(&self, sql: &str) -> Expr {
self.expr_parses_to(sql, sql)
}
/// Check that the tokenizer returns the expected tokens for the given SQL.
pub fn tokenizes_to(&self, sql: &str, expected: Vec<Token>) {
if self.dialects.is_empty() {
panic!("No dialects to test");
}
self.dialects.iter().for_each(|dialect| {
let mut tokenizer = Tokenizer::new(&**dialect, sql);
if let Some(options) = &self.options {
tokenizer = tokenizer.with_unescape(options.unescape);
}
let tokens = tokenizer.tokenize().unwrap();
assert_eq!(expected, tokens, "Tokenized differently for {dialect:?}");
});
}
}
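For example, a hypothetical call mirroring the tokenizer tests further down in this diff:

```rust
all_dialects().tokenizes_to(
    "select 1",
    vec![
        Token::make_keyword("select"),
        Token::Whitespace(Whitespace::Space),
        Token::Number("1".to_string(), false),
    ],
);
```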
/// Returns all available dialects.
@ -294,11 +258,6 @@ pub fn all_dialects() -> TestedDialects {
])
}
/// Returns all available dialects with the specified parser options
pub fn all_dialects_with_options(options: ParserOptions) -> TestedDialects {
TestedDialects::new_with_options(all_dialects().dialects, options)
}
/// Returns all dialects matching the given predicate.
pub fn all_dialects_where<F>(predicate: F) -> TestedDialects
where
@ -350,12 +309,10 @@ pub fn alter_table_op_with_name(stmt: Statement, expected_name: &str) -> AlterTa
operations,
on_cluster: _,
location: _,
iceberg,
} => {
assert_eq!(name.to_string(), expected_name);
assert!(!if_exists);
assert!(!is_only);
assert!(!iceberg);
only(operations)
}
_ => panic!("Expected ALTER TABLE statement"),
@ -371,11 +328,6 @@ pub fn number(n: &str) -> Value {
Value::Number(n.parse().unwrap(), false)
}
/// Creates a [Value::SingleQuotedString]
pub fn single_quoted_string(s: impl Into<String>) -> Value {
Value::SingleQuotedString(s.into())
}
pub fn table_alias(name: impl Into<String>) -> Option<TableAlias> {
Some(TableAlias {
name: Ident::new(name),
@ -385,7 +337,7 @@ pub fn table_alias(name: impl Into<String>) -> Option<TableAlias> {
pub fn table(name: impl Into<String>) -> TableFactor {
TableFactor::Table {
name: ObjectName::from(vec![Ident::new(name.into())]),
name: ObjectName(vec![Ident::new(name.into())]),
alias: None,
args: None,
with_hints: vec![],
@ -394,7 +346,6 @@ pub fn table(name: impl Into<String>) -> TableFactor {
with_ordinality: false,
json_path: None,
sample: None,
index_hints: vec![],
}
}
@ -409,13 +360,12 @@ pub fn table_from_name(name: ObjectName) -> TableFactor {
with_ordinality: false,
json_path: None,
sample: None,
index_hints: vec![],
}
}
pub fn table_with_alias(name: impl Into<String>, alias: impl Into<String>) -> TableFactor {
TableFactor::Table {
name: ObjectName::from(vec![Ident::new(name)]),
name: ObjectName(vec![Ident::new(name)]),
alias: Some(TableAlias {
name: Ident::new(alias),
columns: vec![],
@ -427,7 +377,6 @@ pub fn table_with_alias(name: impl Into<String>, alias: impl Into<String>) -> Ta
with_ordinality: false,
json_path: None,
sample: None,
index_hints: vec![],
}
}
@ -435,13 +384,13 @@ pub fn join(relation: TableFactor) -> Join {
Join {
relation,
global: false,
join_operator: JoinOperator::Join(JoinConstraint::Natural),
join_operator: JoinOperator::Inner(JoinConstraint::Natural),
}
}
pub fn call(function: &str, args: impl IntoIterator<Item = Expr>) -> Expr {
Expr::Function(Function {
name: ObjectName::from(vec![Ident::new(function)]),
name: ObjectName(vec![Ident::new(function)]),
uses_odbc_syntax: false,
parameters: FunctionArguments::None,
args: FunctionArguments::List(FunctionArgumentList {
@ -458,52 +407,3 @@ pub fn call(function: &str, args: impl IntoIterator<Item = Expr>) -> Expr {
within_group: vec![],
})
}
/// Gets the first index column (MySQL calls it a key part) of the first index found in a
/// [`Statement::CreateIndex`], [`Statement::CreateTable`], or [`Statement::AlterTable`].
pub fn index_column(stmt: Statement) -> Expr {
match stmt {
Statement::CreateIndex(CreateIndex { columns, .. }) => {
columns.first().unwrap().column.expr.clone()
}
Statement::CreateTable(CreateTable { constraints, .. }) => {
match constraints.first().unwrap() {
TableConstraint::Index { columns, .. } => {
columns.first().unwrap().column.expr.clone()
}
TableConstraint::Unique { columns, .. } => {
columns.first().unwrap().column.expr.clone()
}
TableConstraint::PrimaryKey { columns, .. } => {
columns.first().unwrap().column.expr.clone()
}
TableConstraint::FulltextOrSpatial { columns, .. } => {
columns.first().unwrap().column.expr.clone()
}
_ => panic!("Expected an index, unique, primary, full text, or spatial constraint (foreign key does not support general key part expressions)"),
}
}
Statement::AlterTable { operations, .. } => match operations.first().unwrap() {
AlterTableOperation::AddConstraint { constraint, .. } => {
match constraint {
TableConstraint::Index { columns, .. } => {
columns.first().unwrap().column.expr.clone()
}
TableConstraint::Unique { columns, .. } => {
columns.first().unwrap().column.expr.clone()
}
TableConstraint::PrimaryKey { columns, .. } => {
columns.first().unwrap().column.expr.clone()
}
TableConstraint::FulltextOrSpatial {
columns,
..
} => columns.first().unwrap().column.expr.clone(),
_ => panic!("Expected an index, unique, primary, full text, or spatial constraint (foreign key does not support general key part expressions)"),
}
}
_ => panic!("Expected a constraint"),
},
_ => panic!("Expected CREATE INDEX, ALTER TABLE, or CREATE TABLE, got: {stmt:?}"),
}
}
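A hypothetical use from a dialect test, assuming the statement parses as `Statement::CreateIndex` with a plain column as its first key part:

```rust
// `index_column` digs out the first key-part expression, here the bare column `c`.
let stmt = all_dialects().verified_stmt("CREATE INDEX idx ON t(c)");
assert_eq!(index_column(stmt), Expr::Identifier(Ident::new("c")));
```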


@ -170,10 +170,8 @@ pub enum Token {
RBrace,
/// Right Arrow `=>`
RArrow,
/// Sharp `#` used for PostgreSQL Bitwise XOR operator, also PostgreSQL/Redshift geometrical unary/binary operator (Number of points in path or polygon/Intersection)
/// Sharp `#` used for PostgreSQL Bitwise XOR operator
Sharp,
/// `##` PostgreSQL/Redshift geometrical binary operator (Point of closest proximity)
DoubleSharp,
/// Tilde `~` used for PostgreSQL Bitwise NOT operator or case sensitive match regular expression operator
Tilde,
/// `~*` , a case insensitive match regular expression operator in PostgreSQL
@ -200,7 +198,7 @@ pub enum Token {
ExclamationMark,
/// Double Exclamation Mark `!!` used for PostgreSQL prefix factorial operator
DoubleExclamationMark,
/// AtSign `@` used for PostgreSQL abs operator, also PostgreSQL/Redshift geometrical unary/binary operator (Center, Contained or on)
/// AtSign `@` used for PostgreSQL abs operator
AtSign,
/// `^@`, a "starts with" string operator in PostgreSQL
CaretAt,
@ -216,38 +214,6 @@ pub enum Token {
LongArrow,
/// `#>`, extracts JSON sub-object at the specified path
HashArrow,
/// `@-@` PostgreSQL/Redshift geometrical unary operator (Length or circumference)
AtDashAt,
/// `?-` PostgreSQL/Redshift geometrical unary/binary operator (Is horizontal?/Are horizontally aligned?)
QuestionMarkDash,
/// `&<` PostgreSQL/Redshift geometrical binary operator (Overlaps to left?)
AmpersandLeftAngleBracket,
/// `&>` PostgreSQL/Redshift geometrical binary operator (Overlaps to right?)
AmpersandRightAngleBracket,
/// `&<|` PostgreSQL/Redshift geometrical binary operator (Does not extend above?)
AmpersandLeftAngleBracketVerticalBar,
/// `|&>` PostgreSQL/Redshift geometrical binary operator (Does not extend below?)
VerticalBarAmpersandRightAngleBracket,
/// `<->` PostgreSQL/Redshift geometrical binary operator (Distance between)
TwoWayArrow,
/// `<^` PostgreSQL/Redshift geometrical binary operator (Is below?)
LeftAngleBracketCaret,
/// `>^` PostgreSQL/Redshift geometrical binary operator (Is above?)
RightAngleBracketCaret,
/// `?#` PostgreSQL/Redshift geometrical binary operator (Intersects or overlaps)
QuestionMarkSharp,
/// `?-|` PostgreSQL/Redshift geometrical binary operator (Is perpendicular?)
QuestionMarkDashVerticalBar,
/// `?||` PostgreSQL/Redshift geometrical binary operator (Are parallel?)
QuestionMarkDoubleVerticalBar,
/// `~=` PostgreSQL/Redshift geometrical binary operator (Same as)
TildeEqual,
/// `<<|` PostgreSQL/Redshift geometrical binary operator (Is strictly below?)
ShiftLeftVerticalBar,
/// `|>>` PostgreSQL/Redshift geometrical binary operator (Is strictly above?)
VerticalBarShiftRight,
/// `|>` BigQuery pipe operator
VerticalBarRightAngleBracket,
/// `#>>`, extracts JSON sub-object at the specified path as text
HashLongArrow,
/// jsonb @> jsonb -> boolean: Test whether left json contains the right json
@ -337,7 +303,6 @@ impl fmt::Display for Token {
Token::RBrace => f.write_str("}"),
Token::RArrow => f.write_str("=>"),
Token::Sharp => f.write_str("#"),
Token::DoubleSharp => f.write_str("##"),
Token::ExclamationMark => f.write_str("!"),
Token::DoubleExclamationMark => f.write_str("!!"),
Token::Tilde => f.write_str("~"),
@ -355,22 +320,6 @@ impl fmt::Display for Token {
Token::Overlap => f.write_str("&&"),
Token::PGSquareRoot => f.write_str("|/"),
Token::PGCubeRoot => f.write_str("||/"),
Token::AtDashAt => f.write_str("@-@"),
Token::QuestionMarkDash => f.write_str("?-"),
Token::AmpersandLeftAngleBracket => f.write_str("&<"),
Token::AmpersandRightAngleBracket => f.write_str("&>"),
Token::AmpersandLeftAngleBracketVerticalBar => f.write_str("&<|"),
Token::VerticalBarAmpersandRightAngleBracket => f.write_str("|&>"),
Token::VerticalBarRightAngleBracket => f.write_str("|>"),
Token::TwoWayArrow => f.write_str("<->"),
Token::LeftAngleBracketCaret => f.write_str("<^"),
Token::RightAngleBracketCaret => f.write_str(">^"),
Token::QuestionMarkSharp => f.write_str("?#"),
Token::QuestionMarkDashVerticalBar => f.write_str("?-|"),
Token::QuestionMarkDoubleVerticalBar => f.write_str("?||"),
Token::TildeEqual => f.write_str("~="),
Token::ShiftLeftVerticalBar => f.write_str("<<|"),
Token::VerticalBarShiftRight => f.write_str("|>>"),
Token::Placeholder(ref s) => write!(f, "{s}"),
Token::Arrow => write!(f, "->"),
Token::LongArrow => write!(f, "->>"),
@ -898,7 +847,7 @@ impl<'a> Tokenizer<'a> {
};
let mut location = state.location();
while let Some(token) = self.next_token(&mut state, buf.last().map(|t| &t.token))? {
while let Some(token) = self.next_token(&mut state)? {
let span = location.span_to(state.location());
buf.push(TokenWithSpan { token, span });
@ -935,11 +884,7 @@ impl<'a> Tokenizer<'a> {
}
/// Get the next token or return None
fn next_token(
&self,
chars: &mut State,
prev_token: Option<&Token>,
) -> Result<Option<Token>, TokenizerError> {
fn next_token(&self, chars: &mut State) -> Result<Option<Token>, TokenizerError> {
match chars.peek() {
Some(&ch) => match ch {
' ' => self.consume_and_return(chars, Token::Whitespace(Whitespace::Space)),
@ -1026,10 +971,7 @@ impl<'a> Tokenizer<'a> {
match chars.peek() {
Some('\'') => {
// N'...' - a <national character string literal>
let backslash_escape =
self.dialect.supports_string_literal_backslash_escape();
let s =
self.tokenize_single_quoted_string(chars, '\'', backslash_escape)?;
let s = self.tokenize_single_quoted_string(chars, '\'', true)?;
Ok(Some(Token::NationalStringLiteral(s)))
}
_ => {
@ -1040,7 +982,7 @@ impl<'a> Tokenizer<'a> {
}
}
// PostgreSQL accepts "escape" string constants, which are an extension to the SQL standard.
x @ 'e' | x @ 'E' if self.dialect.supports_string_escape_constant() => {
x @ 'e' | x @ 'E' => {
let starting_loc = chars.location();
chars.next(); // consume, to check the next char
match chars.peek() {
@ -1191,40 +1133,12 @@ impl<'a> Tokenizer<'a> {
}
// numbers and period
'0'..='9' | '.' => {
// Special case: if `._` is encountered after a word, that word is a
// table name and `_` starts the column name. If the previous token is
// not a word, then this is not valid SQL (neither a word nor a number).
if ch == '.' && chars.peekable.clone().nth(1) == Some('_') {
if let Some(Token::Word(_)) = prev_token {
chars.next();
return Ok(Some(Token::Period));
}
return self.tokenizer_error(
chars.location(),
"Unexpected character '_'".to_string(),
);
}
// Some dialects support underscore as number separator
// There can only be one at a time and it must be followed by another digit
let is_number_separator = |ch: char, next_char: Option<char>| {
self.dialect.supports_numeric_literal_underscores()
&& ch == '_'
&& next_char.is_some_and(|next_ch| next_ch.is_ascii_hexdigit())
};
let mut s = peeking_next_take_while(chars, |ch, next_ch| {
ch.is_ascii_digit() || is_number_separator(ch, next_ch)
});
let mut s = peeking_take_while(chars, |ch| ch.is_ascii_digit());
// match binary literal that starts with 0x
if s == "0" && chars.peek() == Some(&'x') {
chars.next();
let s2 = peeking_next_take_while(chars, |ch, next_ch| {
ch.is_ascii_hexdigit() || is_number_separator(ch, next_ch)
});
let s2 = peeking_take_while(chars, |ch| ch.is_ascii_hexdigit());
return Ok(Some(Token::HexStringLiteral(s2)));
}
@ -1233,30 +1147,15 @@ impl<'a> Tokenizer<'a> {
s.push('.');
chars.next();
}
s += &peeking_take_while(chars, |ch| ch.is_ascii_digit());
// If the dialect supports identifiers that start with a numeric prefix
// and we have now consumed a dot, check if the previous token was a Word.
// If so, what follows is definitely not part of a decimal number and
// we should yield the dot as a dedicated token so compound identifiers
// starting with digits can be parsed correctly.
if s == "." && self.dialect.supports_numeric_prefix() {
if let Some(Token::Word(_)) = prev_token {
return Ok(Some(Token::Period));
}
}
// Consume fractional digits.
s += &peeking_next_take_while(chars, |ch, next_ch| {
ch.is_ascii_digit() || is_number_separator(ch, next_ch)
});
// No fraction -> Token::Period
// No number -> Token::Period
if s == "." {
return Ok(Some(Token::Period));
}
// Parse exponent as number
let mut exponent_part = String::new();
if chars.peek() == Some(&'e') || chars.peek() == Some(&'E') {
let mut char_clone = chars.peekable.clone();
exponent_part.push(char_clone.next().unwrap());
@ -1285,23 +1184,14 @@ impl<'a> Tokenizer<'a> {
}
}
// If the dialect supports identifiers that start with a numeric prefix,
// we need to check if the value is in fact an identifier and must thus
// be tokenized as a word.
if self.dialect.supports_numeric_prefix() {
if exponent_part.is_empty() {
// If it is not a number with an exponent, it may be
// an identifier starting with digits.
let word =
peeking_take_while(chars, |ch| self.dialect.is_identifier_part(ch));
// The MySQL dialect supports identifiers that start with a numeric prefix,
// as long as they aren't an exponent number.
if self.dialect.supports_numeric_prefix() && exponent_part.is_empty() {
let word =
peeking_take_while(chars, |ch| self.dialect.is_identifier_part(ch));
if !word.is_empty() {
s += word.as_str();
return Ok(Some(Token::make_word(s.as_str(), None)));
}
} else if prev_token == Some(&Token::Period) {
// If the previous token was a period, thus not belonging to a number,
// the value we have is part of an identifier.
if !word.is_empty() {
s += word.as_str();
return Ok(Some(Token::make_word(s.as_str(), None)));
}
}
@ -1321,26 +1211,14 @@ impl<'a> Tokenizer<'a> {
// operators
'-' => {
chars.next(); // consume the '-'
match chars.peek() {
Some('-') => {
let mut is_comment = true;
if self.dialect.requires_single_line_comment_whitespace() {
is_comment = Some(' ') == chars.peekable.clone().nth(1);
}
if is_comment {
chars.next(); // consume second '-'
let comment = self.tokenize_single_line_comment(chars);
return Ok(Some(Token::Whitespace(
Whitespace::SingleLineComment {
prefix: "--".to_owned(),
comment,
},
)));
}
self.start_binop(chars, "-", Token::Minus)
chars.next(); // consume the second '-', starting a single-line comment
let comment = self.tokenize_single_line_comment(chars);
Ok(Some(Token::Whitespace(Whitespace::SingleLineComment {
prefix: "--".to_owned(),
comment,
})))
}
Some('>') => {
chars.next();
@ -1400,31 +1278,6 @@ impl<'a> Tokenizer<'a> {
_ => self.start_binop(chars, "||", Token::StringConcat),
}
}
Some('&') if self.dialect.supports_geometric_types() => {
chars.next(); // consume
match chars.peek() {
Some('>') => self.consume_for_binop(
chars,
"|&>",
Token::VerticalBarAmpersandRightAngleBracket,
),
_ => self.start_binop_opt(chars, "|&", None),
}
}
Some('>') if self.dialect.supports_geometric_types() => {
chars.next(); // consume
match chars.peek() {
Some('>') => self.consume_for_binop(
chars,
"|>>",
Token::VerticalBarShiftRight,
),
_ => self.start_binop_opt(chars, "|>", None),
}
}
Some('>') if self.dialect.supports_pipe_operator() => {
self.consume_for_binop(chars, "|>", Token::VerticalBarRightAngleBracket)
}
// Bitshift '|' operator
_ => self.start_binop(chars, "|", Token::Pipe),
}
@ -1473,34 +1326,8 @@ impl<'a> Tokenizer<'a> {
_ => self.start_binop(chars, "<=", Token::LtEq),
}
}
Some('|') if self.dialect.supports_geometric_types() => {
self.consume_for_binop(chars, "<<|", Token::ShiftLeftVerticalBar)
}
Some('>') => self.consume_for_binop(chars, "<>", Token::Neq),
Some('<') if self.dialect.supports_geometric_types() => {
chars.next(); // consume
match chars.peek() {
Some('|') => self.consume_for_binop(
chars,
"<<|",
Token::ShiftLeftVerticalBar,
),
_ => self.start_binop(chars, "<<", Token::ShiftLeft),
}
}
Some('<') => self.consume_for_binop(chars, "<<", Token::ShiftLeft),
Some('-') if self.dialect.supports_geometric_types() => {
chars.next(); // consume
match chars.peek() {
Some('>') => {
self.consume_for_binop(chars, "<->", Token::TwoWayArrow)
}
_ => self.start_binop_opt(chars, "<-", None),
}
}
Some('^') if self.dialect.supports_geometric_types() => {
self.consume_for_binop(chars, "<^", Token::LeftAngleBracketCaret)
}
Some('@') => self.consume_for_binop(chars, "<@", Token::ArrowAt),
_ => self.start_binop(chars, "<", Token::Lt),
}
@ -1510,9 +1337,6 @@ impl<'a> Tokenizer<'a> {
match chars.peek() {
Some('=') => self.consume_for_binop(chars, ">=", Token::GtEq),
Some('>') => self.consume_for_binop(chars, ">>", Token::ShiftRight),
Some('^') if self.dialect.supports_geometric_types() => {
self.consume_for_binop(chars, ">^", Token::RightAngleBracketCaret)
}
_ => self.start_binop(chars, ">", Token::Gt),
}
}
@ -1531,22 +1355,6 @@ impl<'a> Tokenizer<'a> {
'&' => {
chars.next(); // consume the '&'
match chars.peek() {
Some('>') if self.dialect.supports_geometric_types() => {
chars.next();
self.consume_and_return(chars, Token::AmpersandRightAngleBracket)
}
Some('<') if self.dialect.supports_geometric_types() => {
chars.next(); // consume
match chars.peek() {
Some('|') => self.consume_and_return(
chars,
Token::AmpersandLeftAngleBracketVerticalBar,
),
_ => {
self.start_binop(chars, "&<", Token::AmpersandLeftAngleBracket)
}
}
}
Some('&') => {
chars.next(); // consume the second '&'
self.start_binop(chars, "&&", Token::Overlap)
@ -1577,9 +1385,6 @@ impl<'a> Tokenizer<'a> {
chars.next(); // consume
match chars.peek() {
Some('*') => self.consume_for_binop(chars, "~*", Token::TildeAsterisk),
Some('=') if self.dialect.supports_geometric_types() => {
self.consume_for_binop(chars, "~=", Token::TildeEqual)
}
Some('~') => {
chars.next();
match chars.peek() {
@ -1606,9 +1411,6 @@ impl<'a> Tokenizer<'a> {
}
}
Some(' ') => Ok(Some(Token::Sharp)),
Some('#') if self.dialect.supports_geometric_types() => {
self.consume_for_binop(chars, "##", Token::DoubleSharp)
}
Some(sch) if self.dialect.is_identifier_start('#') => {
self.tokenize_identifier_or_keyword([ch, *sch], chars)
}
@ -1618,16 +1420,6 @@ impl<'a> Tokenizer<'a> {
'@' => {
chars.next();
match chars.peek() {
Some('@') if self.dialect.supports_geometric_types() => {
self.consume_and_return(chars, Token::AtAt)
}
Some('-') if self.dialect.supports_geometric_types() => {
chars.next();
match chars.peek() {
Some('@') => self.consume_and_return(chars, Token::AtDashAt),
_ => self.start_binop_opt(chars, "@-", None),
}
}
Some('>') => self.consume_and_return(chars, Token::AtArrow),
Some('?') => self.consume_and_return(chars, Token::AtQuestion),
Some('@') => {
@ -1660,30 +1452,11 @@ impl<'a> Tokenizer<'a> {
}
}
// Postgres uses ? for jsonb operators, not prepared statements
'?' if self.dialect.supports_geometric_types() => {
chars.next(); // consume
'?' if dialect_of!(self is PostgreSqlDialect) => {
chars.next();
match chars.peek() {
Some('|') => {
chars.next();
match chars.peek() {
Some('|') => self.consume_and_return(
chars,
Token::QuestionMarkDoubleVerticalBar,
),
_ => Ok(Some(Token::QuestionPipe)),
}
}
Some('|') => self.consume_and_return(chars, Token::QuestionPipe),
Some('&') => self.consume_and_return(chars, Token::QuestionAnd),
Some('-') => {
chars.next(); // consume
match chars.peek() {
Some('|') => self
.consume_and_return(chars, Token::QuestionMarkDashVerticalBar),
_ => Ok(Some(Token::QuestionMarkDash)),
}
}
Some('#') => self.consume_and_return(chars, Token::QuestionMarkSharp),
_ => self.consume_and_return(chars, Token::Question),
}
}
@ -1717,7 +1490,7 @@ impl<'a> Tokenizer<'a> {
default: Token,
) -> Result<Option<Token>, TokenizerError> {
chars.next(); // consume the first char
self.start_binop_opt(chars, prefix, Some(default))
self.start_binop(chars, prefix, default)
}
/// parse a custom binary operator
@ -1726,16 +1499,6 @@ impl<'a> Tokenizer<'a> {
chars: &mut State,
prefix: &str,
default: Token,
) -> Result<Option<Token>, TokenizerError> {
self.start_binop_opt(chars, prefix, Some(default))
}
/// parse a custom binary operator
fn start_binop_opt(
&self,
chars: &mut State,
prefix: &str,
default: Option<Token>,
) -> Result<Option<Token>, TokenizerError> {
let mut custom = None;
while let Some(&ch) = chars.peek() {
@ -1746,14 +1509,10 @@ impl<'a> Tokenizer<'a> {
custom.get_or_insert_with(|| prefix.to_string()).push(ch);
chars.next();
}
match (custom, default) {
(Some(custom), _) => Ok(Token::CustomBinaryOperator(custom).into()),
(None, Some(tok)) => Ok(Some(tok)),
(None, None) => self.tokenizer_error(
chars.location(),
format!("Expected a valid binary operator after '{prefix}'"),
),
}
Ok(Some(
custom.map(Token::CustomBinaryOperator).unwrap_or(default),
))
}
/// Tokenize a dollar-preceded value (i.e., a string or placeholder)
@ -1809,7 +1568,7 @@ impl<'a> Tokenizer<'a> {
chars.next();
let mut temp = String::new();
let end_delimiter = format!("${value}$");
let end_delimiter = format!("${}$", value);
loop {
match chars.next() {
@ -2058,13 +1817,8 @@ impl<'a> Tokenizer<'a> {
num_consecutive_quotes = 0;
if let Some(next) = chars.peek() {
if !self.unescape
|| (self.dialect.ignores_wildcard_escapes()
&& (*next == '%' || *next == '_'))
{
// In no-escape mode, the given query has to be saved completely
// including backslashes. Similarly, with ignore_like_wildcard_escapes,
// the backslash is not stripped.
if !self.unescape {
// In no-escape mode, the given query has to be saved completely including backslashes.
s.push(ch);
s.push(*next);
chars.next(); // consume next
@ -2189,24 +1943,6 @@ fn peeking_take_while(chars: &mut State, mut predicate: impl FnMut(char) -> bool
s
}
/// Same as peeking_take_while, but also passes the next character to the predicate.
fn peeking_next_take_while(
chars: &mut State,
mut predicate: impl FnMut(char, Option<char>) -> bool,
) -> String {
let mut s = String::new();
while let Some(&ch) = chars.peek() {
let next_char = chars.peekable.clone().nth(1);
if predicate(ch, next_char) {
chars.next(); // consume
s.push(ch);
} else {
break;
}
}
s
}
fn unescape_single_quoted_string(chars: &mut State<'_>) -> Option<String> {
Unescape::new(chars).unescape()
}
@ -2402,13 +2138,13 @@ fn take_char_from_hex_digits(
location: chars.location(),
})?;
let digit = next_char.to_digit(16).ok_or_else(|| TokenizerError {
message: format!("Invalid hex digit in escaped unicode string: {next_char}"),
message: format!("Invalid hex digit in escaped unicode string: {}", next_char),
location: chars.location(),
})?;
result = result * 16 + digit;
}
char::from_u32(result).ok_or_else(|| TokenizerError {
message: format!("Invalid unicode character: {result:x}"),
message: format!("Invalid unicode character: {:x}", result),
location: chars.location(),
})
}
@ -2419,7 +2155,6 @@ mod tests {
use crate::dialect::{
BigQueryDialect, ClickHouseDialect, HiveDialect, MsSqlDialect, MySqlDialect, SQLiteDialect,
};
use crate::test_utils::all_dialects_where;
use core::fmt::Debug;
#[test]
@ -2488,41 +2223,6 @@ mod tests {
compare(expected, tokens);
}
#[test]
fn tokenize_numeric_literal_underscore() {
let dialect = GenericDialect {};
let sql = String::from("SELECT 10_000");
let mut tokenizer = Tokenizer::new(&dialect, &sql);
let tokens = tokenizer.tokenize().unwrap();
let expected = vec![
Token::make_keyword("SELECT"),
Token::Whitespace(Whitespace::Space),
Token::Number("10".to_string(), false),
Token::make_word("_000", None),
];
compare(expected, tokens);
all_dialects_where(|dialect| dialect.supports_numeric_literal_underscores()).tokenizes_to(
"SELECT 10_000, _10_000, 10_00_, 10___0",
vec![
Token::make_keyword("SELECT"),
Token::Whitespace(Whitespace::Space),
Token::Number("10_000".to_string(), false),
Token::Comma,
Token::Whitespace(Whitespace::Space),
Token::make_word("_10_000", None), // leading underscore tokenizes as a word (parsed as column identifier)
Token::Comma,
Token::Whitespace(Whitespace::Space),
Token::Number("10_00".to_string(), false),
Token::make_word("_", None), // trailing underscores tokenizes as a word (syntax error in some dialects)
Token::Comma,
Token::Whitespace(Whitespace::Space),
Token::Number("10".to_string(), false),
Token::make_word("___0", None), // multiple underscores tokenizes as a word (syntax error in some dialects)
],
);
}
#[test]
fn tokenize_select_exponent() {
let sql = String::from("SELECT 1e10, 1e-10, 1e+10, 1ea, 1e-10a, 1e-10-10");
@ -3504,7 +3204,7 @@ mod tests {
}
fn check_unescape(s: &str, expected: Option<&str>) {
let s = format!("'{s}'");
let s = format!("'{}'", s);
let mut state = State {
peekable: s.chars().peekable(),
line: 0,
@ -3637,9 +3337,6 @@ mod tests {
(r#"'\\a\\b\'c'"#, r#"\\a\\b\'c"#, r#"\a\b'c"#),
(r#"'\'abcd'"#, r#"\'abcd"#, r#"'abcd"#),
(r#"'''a''b'"#, r#"''a''b"#, r#"'a'b"#),
(r#"'\q'"#, r#"\q"#, r#"q"#),
(r#"'\%\_'"#, r#"\%\_"#, r#"%_"#),
(r#"'\\%\\_'"#, r#"\\%\\_"#, r#"\%\_"#),
] {
let tokens = Tokenizer::new(&dialect, sql)
.with_unescape(false)
@ -3673,16 +3370,6 @@ mod tests {
compare(expected, tokens);
}
// MySQL special case for LIKE escapes
for (sql, expected) in [(r#"'\%'"#, r#"\%"#), (r#"'\_'"#, r#"\_"#)] {
let dialect = MySqlDialect {};
let tokens = Tokenizer::new(&dialect, sql).tokenize().unwrap();
let expected = vec![Token::SingleQuotedString(expected.to_string())];
compare(expected, tokens);
}
}
#[test]
@ -3856,218 +3543,4 @@ mod tests {
];
compare(expected, tokens);
}
#[test]
fn test_national_strings_backslash_escape_not_supported() {
all_dialects_where(|dialect| !dialect.supports_string_literal_backslash_escape())
.tokenizes_to(
"select n'''''\\'",
vec![
Token::make_keyword("select"),
Token::Whitespace(Whitespace::Space),
Token::NationalStringLiteral("''\\".to_string()),
],
);
}
#[test]
fn test_national_strings_backslash_escape_supported() {
all_dialects_where(|dialect| dialect.supports_string_literal_backslash_escape())
.tokenizes_to(
"select n'''''\\''",
vec![
Token::make_keyword("select"),
Token::Whitespace(Whitespace::Space),
Token::NationalStringLiteral("'''".to_string()),
],
);
}
#[test]
fn test_string_escape_constant_not_supported() {
all_dialects_where(|dialect| !dialect.supports_string_escape_constant()).tokenizes_to(
"select e'...'",
vec![
Token::make_keyword("select"),
Token::Whitespace(Whitespace::Space),
Token::make_word("e", None),
Token::SingleQuotedString("...".to_string()),
],
);
all_dialects_where(|dialect| !dialect.supports_string_escape_constant()).tokenizes_to(
"select E'...'",
vec![
Token::make_keyword("select"),
Token::Whitespace(Whitespace::Space),
Token::make_word("E", None),
Token::SingleQuotedString("...".to_string()),
],
);
}
#[test]
fn test_string_escape_constant_supported() {
all_dialects_where(|dialect| dialect.supports_string_escape_constant()).tokenizes_to(
"select e'\\''",
vec![
Token::make_keyword("select"),
Token::Whitespace(Whitespace::Space),
Token::EscapedStringLiteral("'".to_string()),
],
);
all_dialects_where(|dialect| dialect.supports_string_escape_constant()).tokenizes_to(
"select E'\\''",
vec![
Token::make_keyword("select"),
Token::Whitespace(Whitespace::Space),
Token::EscapedStringLiteral("'".to_string()),
],
);
}
#[test]
fn test_whitespace_required_after_single_line_comment() {
all_dialects_where(|dialect| dialect.requires_single_line_comment_whitespace())
.tokenizes_to(
"SELECT --'abc'",
vec![
Token::make_keyword("SELECT"),
Token::Whitespace(Whitespace::Space),
Token::Minus,
Token::Minus,
Token::SingleQuotedString("abc".to_string()),
],
);
all_dialects_where(|dialect| dialect.requires_single_line_comment_whitespace())
.tokenizes_to(
"SELECT -- 'abc'",
vec![
Token::make_keyword("SELECT"),
Token::Whitespace(Whitespace::Space),
Token::Whitespace(Whitespace::SingleLineComment {
prefix: "--".to_string(),
comment: " 'abc'".to_string(),
}),
],
);
all_dialects_where(|dialect| dialect.requires_single_line_comment_whitespace())
.tokenizes_to(
"SELECT --",
vec![
Token::make_keyword("SELECT"),
Token::Whitespace(Whitespace::Space),
Token::Minus,
Token::Minus,
],
);
}
#[test]
fn test_whitespace_not_required_after_single_line_comment() {
all_dialects_where(|dialect| !dialect.requires_single_line_comment_whitespace())
.tokenizes_to(
"SELECT --'abc'",
vec![
Token::make_keyword("SELECT"),
Token::Whitespace(Whitespace::Space),
Token::Whitespace(Whitespace::SingleLineComment {
prefix: "--".to_string(),
comment: "'abc'".to_string(),
}),
],
);
all_dialects_where(|dialect| !dialect.requires_single_line_comment_whitespace())
.tokenizes_to(
"SELECT -- 'abc'",
vec![
Token::make_keyword("SELECT"),
Token::Whitespace(Whitespace::Space),
Token::Whitespace(Whitespace::SingleLineComment {
prefix: "--".to_string(),
comment: " 'abc'".to_string(),
}),
],
);
all_dialects_where(|dialect| !dialect.requires_single_line_comment_whitespace())
.tokenizes_to(
"SELECT --",
vec![
Token::make_keyword("SELECT"),
Token::Whitespace(Whitespace::Space),
Token::Whitespace(Whitespace::SingleLineComment {
prefix: "--".to_string(),
comment: "".to_string(),
}),
],
);
}
#[test]
fn test_tokenize_identifiers_numeric_prefix() {
all_dialects_where(|dialect| dialect.supports_numeric_prefix())
.tokenizes_to("123abc", vec![Token::make_word("123abc", None)]);
all_dialects_where(|dialect| dialect.supports_numeric_prefix())
.tokenizes_to("12e34", vec![Token::Number("12e34".to_string(), false)]);
all_dialects_where(|dialect| dialect.supports_numeric_prefix()).tokenizes_to(
"t.12e34",
vec![
Token::make_word("t", None),
Token::Period,
Token::make_word("12e34", None),
],
);
all_dialects_where(|dialect| dialect.supports_numeric_prefix()).tokenizes_to(
"t.1two3",
vec![
Token::make_word("t", None),
Token::Period,
Token::make_word("1two3", None),
],
);
}
#[test]
fn tokenize_period_underscore() {
let sql = String::from("SELECT table._col");
// a dialect that supports underscores in numeric literals
let dialect = PostgreSqlDialect {};
let tokens = Tokenizer::new(&dialect, &sql).tokenize().unwrap();
let expected = vec![
Token::make_keyword("SELECT"),
Token::Whitespace(Whitespace::Space),
Token::Word(Word {
value: "table".to_string(),
quote_style: None,
keyword: Keyword::TABLE,
}),
Token::Period,
Token::Word(Word {
value: "_col".to_string(),
quote_style: None,
keyword: Keyword::NoKeyword,
}),
];
compare(expected, tokens);
let sql = String::from("SELECT ._123");
if let Ok(tokens) = Tokenizer::new(&dialect, &sql).tokenize() {
panic!("Tokenizer should have failed on {sql}, but it succeeded with {tokens:?}");
}
let sql = String::from("SELECT ._abc");
if let Ok(tokens) = Tokenizer::new(&dialect, &sql).tokenize() {
panic!("Tokenizer should have failed on {sql}, but it succeeded with {tokens:?}");
}
}
}


@ -1,414 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;
fn prettify(sql: &str) -> String {
let ast = Parser::parse_sql(&GenericDialect {}, sql).unwrap();
format!("{:#}", ast[0])
}
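The helper leans on Rust's alternate formatting flag: `{}` yields the compact single-line SQL, while `{:#}` yields the indented form exercised by the tests below. A minimal sketch of the same idea outside the test harness:

```rust
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;

fn main() {
    let ast = Parser::parse_sql(&GenericDialect {}, "SELECT a FROM t").unwrap();
    println!("{}", ast[0]);   // compact: SELECT a FROM t
    println!("{:#}", ast[0]); // pretty-printed with line breaks and indentation
}
```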
#[test]
fn test_pretty_print_select() {
assert_eq!(
prettify("SELECT a, b, c FROM my_table WHERE x = 1 AND y = 2"),
r#"
SELECT
a,
b,
c
FROM
my_table
WHERE
x = 1 AND y = 2
"#
.trim()
);
}
#[test]
fn test_pretty_print_join() {
assert_eq!(
prettify("SELECT a FROM table1 JOIN table2 ON table1.id = table2.id"),
r#"
SELECT
a
FROM
table1
JOIN table2 ON table1.id = table2.id
"#
.trim()
);
}
#[test]
fn test_pretty_print_subquery() {
assert_eq!(
prettify("SELECT * FROM (SELECT a, b FROM my_table) AS subquery"),
r#"
SELECT
*
FROM
(
SELECT
a,
b
FROM
my_table
) AS subquery
"#
.trim()
);
}
#[test]
fn test_pretty_print_union() {
assert_eq!(
prettify("SELECT a FROM table1 UNION SELECT b FROM table2"),
r#"
SELECT
a
FROM
table1
UNION
SELECT
b
FROM
table2
"#
.trim()
);
}
#[test]
fn test_pretty_print_group_by() {
assert_eq!(
prettify("SELECT a, COUNT(*) FROM my_table GROUP BY a HAVING COUNT(*) > 1"),
r#"
SELECT
a,
COUNT(*)
FROM
my_table
GROUP BY
a
HAVING
COUNT(*) > 1
"#
.trim()
);
}
#[test]
fn test_pretty_print_cte() {
assert_eq!(
prettify("WITH cte AS (SELECT a, b FROM my_table) SELECT * FROM cte"),
r#"
WITH cte AS (
SELECT
a,
b
FROM
my_table
)
SELECT
*
FROM
cte
"#
.trim()
);
}
#[test]
fn test_pretty_print_case_when() {
assert_eq!(
prettify("SELECT CASE WHEN x > 0 THEN 'positive' WHEN x < 0 THEN 'negative' ELSE 'zero' END FROM my_table"),
r#"
SELECT
CASE
WHEN x > 0 THEN
'positive'
WHEN x < 0 THEN
'negative'
ELSE
'zero'
END
FROM
my_table
"#.trim()
);
}
#[test]
fn test_pretty_print_window_function() {
assert_eq!(
prettify("SELECT id, value, ROW_NUMBER() OVER (PARTITION BY category ORDER BY value DESC) as rank FROM my_table"),
r#"
SELECT
id,
value,
ROW_NUMBER() OVER (
PARTITION BY category
ORDER BY value DESC
) AS rank
FROM
my_table
"#.trim()
);
}
#[test]
fn test_pretty_print_multiline_string() {
assert_eq!(
prettify("SELECT 'multiline\nstring' AS str"),
r#"
SELECT
'multiline
string' AS str
"#
.trim(),
"A literal string with a newline should be kept as is. The contents of the string should not be indented."
);
}
#[test]
fn test_pretty_print_insert_values() {
assert_eq!(
prettify("INSERT INTO my_table (a, b, c) VALUES (1, 2, 3), (4, 5, 6)"),
r#"
INSERT INTO my_table (a, b, c)
VALUES
(1, 2, 3),
(4, 5, 6)
"#
.trim()
);
}
#[test]
fn test_pretty_print_insert_select() {
assert_eq!(
prettify("INSERT INTO my_table (a, b) SELECT x, y FROM source_table RETURNING a AS id"),
r#"
INSERT INTO my_table (a, b)
SELECT
x,
y
FROM
source_table
RETURNING
a AS id
"#
.trim()
);
}
#[test]
fn test_pretty_print_update() {
assert_eq!(
prettify("UPDATE my_table SET a = 1, b = 2 WHERE x > 0 RETURNING id, name"),
r#"
UPDATE my_table
SET
a = 1,
b = 2
WHERE
x > 0
RETURNING
id,
name
"#
.trim()
);
}
#[test]
fn test_pretty_print_delete() {
assert_eq!(
prettify("DELETE FROM my_table WHERE x > 0 RETURNING id, name"),
r#"
DELETE FROM
my_table
WHERE
x > 0
RETURNING
id,
name
"#
.trim()
);
assert_eq!(
prettify("DELETE table1, table2"),
r#"
DELETE
table1,
table2
"#
.trim()
);
}
#[test]
fn test_pretty_print_create_table() {
assert_eq!(
prettify("CREATE TABLE my_table (id INT PRIMARY KEY, name VARCHAR(255) NOT NULL, CONSTRAINT fk_other FOREIGN KEY (id) REFERENCES other_table(id))"),
r#"
CREATE TABLE my_table (
id INT PRIMARY KEY,
name VARCHAR(255) NOT NULL,
CONSTRAINT fk_other FOREIGN KEY (id) REFERENCES other_table(id)
)
"#
.trim()
);
}
#[test]
fn test_pretty_print_create_view() {
assert_eq!(
prettify("CREATE VIEW my_view AS SELECT a, b FROM my_table WHERE x > 0"),
r#"
CREATE VIEW my_view AS
SELECT
a,
b
FROM
my_table
WHERE
x > 0
"#
.trim()
);
}
#[test]
#[ignore = "https://github.com/apache/datafusion-sqlparser-rs/issues/1850"]
fn test_pretty_print_create_function() {
assert_eq!(
prettify("CREATE FUNCTION my_func() RETURNS INT BEGIN SELECT COUNT(*) INTO @count FROM my_table; RETURN @count; END"),
r#"
CREATE FUNCTION my_func() RETURNS INT
BEGIN
SELECT COUNT(*) INTO @count FROM my_table;
RETURN @count;
END
"#
.trim()
);
}
#[test]
#[ignore = "https://github.com/apache/datafusion-sqlparser-rs/issues/1850"]
fn test_pretty_print_json_table() {
assert_eq!(
prettify("SELECT * FROM JSON_TABLE(@json, '$[*]' COLUMNS (id INT PATH '$.id', name VARCHAR(255) PATH '$.name')) AS jt"),
r#"
SELECT
*
FROM
JSON_TABLE(
@json,
'$[*]' COLUMNS (
id INT PATH '$.id',
name VARCHAR(255) PATH '$.name'
)
) AS jt
"#
.trim()
);
}
#[test]
#[ignore = "https://github.com/apache/datafusion-sqlparser-rs/issues/1850"]
fn test_pretty_print_transaction_blocks() {
assert_eq!(
prettify("BEGIN; UPDATE my_table SET x = 1; COMMIT;"),
r#"
BEGIN;
UPDATE my_table SET x = 1;
COMMIT;
"#
.trim()
);
}
#[test]
#[ignore = "https://github.com/apache/datafusion-sqlparser-rs/issues/1850"]
fn test_pretty_print_control_flow() {
assert_eq!(
prettify("IF x > 0 THEN SELECT 'positive'; ELSE SELECT 'negative'; END IF;"),
r#"
IF x > 0 THEN
SELECT 'positive';
ELSE
SELECT 'negative';
END IF;
"#
.trim()
);
}
#[test]
#[ignore = "https://github.com/apache/datafusion-sqlparser-rs/issues/1850"]
fn test_pretty_print_merge() {
assert_eq!(
prettify("MERGE INTO target_table t USING source_table s ON t.id = s.id WHEN MATCHED THEN UPDATE SET t.value = s.value WHEN NOT MATCHED THEN INSERT (id, value) VALUES (s.id, s.value)"),
r#"
MERGE INTO target_table t
USING source_table s ON t.id = s.id
WHEN MATCHED THEN
UPDATE SET t.value = s.value
WHEN NOT MATCHED THEN
INSERT (id, value) VALUES (s.id, s.value)
"#
.trim()
);
}
#[test]
#[ignore = "https://github.com/apache/datafusion-sqlparser-rs/issues/1850"]
fn test_pretty_print_create_index() {
assert_eq!(
prettify("CREATE INDEX idx_name ON my_table (column1, column2)"),
r#"
CREATE INDEX idx_name
ON my_table (column1, column2)
"#
.trim()
);
}
#[test]
#[ignore = "https://github.com/apache/datafusion-sqlparser-rs/issues/1850"]
fn test_pretty_print_explain() {
assert_eq!(
prettify("EXPLAIN ANALYZE SELECT * FROM my_table WHERE x > 0"),
r#"
EXPLAIN ANALYZE
SELECT
*
FROM
my_table
WHERE
x > 0
"#
.trim()
);
}

File diff suppressed because it is too large


@ -28,7 +28,7 @@ use test_utils::*;
use sqlparser::ast::Expr::{BinaryOp, Identifier};
use sqlparser::ast::SelectItem::UnnamedExpr;
use sqlparser::ast::TableFactor::Table;
use sqlparser::ast::Value::Boolean;
use sqlparser::ast::Value::Number;
use sqlparser::ast::*;
use sqlparser::dialect::ClickHouseDialect;
use sqlparser::dialect::GenericDialect;
@ -55,15 +55,14 @@ fn parse_map_access_expr() {
"indexOf",
[
Expr::Identifier(Ident::new("string_names")),
Expr::value(Value::SingleQuotedString("endpoint".to_string()))
Expr::Value(Value::SingleQuotedString("endpoint".to_string()))
]
),
})],
})],
exclude: None,
into: None,
from: vec![TableWithJoins {
relation: table_from_name(ObjectName::from(vec![Ident::new("foos")])),
relation: table_from_name(ObjectName(vec![Ident::new("foos")])),
joins: vec![],
}],
lateral_views: vec![],
@ -72,7 +71,7 @@ fn parse_map_access_expr() {
left: Box::new(BinaryOp {
left: Box::new(Identifier(Ident::new("id"))),
op: BinaryOperator::Eq,
right: Box::new(Expr::value(Value::SingleQuotedString("test".to_string()))),
right: Box::new(Expr::Value(Value::SingleQuotedString("test".to_string()))),
}),
op: BinaryOperator::And,
right: Box::new(BinaryOp {
@ -83,13 +82,13 @@ fn parse_map_access_expr() {
"indexOf",
[
Expr::Identifier(Ident::new("string_name")),
Expr::value(Value::SingleQuotedString("app".to_string()))
Expr::Value(Value::SingleQuotedString("app".to_string()))
]
),
})],
}),
op: BinaryOperator::NotEq,
right: Box::new(Expr::value(Value::SingleQuotedString("foo".to_string()))),
right: Box::new(Expr::Value(Value::SingleQuotedString("foo".to_string()))),
}),
}),
group_by: GroupByExpr::Expressions(vec![], vec![]),
@ -102,7 +101,6 @@ fn parse_map_access_expr() {
qualify: None,
value_table_mode: None,
connect_by: None,
flavor: SelectFlavor::Standard,
},
select
);
@ -115,8 +113,8 @@ fn parse_array_expr() {
assert_eq!(
&Expr::Array(Array {
elem: vec![
Expr::value(Value::SingleQuotedString("1".to_string())),
Expr::value(Value::SingleQuotedString("2".to_string())),
Expr::Value(Value::SingleQuotedString("1".to_string())),
Expr::Value(Value::SingleQuotedString("2".to_string())),
],
named: false,
}),
@ -168,10 +166,7 @@ fn parse_delimited_identifiers() {
version,
..
} => {
assert_eq!(
ObjectName::from(vec![Ident::with_quote('"', "a table")]),
name
);
assert_eq!(vec![Ident::with_quote('"', "a table")], name.0);
assert_eq!(Ident::with_quote('"', "alias"), alias.unwrap().name);
assert!(args.is_none());
assert!(with_hints.is_empty());
@ -190,7 +185,7 @@ fn parse_delimited_identifiers() {
);
assert_eq!(
&Expr::Function(Function {
name: ObjectName::from(vec![Ident::with_quote('"', "myfun")]),
name: ObjectName(vec![Ident::with_quote('"', "myfun")]),
uses_odbc_syntax: false,
parameters: FunctionArguments::None,
args: FunctionArguments::List(FunctionArgumentList {
@ -220,14 +215,10 @@ fn parse_delimited_identifiers() {
#[test]
fn parse_create_table() {
clickhouse().verified_stmt(r#"CREATE TABLE "x" ("a" "int") ENGINE = MergeTree ORDER BY ("x")"#);
clickhouse().verified_stmt(r#"CREATE TABLE "x" ("a" "int") ENGINE = MergeTree ORDER BY "x""#);
clickhouse().verified_stmt(r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY ("x")"#);
clickhouse().verified_stmt(r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY "x""#);
clickhouse().verified_stmt(
r#"CREATE TABLE "x" ("a" "int") ENGINE = MergeTree ORDER BY "x" AS SELECT * FROM "t" WHERE true"#,
);
clickhouse().one_statement_parses_to(
"CREATE TABLE x (a int) ENGINE = MergeTree() ORDER BY a",
"CREATE TABLE x (a INT) ENGINE = MergeTree ORDER BY a",
r#"CREATE TABLE "x" ("a" "int") ENGINE=MergeTree ORDER BY "x" AS SELECT * FROM "t" WHERE true"#,
);
}
@ -311,7 +302,7 @@ fn parse_alter_table_add_projection() {
Statement::AlterTable {
name, operations, ..
} => {
assert_eq!(name, ObjectName::from(vec!["t0".into()]));
assert_eq!(name, ObjectName(vec!["t0".into()]));
assert_eq!(1, operations.len());
assert_eq!(
operations[0],
@ -328,14 +319,12 @@ fn parse_alter_table_add_projection() {
vec![]
)),
order_by: Some(OrderBy {
kind: OrderByKind::Expressions(vec![OrderByExpr {
exprs: vec![OrderByExpr {
expr: Identifier(Ident::new("b")),
options: OrderByOptions {
asc: None,
nulls_first: None,
},
asc: None,
nulls_first: None,
with_fill: None,
}]),
}],
interpolate: None,
}),
}
@ -383,7 +372,7 @@ fn parse_alter_table_drop_projection() {
Statement::AlterTable {
name, operations, ..
} => {
assert_eq!(name, ObjectName::from(vec!["t0".into()]));
assert_eq!(name, ObjectName(vec!["t0".into()]));
assert_eq!(1, operations.len());
assert_eq!(
operations[0],
@ -416,7 +405,7 @@ fn parse_alter_table_clear_and_materialize_projection() {
Statement::AlterTable {
name, operations, ..
} => {
assert_eq!(name, ObjectName::from(vec!["t0".into()]));
assert_eq!(name, ObjectName(vec!["t0".into()]));
assert_eq!(1, operations.len());
assert_eq!(
operations[0],
@ -535,6 +524,7 @@ fn column_def(name: Ident, data_type: DataType) -> ColumnDef {
ColumnDef {
name,
data_type,
collation: None,
options: vec![],
}
}
@ -559,7 +549,7 @@ fn parse_clickhouse_data_types() {
match clickhouse_and_generic().one_statement_parses_to(sql, &canonical_sql) {
Statement::CreateTable(CreateTable { name, columns, .. }) => {
assert_eq!(name, ObjectName::from(vec!["table".into()]));
assert_eq!(name, ObjectName(vec!["table".into()]));
assert_eq!(
columns,
vec![
@ -594,13 +584,13 @@ fn parse_clickhouse_data_types() {
#[test]
fn parse_create_table_with_nullable() {
let sql = r#"CREATE TABLE table (k UInt8, `a` Nullable(String), `b` Nullable(DateTime64(9, 'UTC')), c Nullable(DateTime64(9)), d Date32 NULL) ENGINE = MergeTree ORDER BY (`k`)"#;
let sql = r#"CREATE TABLE table (k UInt8, `a` Nullable(String), `b` Nullable(DateTime64(9, 'UTC')), c Nullable(DateTime64(9)), d Date32 NULL) ENGINE=MergeTree ORDER BY (`k`)"#;
// ClickHouse data type names are case-sensitive, but the canonical representation is not
let canonical_sql = sql.replace("String", "STRING");
match clickhouse_and_generic().one_statement_parses_to(sql, &canonical_sql) {
Statement::CreateTable(CreateTable { name, columns, .. }) => {
assert_eq!(name, ObjectName::from(vec!["table".into()]));
assert_eq!(name, ObjectName(vec!["table".into()]));
assert_eq!(
columns,
vec![
@ -623,6 +613,7 @@ fn parse_create_table_with_nullable() {
ColumnDef {
name: "d".into(),
data_type: DataType::Date32,
collation: None,
options: vec![ColumnOptionDef {
name: None,
option: ColumnOption::Null
@ -648,7 +639,7 @@ fn parse_create_table_with_nested_data_types() {
match clickhouse().one_statement_parses_to(sql, "") {
Statement::CreateTable(CreateTable { name, columns, .. }) => {
assert_eq!(name, ObjectName::from(vec!["table".into()]));
assert_eq!(name, ObjectName(vec!["table".into()]));
assert_eq!(
columns,
vec![
@ -666,6 +657,7 @@ fn parse_create_table_with_nested_data_types() {
DataType::LowCardinality(Box::new(DataType::String(None)))
)
]),
collation: None,
options: vec![],
},
ColumnDef {
@ -674,16 +666,15 @@ fn parse_create_table_with_nested_data_types() {
DataType::Tuple(vec![
StructField {
field_name: None,
field_type: DataType::FixedString(128),
options: None,
field_type: DataType::FixedString(128)
},
StructField {
field_name: None,
field_type: DataType::Int128,
options: None,
field_type: DataType::Int128
}
])
))),
collation: None,
options: vec![],
},
ColumnDef {
@ -692,16 +683,15 @@ fn parse_create_table_with_nested_data_types() {
StructField {
field_name: Some("a".into()),
field_type: DataType::Datetime64(9, None),
options: None,
},
StructField {
field_name: Some("b".into()),
field_type: DataType::Array(ArrayElemTypeDef::Parenthesis(
Box::new(DataType::Uuid)
)),
options: None,
))
},
]),
collation: None,
options: vec![],
},
ColumnDef {
@ -710,6 +700,7 @@ fn parse_create_table_with_nested_data_types() {
Box::new(DataType::String(None)),
Box::new(DataType::UInt16)
),
collation: None,
options: vec![],
},
]
@ -723,14 +714,14 @@ fn parse_create_table_with_nested_data_types() {
fn parse_create_table_with_primary_key() {
match clickhouse_and_generic().verified_stmt(concat!(
r#"CREATE TABLE db.table (`i` INT, `k` INT)"#,
" ENGINE = SharedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')",
" ENGINE=SharedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')",
" PRIMARY KEY tuple(i)",
" ORDER BY tuple(i)",
)) {
Statement::CreateTable(CreateTable {
name,
columns,
table_options,
engine,
primary_key,
order_by,
..
@ -741,35 +732,30 @@ fn parse_create_table_with_primary_key() {
ColumnDef {
name: Ident::with_quote('`', "i"),
data_type: DataType::Int(None),
collation: None,
options: vec![],
},
ColumnDef {
name: Ident::with_quote('`', "k"),
data_type: DataType::Int(None),
collation: None,
options: vec![],
},
],
columns
);
let plain_options = match table_options {
CreateTableOptions::Plain(options) => options,
_ => unreachable!(),
};
assert!(plain_options.contains(&SqlOption::NamedParenthesizedList(
NamedParenthesizedList {
key: Ident::new("ENGINE"),
name: Some(Ident::new("SharedMergeTree")),
values: vec![
assert_eq!(
engine,
Some(TableEngine {
name: "SharedMergeTree".to_string(),
parameters: Some(vec![
Ident::with_quote('\'', "/clickhouse/tables/{uuid}/{shard}"),
Ident::with_quote('\'', "{replica}"),
]
}
)));
]),
})
);
fn assert_function(actual: &Function, name: &str, arg: &str) -> bool {
assert_eq!(actual.name, ObjectName::from(vec![Ident::new(name)]));
assert_eq!(actual.name, ObjectName(vec![Ident::new(name)]));
assert_eq!(
actual.args,
FunctionArguments::List(FunctionArgumentList {
@ -814,7 +800,7 @@ fn parse_create_table_with_variant_default_expressions() {
" b DATETIME EPHEMERAL now(),",
" c DATETIME EPHEMERAL,",
" d STRING ALIAS toString(c)",
") ENGINE = MergeTree"
") ENGINE=MergeTree"
);
match clickhouse_and_generic().verified_stmt(sql) {
Statement::CreateTable(CreateTable { columns, .. }) => {
@ -824,10 +810,11 @@ fn parse_create_table_with_variant_default_expressions() {
ColumnDef {
name: Ident::new("a"),
data_type: DataType::Datetime(None),
collation: None,
options: vec![ColumnOptionDef {
name: None,
option: ColumnOption::Materialized(Expr::Function(Function {
name: ObjectName::from(vec![Ident::new("now")]),
name: ObjectName(vec![Ident::new("now")]),
uses_odbc_syntax: false,
args: FunctionArguments::List(FunctionArgumentList {
args: vec![],
@ -845,10 +832,11 @@ fn parse_create_table_with_variant_default_expressions() {
ColumnDef {
name: Ident::new("b"),
data_type: DataType::Datetime(None),
collation: None,
options: vec![ColumnOptionDef {
name: None,
option: ColumnOption::Ephemeral(Some(Expr::Function(Function {
name: ObjectName::from(vec![Ident::new("now")]),
name: ObjectName(vec![Ident::new("now")]),
uses_odbc_syntax: false,
args: FunctionArguments::List(FunctionArgumentList {
args: vec![],
@ -866,6 +854,7 @@ fn parse_create_table_with_variant_default_expressions() {
ColumnDef {
name: Ident::new("c"),
data_type: DataType::Datetime(None),
collation: None,
options: vec![ColumnOptionDef {
name: None,
option: ColumnOption::Ephemeral(None)
@ -874,10 +863,11 @@ fn parse_create_table_with_variant_default_expressions() {
ColumnDef {
name: Ident::new("d"),
data_type: DataType::String(None),
collation: None,
options: vec![ColumnOptionDef {
name: None,
option: ColumnOption::Alias(Expr::Function(Function {
name: ObjectName::from(vec![Ident::new("toString")]),
name: ObjectName(vec![Ident::new("toString")]),
uses_odbc_syntax: false,
args: FunctionArguments::List(FunctionArgumentList {
args: vec![FunctionArg::Unnamed(FunctionArgExpr::Expr(
@ -905,33 +895,33 @@ fn parse_create_table_with_variant_default_expressions() {
fn parse_create_view_with_fields_data_types() {
match clickhouse().verified_stmt(r#"CREATE VIEW v (i "int", f "String") AS SELECT * FROM t"#) {
Statement::CreateView { name, columns, .. } => {
assert_eq!(name, ObjectName::from(vec!["v".into()]));
assert_eq!(name, ObjectName(vec!["v".into()]));
assert_eq!(
columns,
vec![
ViewColumnDef {
name: "i".into(),
data_type: Some(DataType::Custom(
ObjectName::from(vec![Ident {
ObjectName(vec![Ident {
value: "int".into(),
quote_style: Some('"'),
span: Span::empty(),
}]),
vec![]
)),
options: None,
options: None
},
ViewColumnDef {
name: "f".into(),
data_type: Some(DataType::Custom(
ObjectName::from(vec![Ident {
ObjectName(vec![Ident {
value: "String".into(),
quote_style: Some('"'),
span: Span::empty(),
}]),
vec![]
)),
options: None,
options: None
},
]
);
@ -960,113 +950,42 @@ fn parse_limit_by() {
clickhouse_and_generic().verified_stmt(
r#"SELECT * FROM default.last_asset_runs_mv ORDER BY created_at DESC LIMIT 1 BY asset, toStartOfDay(created_at)"#,
);
clickhouse_and_generic().parse_sql_statements(
r#"SELECT * FROM default.last_asset_runs_mv ORDER BY created_at DESC BY asset, toStartOfDay(created_at)"#,
).expect_err("BY without LIMIT");
clickhouse_and_generic()
.parse_sql_statements("SELECT * FROM T OFFSET 5 BY foo")
.expect_err("BY with OFFSET but without LIMIT");
}
#[test]
fn parse_settings_in_query() {
fn check_settings(sql: &str, expected: Vec<Setting>) {
match clickhouse_and_generic().verified_stmt(sql) {
Statement::Query(q) => {
assert_eq!(q.settings, Some(expected));
}
_ => unreachable!(),
match clickhouse_and_generic()
.verified_stmt(r#"SELECT * FROM t SETTINGS max_threads = 1, max_block_size = 10000"#)
{
Statement::Query(query) => {
assert_eq!(
query.settings,
Some(vec![
Setting {
key: Ident::new("max_threads"),
value: Number("1".parse().unwrap(), false)
},
Setting {
key: Ident::new("max_block_size"),
value: Number("10000".parse().unwrap(), false)
},
])
);
}
}
for (sql, expected_settings) in [
(
r#"SELECT * FROM t SETTINGS max_threads = 1, max_block_size = 10000"#,
vec![
Setting {
key: Ident::new("max_threads"),
value: Expr::value(number("1")),
},
Setting {
key: Ident::new("max_block_size"),
value: Expr::value(number("10000")),
},
],
),
(
r#"SELECT * FROM t SETTINGS additional_table_filters = {'table_1': 'x != 2'}"#,
vec![Setting {
key: Ident::new("additional_table_filters"),
value: Expr::Dictionary(vec![DictionaryField {
key: Ident::with_quote('\'', "table_1"),
value: Expr::value(single_quoted_string("x != 2")).into(),
}]),
}],
),
(
r#"SELECT * FROM t SETTINGS additional_result_filter = 'x != 2', query_plan_optimize_lazy_materialization = false"#,
vec![
Setting {
key: Ident::new("additional_result_filter"),
value: Expr::value(single_quoted_string("x != 2")),
},
Setting {
key: Ident::new("query_plan_optimize_lazy_materialization"),
value: Expr::value(Boolean(false)),
},
],
),
] {
check_settings(sql, expected_settings);
_ => unreachable!(),
}
let invalid_cases = vec![
("SELECT * FROM t SETTINGS a", "Expected: =, found: EOF"),
(
"SELECT * FROM t SETTINGS a=",
"Expected: an expression, found: EOF",
),
("SELECT * FROM t SETTINGS a=1, b", "Expected: =, found: EOF"),
(
"SELECT * FROM t SETTINGS a=1, b=",
"Expected: an expression, found: EOF",
),
(
"SELECT * FROM t SETTINGS a = {",
"Expected: identifier, found: EOF",
),
(
"SELECT * FROM t SETTINGS a = {'b'",
"Expected: :, found: EOF",
),
(
"SELECT * FROM t SETTINGS a = {'b': ",
"Expected: an expression, found: EOF",
),
(
"SELECT * FROM t SETTINGS a = {'b': 'c',}",
"Expected: identifier, found: }",
),
(
"SELECT * FROM t SETTINGS a = {'b': 'c', 'd'}",
"Expected: :, found: }",
),
(
"SELECT * FROM t SETTINGS a = {'b': 'c', 'd': }",
"Expected: an expression, found: }",
),
(
"SELECT * FROM t SETTINGS a = {ANY(b)}",
"Expected: :, found: (",
),
"SELECT * FROM t SETTINGS a",
"SELECT * FROM t SETTINGS a=",
"SELECT * FROM t SETTINGS a=1, b",
"SELECT * FROM t SETTINGS a=1, b=",
"SELECT * FROM t SETTINGS a=1, b=c",
];
for (sql, error_msg) in invalid_cases {
assert_eq!(
clickhouse_and_generic()
.parse_sql_statements(sql)
.unwrap_err(),
ParserError(error_msg.to_string())
);
for sql in invalid_cases {
clickhouse_and_generic()
.parse_sql_statements(sql)
.expect_err("Expected: SETTINGS key = value, found: ");
}
}
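
A minimal sketch (not from this diff) of reading the Expr-valued SETTINGS shape tested above; it assumes the public Parser::parse_sql entry point and sqlparser::dialect::ClickHouseDialect.

use sqlparser::ast::Statement;
use sqlparser::dialect::ClickHouseDialect;
use sqlparser::parser::Parser;

fn main() {
    let sql = "SELECT * FROM t SETTINGS max_threads = 1, max_block_size = 10000";
    // Each SETTINGS entry parses to a Setting { key, value } whose value is an Expr.
    match Parser::parse_sql(&ClickHouseDialect {}, sql).unwrap().remove(0) {
        Statement::Query(q) => {
            let keys: Vec<String> = q
                .settings
                .unwrap_or_default()
                .into_iter()
                .map(|s| s.key.value)
                .collect();
            assert_eq!(keys, ["max_threads", "max_block_size"]);
        }
        _ => unreachable!(),
    }
}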
#[test]
@ -1103,15 +1022,17 @@ fn parse_select_parametric_function() {
assert_eq!(parameters.args.len(), 2);
assert_eq!(
parameters.args[0],
FunctionArg::Unnamed(FunctionArgExpr::Expr(Expr::Value(
(Value::Number("0.5".parse().unwrap(), false)).with_empty_span()
)))
FunctionArg::Unnamed(FunctionArgExpr::Expr(Expr::Value(Value::Number(
"0.5".parse().unwrap(),
false
))))
);
assert_eq!(
parameters.args[1],
FunctionArg::Unnamed(FunctionArgExpr::Expr(Expr::Value(
(Value::Number("0.6".parse().unwrap(), false)).with_empty_span()
)))
FunctionArg::Unnamed(FunctionArgExpr::Expr(Expr::Value(Value::Number(
"0.6".parse().unwrap(),
false
))))
);
}
_ => unreachable!(),
@ -1144,6 +1065,61 @@ fn parse_create_materialized_view() {
clickhouse_and_generic().verified_stmt(sql);
}
#[test]
fn parse_group_by_with_modifier() {
let clauses = ["x", "a, b", "ALL"];
let modifiers = [
"WITH ROLLUP",
"WITH CUBE",
"WITH TOTALS",
"WITH ROLLUP WITH CUBE",
];
let expected_modifiers = [
vec![GroupByWithModifier::Rollup],
vec![GroupByWithModifier::Cube],
vec![GroupByWithModifier::Totals],
vec![GroupByWithModifier::Rollup, GroupByWithModifier::Cube],
];
for clause in &clauses {
for (modifier, expected_modifier) in modifiers.iter().zip(expected_modifiers.iter()) {
let sql = format!("SELECT * FROM t GROUP BY {clause} {modifier}");
match clickhouse_and_generic().verified_stmt(&sql) {
Statement::Query(query) => {
let group_by = &query.body.as_select().unwrap().group_by;
if clause == &"ALL" {
assert_eq!(group_by, &GroupByExpr::All(expected_modifier.to_vec()));
} else {
assert_eq!(
group_by,
&GroupByExpr::Expressions(
clause
.split(", ")
.map(|c| Identifier(Ident::new(c)))
.collect(),
expected_modifier.to_vec()
)
);
}
}
_ => unreachable!(),
}
}
}
// invalid cases
let invalid_cases = [
"SELECT * FROM t GROUP BY x WITH",
"SELECT * FROM t GROUP BY x WITH ROLLUP CUBE",
"SELECT * FROM t GROUP BY x WITH WITH ROLLUP",
"SELECT * FROM t GROUP BY WITH ROLLUP",
];
for sql in invalid_cases {
clickhouse_and_generic()
.parse_sql_statements(sql)
.expect_err("Expected: one of ROLLUP or CUBE or TOTALS, found: WITH");
}
}
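
A companion sketch for the GROUP BY modifiers exercised above, under the same Parser::parse_sql / ClickHouseDialect assumptions.

use sqlparser::ast::{GroupByExpr, GroupByWithModifier, Statement};
use sqlparser::dialect::ClickHouseDialect;
use sqlparser::parser::Parser;

fn main() {
    let sql = "SELECT * FROM t GROUP BY x WITH ROLLUP";
    if let Statement::Query(q) = &Parser::parse_sql(&ClickHouseDialect {}, sql).unwrap()[0] {
        // Expressions carries the grouping exprs plus any trailing WITH modifiers.
        if let GroupByExpr::Expressions(exprs, modifiers) = &q.body.as_select().unwrap().group_by {
            assert_eq!(exprs.len(), 1);
            assert_eq!(modifiers, &vec![GroupByWithModifier::Rollup]);
        }
    }
}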
#[test]
fn parse_select_order_by_with_fill_interpolate() {
let sql = "SELECT id, fname, lname FROM customer WHERE id < 5 \
@ -1155,53 +1131,42 @@ fn parse_select_order_by_with_fill_interpolate() {
let select = clickhouse().verified_query(sql);
assert_eq!(
OrderBy {
kind: OrderByKind::Expressions(vec![
exprs: vec![
OrderByExpr {
expr: Expr::Identifier(Ident::new("fname")),
options: OrderByOptions {
asc: Some(true),
nulls_first: Some(true),
},
asc: Some(true),
nulls_first: Some(true),
with_fill: Some(WithFill {
from: Some(Expr::value(number("10"))),
to: Some(Expr::value(number("20"))),
step: Some(Expr::value(number("2"))),
from: Some(Expr::Value(number("10"))),
to: Some(Expr::Value(number("20"))),
step: Some(Expr::Value(number("2"))),
}),
},
OrderByExpr {
expr: Expr::Identifier(Ident::new("lname")),
options: OrderByOptions {
asc: Some(false),
nulls_first: Some(false),
},
asc: Some(false),
nulls_first: Some(false),
with_fill: Some(WithFill {
from: Some(Expr::value(number("30"))),
to: Some(Expr::value(number("40"))),
step: Some(Expr::value(number("3"))),
from: Some(Expr::Value(number("30"))),
to: Some(Expr::Value(number("40"))),
step: Some(Expr::Value(number("3"))),
}),
},
]),
],
interpolate: Some(Interpolate {
exprs: Some(vec![InterpolateExpr {
column: Ident::new("col1"),
expr: Some(Expr::BinaryOp {
left: Box::new(Expr::Identifier(Ident::new("col1"))),
op: BinaryOperator::Plus,
right: Box::new(Expr::value(number("1"))),
right: Box::new(Expr::Value(number("1"))),
}),
}])
})
},
select.order_by.expect("ORDER BY expected")
);
assert_eq!(
select.limit_clause,
Some(LimitClause::LimitOffset {
limit: Some(Expr::value(number("2"))),
offset: None,
limit_by: vec![]
})
);
assert_eq!(Some(Expr::Value(number("2"))), select.limit);
}
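
A construction-side sketch of the reworked ORDER BY nodes above; every field name is taken from the new side of this diff, so this only illustrates the shape.

use sqlparser::ast::{Expr, Ident, OrderByExpr, OrderByOptions, WithFill};

fn main() {
    // ASC/DESC and NULLS FIRST/LAST now live in OrderByOptions rather than
    // as flat fields on OrderByExpr.
    let item = OrderByExpr {
        expr: Expr::Identifier(Ident::new("fname")),
        options: OrderByOptions {
            asc: Some(true),
            nulls_first: Some(true),
        },
        with_fill: Some(WithFill {
            from: None,
            to: None,
            step: None,
        }),
    };
    println!("{item}");
}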
#[test]
@ -1242,15 +1207,11 @@ fn parse_with_fill() {
let select = clickhouse().verified_query(sql);
assert_eq!(
Some(WithFill {
from: Some(Expr::value(number("10"))),
to: Some(Expr::value(number("20"))),
step: Some(Expr::value(number("2"))),
})
.as_ref(),
match select.order_by.expect("ORDER BY expected").kind {
OrderByKind::Expressions(ref exprs) => exprs[0].with_fill.as_ref(),
_ => None,
}
from: Some(Expr::Value(number("10"))),
to: Some(Expr::Value(number("20"))),
step: Some(Expr::Value(number("2"))),
}),
select.order_by.expect("ORDER BY expected").exprs[0].with_fill
);
}
@ -1285,7 +1246,7 @@ fn parse_interpolate_body_with_columns() {
expr: Some(Expr::BinaryOp {
left: Box::new(Expr::Identifier(Ident::new("col1"))),
op: BinaryOperator::Plus,
right: Box::new(Expr::value(number("1"))),
right: Box::new(Expr::Value(number("1"))),
}),
},
InterpolateExpr {
@ -1297,17 +1258,12 @@ fn parse_interpolate_body_with_columns() {
expr: Some(Expr::BinaryOp {
left: Box::new(Expr::Identifier(Ident::new("col4"))),
op: BinaryOperator::Plus,
right: Box::new(Expr::value(number("4"))),
right: Box::new(Expr::Value(number("4"))),
}),
},
])
})
.as_ref(),
select
.order_by
.expect("ORDER BY expected")
.interpolate
.as_ref()
}),
select.order_by.expect("ORDER BY expected").interpolate
);
}
@ -1316,12 +1272,8 @@ fn parse_interpolate_without_body() {
let sql = "SELECT fname FROM customer ORDER BY fname WITH FILL INTERPOLATE";
let select = clickhouse().verified_query(sql);
assert_eq!(
Some(Interpolate { exprs: None }).as_ref(),
select
.order_by
.expect("ORDER BY expected")
.interpolate
.as_ref()
Some(Interpolate { exprs: None }),
select.order_by.expect("ORDER BY expected").interpolate
);
}
@ -1332,13 +1284,8 @@ fn parse_interpolate_with_empty_body() {
assert_eq!(
Some(Interpolate {
exprs: Some(vec![])
})
.as_ref(),
select
.order_by
.expect("ORDER BY expected")
.interpolate
.as_ref()
}),
select.order_by.expect("ORDER BY expected").interpolate
);
}
@ -1352,9 +1299,7 @@ fn test_prewhere() {
Some(&BinaryOp {
left: Box::new(Identifier(Ident::new("x"))),
op: BinaryOperator::Eq,
right: Box::new(Expr::Value(
(Value::Number("1".parse().unwrap(), false)).with_empty_span()
)),
right: Box::new(Expr::Value(Value::Number("1".parse().unwrap(), false))),
})
);
let selection = query.as_ref().body.as_select().unwrap().selection.as_ref();
@ -1363,9 +1308,7 @@ fn test_prewhere() {
Some(&BinaryOp {
left: Box::new(Identifier(Ident::new("y"))),
op: BinaryOperator::Eq,
right: Box::new(Expr::Value(
(Value::Number("2".parse().unwrap(), false)).with_empty_span()
)),
right: Box::new(Expr::Value(Value::Number("2".parse().unwrap(), false))),
})
);
}
@ -1381,17 +1324,13 @@ fn test_prewhere() {
left: Box::new(BinaryOp {
left: Box::new(Identifier(Ident::new("x"))),
op: BinaryOperator::Eq,
right: Box::new(Expr::Value(
(Value::Number("1".parse().unwrap(), false)).with_empty_span()
)),
right: Box::new(Expr::Value(Value::Number("1".parse().unwrap(), false))),
}),
op: BinaryOperator::And,
right: Box::new(BinaryOp {
left: Box::new(Identifier(Ident::new("y"))),
op: BinaryOperator::Eq,
right: Box::new(Expr::Value(
(Value::Number("2".parse().unwrap(), false)).with_empty_span()
)),
right: Box::new(Expr::Value(Value::Number("2".parse().unwrap(), false))),
}),
})
);
@ -1415,16 +1354,16 @@ fn parse_use() {
for object_name in &valid_object_names {
// Test single identifier without quotes
assert_eq!(
clickhouse().verified_stmt(&format!("USE {object_name}")),
Statement::Use(Use::Object(ObjectName::from(vec![Ident::new(
clickhouse().verified_stmt(&format!("USE {}", object_name)),
Statement::Use(Use::Object(ObjectName(vec![Ident::new(
object_name.to_string()
)])))
);
for &quote in &quote_styles {
// Test single identifier with different types of quotes

assert_eq!(
clickhouse().verified_stmt(&format!("USE {quote}{object_name}{quote}")),
Statement::Use(Use::Object(ObjectName::from(vec![Ident::with_quote(
clickhouse().verified_stmt(&format!("USE {0}{1}{0}", quote, object_name)),
Statement::Use(Use::Object(ObjectName(vec![Ident::with_quote(
quote,
object_name.to_string(),
)])))
@ -1437,7 +1376,7 @@ fn parse_use() {
fn test_query_with_format_clause() {
let format_options = vec!["TabSeparated", "JSONCompact", "NULL"];
for format in &format_options {
let sql = format!("SELECT * FROM t FORMAT {format}");
let sql = format!("SELECT * FROM t FORMAT {}", format);
match clickhouse_and_generic().verified_stmt(&sql) {
Statement::Query(query) => {
if *format == "NULL" {
@ -1499,9 +1438,10 @@ fn parse_create_table_on_commit_and_as_query() {
assert_eq!(on_commit, Some(OnCommit::PreserveRows));
assert_eq!(
query.unwrap().body.as_select().unwrap().projection,
vec![UnnamedExpr(Expr::Value(
(Value::Number("1".parse().unwrap(), false)).with_empty_span()
))]
vec![UnnamedExpr(Expr::Value(Value::Number(
"1".parse().unwrap(),
false
)))]
);
}
_ => unreachable!(),
@ -1514,9 +1454,9 @@ fn parse_freeze_and_unfreeze_partition() {
for operation_name in &["FREEZE", "UNFREEZE"] {
let sql = format!("ALTER TABLE t {operation_name} PARTITION '2024-08-14'");
let expected_partition = Partition::Expr(Expr::Value(
Value::SingleQuotedString("2024-08-14".to_string()).with_empty_span(),
));
let expected_partition = Partition::Expr(Expr::Value(Value::SingleQuotedString(
"2024-08-14".to_string(),
)));
match clickhouse_and_generic().verified_stmt(&sql) {
Statement::AlterTable { operations, .. } => {
assert_eq!(operations.len(), 1);
@ -1544,9 +1484,9 @@ fn parse_freeze_and_unfreeze_partition() {
match clickhouse_and_generic().verified_stmt(&sql) {
Statement::AlterTable { operations, .. } => {
assert_eq!(operations.len(), 1);
let expected_partition = Partition::Expr(Expr::Value(
Value::SingleQuotedString("2024-08-14".to_string()).with_empty_span(),
));
let expected_partition = Partition::Expr(Expr::Value(Value::SingleQuotedString(
"2024-08-14".to_string(),
)));
let expected_operation = if operation_name == &"FREEZE" {
AlterTableOperation::FreezePartition {
partition: expected_partition,
@ -1620,11 +1560,11 @@ fn parse_select_table_function_settings() {
settings: Some(vec![
Setting {
key: "s0".into(),
value: Expr::value(number("3")),
value: Value::Number("3".parse().unwrap(), false),
},
Setting {
key: "s1".into(),
value: Expr::value(single_quoted_string("s")),
value: Value::SingleQuotedString("s".into()),
},
]),
},
@ -1645,11 +1585,11 @@ fn parse_select_table_function_settings() {
settings: Some(vec![
Setting {
key: "s0".into(),
value: Expr::value(number("3")),
value: Value::Number("3".parse().unwrap(), false),
},
Setting {
key: "s1".into(),
value: Expr::value(single_quoted_string("s")),
value: Value::SingleQuotedString("s".into()),
},
]),
},
@ -1659,6 +1599,7 @@ fn parse_select_table_function_settings() {
"SELECT * FROM t(SETTINGS a=)",
"SELECT * FROM t(SETTINGS a=1, b)",
"SELECT * FROM t(SETTINGS a=1, b=)",
"SELECT * FROM t(SETTINGS a=1, b=c)",
];
for sql in invalid_cases {
clickhouse_and_generic()

File diff suppressed because it is too large

View file

@ -41,7 +41,7 @@ fn custom_prefix_parser() -> Result<(), ParserError> {
fn parse_prefix(&self, parser: &mut Parser) -> Option<Result<Expr, ParserError>> {
if parser.consume_token(&Token::Number("1".to_string(), false)) {
Some(Ok(Expr::Value(Value::Null.with_empty_span())))
Some(Ok(Expr::Value(Value::Null)))
} else {
None
}

View file

@ -15,11 +15,9 @@
// specific language governing permissions and limitations
// under the License.
use sqlparser::ast::helpers::attached_token::AttachedToken;
use sqlparser::ast::*;
use sqlparser::dialect::{DatabricksDialect, GenericDialect};
use sqlparser::parser::ParserError;
use sqlparser::tokenizer::Span;
use test_utils::*;
#[macro_use]
@ -49,9 +47,7 @@ fn test_databricks_identifiers() {
databricks()
.verified_only_select(r#"SELECT "Ä""#)
.projection[0],
SelectItem::UnnamedExpr(Expr::Value(
(Value::DoubleQuotedString("Ä".to_owned())).with_empty_span()
))
SelectItem::UnnamedExpr(Expr::Value(Value::DoubleQuotedString("Ä".to_owned())))
);
}
@ -66,9 +62,9 @@ fn test_databricks_exists() {
call(
"array",
[
Expr::value(number("1")),
Expr::value(number("2")),
Expr::value(number("3"))
Expr::Value(number("1")),
Expr::Value(number("2")),
Expr::Value(number("3"))
]
),
Expr::Lambda(LambdaFunction {
@ -103,44 +99,40 @@ fn test_databricks_lambdas() {
call(
"array",
[
Expr::value(Value::SingleQuotedString("Hello".to_owned())),
Expr::value(Value::SingleQuotedString("World".to_owned()))
Expr::Value(Value::SingleQuotedString("Hello".to_owned())),
Expr::Value(Value::SingleQuotedString("World".to_owned()))
]
),
Expr::Lambda(LambdaFunction {
params: OneOrManyWithParens::Many(vec![Ident::new("p1"), Ident::new("p2")]),
body: Box::new(Expr::Case {
case_token: AttachedToken::empty(),
end_token: AttachedToken::empty(),
operand: None,
conditions: vec![
CaseWhen {
condition: Expr::BinaryOp {
left: Box::new(Expr::Identifier(Ident::new("p1"))),
op: BinaryOperator::Eq,
right: Box::new(Expr::Identifier(Ident::new("p2")))
},
result: Expr::value(number("0"))
},
CaseWhen {
condition: Expr::BinaryOp {
left: Box::new(call(
"reverse",
[Expr::Identifier(Ident::new("p1"))]
)),
op: BinaryOperator::Lt,
right: Box::new(call(
"reverse",
[Expr::Identifier(Ident::new("p2"))]
)),
},
result: Expr::UnaryOp {
op: UnaryOperator::Minus,
expr: Box::new(Expr::value(number("1")))
}
Expr::BinaryOp {
left: Box::new(Expr::Identifier(Ident::new("p1"))),
op: BinaryOperator::Eq,
right: Box::new(Expr::Identifier(Ident::new("p2")))
},
Expr::BinaryOp {
left: Box::new(call(
"reverse",
[Expr::Identifier(Ident::new("p1"))]
)),
op: BinaryOperator::Lt,
right: Box::new(call(
"reverse",
[Expr::Identifier(Ident::new("p2"))]
))
}
],
else_result: Some(Box::new(Expr::value(number("1"))))
results: vec![
Expr::Value(number("0")),
Expr::UnaryOp {
op: UnaryOperator::Minus,
expr: Box::new(Expr::Value(number("1")))
}
],
else_result: Some(Box::new(Expr::Value(number("1"))))
})
})
]
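
The lambda body above uses the reworked CASE node, which pairs each WHEN with its THEN in a single CaseWhen instead of the old parallel conditions/results vectors; a hand-built sketch using only types visible in this diff.

use sqlparser::ast::helpers::attached_token::AttachedToken;
use sqlparser::ast::{CaseWhen, Expr, Ident};

fn main() {
    // CASE WHEN flag THEN x END
    let case = Expr::Case {
        case_token: AttachedToken::empty(),
        end_token: AttachedToken::empty(),
        operand: None,
        conditions: vec![CaseWhen {
            condition: Expr::Identifier(Ident::new("flag")),
            result: Expr::Identifier(Ident::new("x")),
        }],
        else_result: None,
    };
    let _ = case;
}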
@ -160,12 +152,12 @@ fn test_values_clause() {
explicit_row: false,
rows: vec![
vec![
Expr::Value((Value::DoubleQuotedString("one".to_owned())).with_empty_span()),
Expr::value(number("1")),
Expr::Value(Value::DoubleQuotedString("one".to_owned())),
Expr::Value(number("1")),
],
vec![
Expr::Value((Value::SingleQuotedString("two".to_owned())).with_empty_span()),
Expr::value(number("2")),
Expr::Value(Value::SingleQuotedString("two".to_owned())),
Expr::Value(number("2")),
],
],
};
@ -193,9 +185,7 @@ fn test_values_clause() {
"SELECT * FROM values",
));
assert_eq!(
Some(&table_from_name(ObjectName::from(vec![Ident::new(
"values"
)]))),
Some(&table_from_name(ObjectName(vec![Ident::new("values")]))),
query
.body
.as_select()
@ -214,16 +204,16 @@ fn parse_use() {
for object_name in &valid_object_names {
// Test single identifier without quotes
assert_eq!(
databricks().verified_stmt(&format!("USE {object_name}")),
Statement::Use(Use::Object(ObjectName::from(vec![Ident::new(
databricks().verified_stmt(&format!("USE {}", object_name)),
Statement::Use(Use::Object(ObjectName(vec![Ident::new(
object_name.to_string()
)])))
);
for &quote in &quote_styles {
// Test single identifier with different types of quotes
assert_eq!(
databricks().verified_stmt(&format!("USE {quote}{object_name}{quote}")),
Statement::Use(Use::Object(ObjectName::from(vec![Ident::with_quote(
databricks().verified_stmt(&format!("USE {0}{1}{0}", quote, object_name)),
Statement::Use(Use::Object(ObjectName(vec![Ident::with_quote(
quote,
object_name.to_string(),
)])))
@ -234,22 +224,22 @@ fn parse_use() {
for &quote in &quote_styles {
// Test single identifier with keyword and different types of quotes
assert_eq!(
databricks().verified_stmt(&format!("USE CATALOG {quote}my_catalog{quote}")),
Statement::Use(Use::Catalog(ObjectName::from(vec![Ident::with_quote(
databricks().verified_stmt(&format!("USE CATALOG {0}my_catalog{0}", quote)),
Statement::Use(Use::Catalog(ObjectName(vec![Ident::with_quote(
quote,
"my_catalog".to_string(),
)])))
);
assert_eq!(
databricks().verified_stmt(&format!("USE DATABASE {quote}my_database{quote}")),
Statement::Use(Use::Database(ObjectName::from(vec![Ident::with_quote(
databricks().verified_stmt(&format!("USE DATABASE {0}my_database{0}", quote)),
Statement::Use(Use::Database(ObjectName(vec![Ident::with_quote(
quote,
"my_database".to_string(),
)])))
);
assert_eq!(
databricks().verified_stmt(&format!("USE SCHEMA {quote}my_schema{quote}")),
Statement::Use(Use::Schema(ObjectName::from(vec![Ident::with_quote(
databricks().verified_stmt(&format!("USE SCHEMA {0}my_schema{0}", quote)),
Statement::Use(Use::Schema(ObjectName(vec![Ident::with_quote(
quote,
"my_schema".to_string(),
)])))
@ -259,19 +249,15 @@ fn parse_use() {
// Test single identifier with keyword and no quotes
assert_eq!(
databricks().verified_stmt("USE CATALOG my_catalog"),
Statement::Use(Use::Catalog(ObjectName::from(vec![Ident::new(
"my_catalog"
)])))
Statement::Use(Use::Catalog(ObjectName(vec![Ident::new("my_catalog")])))
);
assert_eq!(
databricks().verified_stmt("USE DATABASE my_schema"),
Statement::Use(Use::Database(ObjectName::from(vec![Ident::new(
"my_schema"
)])))
Statement::Use(Use::Database(ObjectName(vec![Ident::new("my_schema")])))
);
assert_eq!(
databricks().verified_stmt("USE SCHEMA my_schema"),
Statement::Use(Use::Schema(ObjectName::from(vec![Ident::new("my_schema")])))
Statement::Use(Use::Schema(ObjectName(vec![Ident::new("my_schema")])))
);
// Test invalid syntax - missing identifier
@ -292,8 +278,8 @@ fn parse_databricks_struct_function() {
.projection[0],
SelectItem::UnnamedExpr(Expr::Struct {
values: vec![
Expr::value(number("1")),
Expr::Value((Value::SingleQuotedString("foo".to_string())).with_empty_span())
Expr::Value(number("1")),
Expr::Value(Value::SingleQuotedString("foo".to_string()))
],
fields: vec![]
})
@ -305,62 +291,16 @@ fn parse_databricks_struct_function() {
SelectItem::UnnamedExpr(Expr::Struct {
values: vec![
Expr::Named {
expr: Expr::value(number("1")).into(),
expr: Expr::Value(number("1")).into(),
name: Ident::new("one")
},
Expr::Named {
expr: Expr::Value(
(Value::SingleQuotedString("foo".to_string())).with_empty_span()
)
.into(),
expr: Expr::Value(Value::SingleQuotedString("foo".to_string())).into(),
name: Ident::new("foo")
},
Expr::Value((Value::Boolean(false)).with_empty_span())
Expr::Value(Value::Boolean(false))
],
fields: vec![]
})
);
}
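
A sketch of building the struct projection above by hand; Value::Number stands in for the tests' number() helper, and Expr::value wraps a Value with an empty span as on the new side of this diff.

use sqlparser::ast::{Expr, Ident, Value};

fn main() {
    // struct(1 AS one, 'foo' AS foo)
    let s = Expr::Struct {
        values: vec![
            Expr::Named {
                expr: Expr::value(Value::Number("1".parse().unwrap(), false)).into(),
                name: Ident::new("one"),
            },
            Expr::Named {
                expr: Expr::value(Value::SingleQuotedString("foo".into())).into(),
                name: Ident::new("foo"),
            },
        ],
        fields: vec![],
    };
    let _ = s;
}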
#[test]
fn data_type_timestamp_ntz() {
// Literal
assert_eq!(
databricks().verified_expr("TIMESTAMP_NTZ '2025-03-29T18:52:00'"),
Expr::TypedString {
data_type: DataType::TimestampNtz,
value: ValueWithSpan {
value: Value::SingleQuotedString("2025-03-29T18:52:00".to_owned()),
span: Span::empty(),
}
}
);
// Cast
assert_eq!(
databricks().verified_expr("(created_at)::TIMESTAMP_NTZ"),
Expr::Cast {
kind: CastKind::DoubleColon,
expr: Box::new(Expr::Nested(Box::new(Expr::Identifier(
"created_at".into()
)))),
data_type: DataType::TimestampNtz,
format: None
}
);
// Column definition
match databricks().verified_stmt("CREATE TABLE foo (x TIMESTAMP_NTZ)") {
Statement::CreateTable(CreateTable { columns, .. }) => {
assert_eq!(
columns,
vec![ColumnDef {
name: "x".into(),
data_type: DataType::TimestampNtz,
options: vec![],
}]
);
}
s => panic!("Unexpected statement: {s:?}"),
}
}
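
A round-trip sketch for the new TIMESTAMP_NTZ support, assuming sqlparser::dialect::DatabricksDialect and an explicit CAST spelling.

use sqlparser::dialect::DatabricksDialect;
use sqlparser::parser::Parser;

fn main() {
    // CAST targets using DataType::TimestampNtz serialize back verbatim.
    let sql = "SELECT CAST(created_at AS TIMESTAMP_NTZ) FROM events";
    let ast = Parser::parse_sql(&DatabricksDialect {}, sql).unwrap();
    assert_eq!(ast[0].to_string(), sql);
}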

View file

@ -24,7 +24,6 @@ use test_utils::*;
use sqlparser::ast::*;
use sqlparser::dialect::{DuckDbDialect, GenericDialect};
use sqlparser::parser::ParserError;
fn duckdb() -> TestedDialects {
TestedDialects::new(vec![Box::new(DuckDbDialect {})])
@ -45,12 +44,10 @@ fn test_struct() {
StructField {
field_name: Some(Ident::new("v")),
field_type: DataType::Varchar(None),
options: None,
},
StructField {
field_name: Some(Ident::new("i")),
field_type: DataType::Integer(None),
options: None,
},
],
StructBracketKind::Parentheses,
@ -63,6 +60,7 @@ fn test_struct() {
vec![ColumnDef {
name: "s".into(),
data_type: struct_type1.clone(),
collation: None,
options: vec![],
}]
);
@ -77,6 +75,7 @@ fn test_struct() {
Box::new(struct_type1),
None
)),
collation: None,
options: vec![],
}]
);
@ -87,7 +86,6 @@ fn test_struct() {
StructField {
field_name: Some(Ident::new("v")),
field_type: DataType::Varchar(None),
options: None,
},
StructField {
field_name: Some(Ident::new("s")),
@ -96,17 +94,14 @@ fn test_struct() {
StructField {
field_name: Some(Ident::new("a1")),
field_type: DataType::Integer(None),
options: None,
},
StructField {
field_name: Some(Ident::new("a2")),
field_type: DataType::Varchar(None),
options: None,
},
],
StructBracketKind::Parentheses,
),
options: None,
},
],
StructBracketKind::Parentheses,
@ -125,6 +120,7 @@ fn test_struct() {
Box::new(struct_type2),
None
)),
collation: None,
options: vec![],
}]
);
@ -164,7 +160,7 @@ fn test_select_wildcard_with_exclude() {
let select =
duckdb().verified_only_select("SELECT name.* EXCLUDE department_id FROM employee_table");
let expected = SelectItem::QualifiedWildcard(
SelectItemQualifiedWildcardKind::ObjectName(ObjectName::from(vec![Ident::new("name")])),
ObjectName(vec![Ident::new("name")]),
WildcardAdditionalOptions {
opt_exclude: Some(ExcludeSelectItem::Single(Ident::new("department_id"))),
..Default::default()
@ -195,7 +191,7 @@ fn test_create_macro() {
let expected = Statement::CreateMacro {
or_replace: false,
temporary: false,
name: ObjectName::from(vec![Ident::new("schema"), Ident::new("add")]),
name: ObjectName(vec![Ident::new("schema"), Ident::new("add")]),
args: Some(vec![MacroArg::new("a"), MacroArg::new("b")]),
definition: MacroDefinition::Expr(Expr::BinaryOp {
left: Box::new(Expr::Identifier(Ident::new("a"))),
@ -212,12 +208,12 @@ fn test_create_macro_default_args() {
let expected = Statement::CreateMacro {
or_replace: false,
temporary: false,
name: ObjectName::from(vec![Ident::new("add_default")]),
name: ObjectName(vec![Ident::new("add_default")]),
args: Some(vec![
MacroArg::new("a"),
MacroArg {
name: Ident::new("b"),
default_expr: Some(Expr::value(number("5"))),
default_expr: Some(Expr::Value(number("5"))),
},
]),
definition: MacroDefinition::Expr(Expr::BinaryOp {
@ -240,7 +236,7 @@ fn test_create_table_macro() {
let expected = Statement::CreateMacro {
or_replace: true,
temporary: true,
name: ObjectName::from(vec![Ident::new("dynamic_table")]),
name: ObjectName(vec![Ident::new("dynamic_table")]),
args: Some(vec![
MacroArg::new("col1_value"),
MacroArg::new("col2_value"),
@ -269,11 +265,10 @@ fn test_select_union_by_name() {
distinct: None,
top: None,
projection: vec![SelectItem::Wildcard(WildcardAdditionalOptions::default())],
exclude: None,
top_before_distinct: false,
into: None,
from: vec![TableWithJoins {
relation: table_from_name(ObjectName::from(vec![Ident {
relation: table_from_name(ObjectName(vec![Ident {
value: "capitals".to_string(),
quote_style: None,
span: Span::empty(),
@ -293,18 +288,16 @@ fn test_select_union_by_name() {
qualify: None,
value_table_mode: None,
connect_by: None,
flavor: SelectFlavor::Standard,
}))),
right: Box::<SetExpr>::new(SetExpr::Select(Box::new(Select {
select_token: AttachedToken::empty(),
distinct: None,
top: None,
projection: vec![SelectItem::Wildcard(WildcardAdditionalOptions::default())],
exclude: None,
top_before_distinct: false,
into: None,
from: vec![TableWithJoins {
relation: table_from_name(ObjectName::from(vec![Ident {
relation: table_from_name(ObjectName(vec![Ident {
value: "weather".to_string(),
quote_style: None,
span: Span::empty(),
@ -324,7 +317,6 @@ fn test_select_union_by_name() {
qualify: None,
value_table_mode: None,
connect_by: None,
flavor: SelectFlavor::Standard,
}))),
});
assert_eq!(ast.body, expected);
@ -361,32 +353,6 @@ fn test_duckdb_load_extension() {
);
}
#[test]
fn test_duckdb_specific_int_types() {
let duckdb_dtypes = vec![
("UTINYINT", DataType::UTinyInt),
("USMALLINT", DataType::USmallInt),
("UBIGINT", DataType::UBigInt),
("UHUGEINT", DataType::UHugeInt),
("HUGEINT", DataType::HugeInt),
];
for (dtype_string, data_type) in duckdb_dtypes {
let sql = format!("SELECT 123::{dtype_string}");
let select = duckdb().verified_only_select(&sql);
assert_eq!(
&Expr::Cast {
kind: CastKind::DoubleColon,
expr: Box::new(Expr::Value(
Value::Number("123".parse().unwrap(), false).with_empty_span()
)),
data_type: data_type.clone(),
format: None,
},
expr_from_projection(&select.projection[0])
);
}
}
#[test]
fn test_duckdb_struct_literal() {
// Struct literal syntax: https://duckdb.org/docs/sql/data_types/struct#creating-structs
@ -398,15 +364,15 @@ fn test_duckdb_struct_literal() {
&Expr::Dictionary(vec![
DictionaryField {
key: Ident::with_quote('\'', "a"),
value: Box::new(Expr::value(number("1"))),
value: Box::new(Expr::Value(number("1"))),
},
DictionaryField {
key: Ident::with_quote('\'', "b"),
value: Box::new(Expr::value(number("2"))),
value: Box::new(Expr::Value(number("2"))),
},
DictionaryField {
key: Ident::with_quote('\'', "c"),
value: Box::new(Expr::value(number("3"))),
value: Box::new(Expr::Value(number("3"))),
},
],),
expr_from_projection(&select.projection[0])
@ -416,9 +382,7 @@ fn test_duckdb_struct_literal() {
&Expr::Array(Array {
elem: vec![Expr::Dictionary(vec![DictionaryField {
key: Ident::with_quote('\'', "a"),
value: Box::new(Expr::Value(
(Value::SingleQuotedString("abc".to_string())).with_empty_span()
)),
value: Box::new(Expr::Value(Value::SingleQuotedString("abc".to_string()))),
},],)],
named: false
}),
@ -428,7 +392,7 @@ fn test_duckdb_struct_literal() {
&Expr::Dictionary(vec![
DictionaryField {
key: Ident::with_quote('\'', "a"),
value: Box::new(Expr::value(number("1"))),
value: Box::new(Expr::Value(number("1"))),
},
DictionaryField {
key: Ident::with_quote('\'', "b"),
@ -447,14 +411,11 @@ fn test_duckdb_struct_literal() {
&Expr::Dictionary(vec![
DictionaryField {
key: Ident::with_quote('\'', "a"),
value: Expr::value(number("1")).into(),
value: Expr::Value(number("1")).into(),
},
DictionaryField {
key: Ident::with_quote('\'', "b"),
value: Expr::Value(
(Value::SingleQuotedString("abc".to_string())).with_empty_span()
)
.into(),
value: Expr::Value(Value::SingleQuotedString("abc".to_string())).into(),
},
],),
expr_from_projection(&select.projection[3])
@ -471,7 +432,7 @@ fn test_duckdb_struct_literal() {
key: Ident::with_quote('\'', "a"),
value: Expr::Dictionary(vec![DictionaryField {
key: Ident::with_quote('\'', "aa"),
value: Expr::value(number("1")).into(),
value: Expr::Value(number("1")).into(),
}],)
.into(),
}],),
@ -626,7 +587,7 @@ fn test_duckdb_named_argument_function_with_assignment_operator() {
let select = duckdb_and_generic().verified_only_select(sql);
assert_eq!(
&Expr::Function(Function {
name: ObjectName::from(vec![Ident::new("FUN")]),
name: ObjectName(vec![Ident::new("FUN")]),
uses_odbc_syntax: false,
parameters: FunctionArguments::None,
args: FunctionArguments::List(FunctionArgumentList {
@ -634,16 +595,16 @@ fn test_duckdb_named_argument_function_with_assignment_operator() {
args: vec![
FunctionArg::Named {
name: Ident::new("a"),
arg: FunctionArgExpr::Expr(Expr::Value(
(Value::SingleQuotedString("1".to_owned())).with_empty_span()
)),
arg: FunctionArgExpr::Expr(Expr::Value(Value::SingleQuotedString(
"1".to_owned()
))),
operator: FunctionArgOperator::Assignment
},
FunctionArg::Named {
name: Ident::new("b"),
arg: FunctionArgExpr::Expr(Expr::Value(
(Value::SingleQuotedString("2".to_owned())).with_empty_span()
)),
arg: FunctionArgExpr::Expr(Expr::Value(Value::SingleQuotedString(
"2".to_owned()
))),
operator: FunctionArgOperator::Assignment
},
],
@ -672,14 +633,14 @@ fn test_array_index() {
&Expr::CompoundFieldAccess {
root: Box::new(Expr::Array(Array {
elem: vec![
Expr::Value((Value::SingleQuotedString("a".to_owned())).with_empty_span()),
Expr::Value((Value::SingleQuotedString("b".to_owned())).with_empty_span()),
Expr::Value((Value::SingleQuotedString("c".to_owned())).with_empty_span())
Expr::Value(Value::SingleQuotedString("a".to_owned())),
Expr::Value(Value::SingleQuotedString("b".to_owned())),
Expr::Value(Value::SingleQuotedString("c".to_owned()))
],
named: false
})),
access_chain: vec![AccessExpr::Subscript(Subscript::Index {
index: Expr::value(number("3"))
index: Expr::Value(number("3"))
})]
},
expr
@ -699,8 +660,7 @@ fn test_duckdb_union_datatype() {
if_not_exists: Default::default(),
transient: Default::default(),
volatile: Default::default(),
iceberg: Default::default(),
name: ObjectName::from(vec!["tbl1".into()]),
name: ObjectName(vec!["tbl1".into()]),
columns: vec![
ColumnDef {
name: "one".into(),
@ -708,6 +668,7 @@ fn test_duckdb_union_datatype() {
field_name: "a".into(),
field_type: DataType::Int(None)
}]),
collation: Default::default(),
options: Default::default()
},
ColumnDef {
@ -722,6 +683,7 @@ fn test_duckdb_union_datatype() {
field_type: DataType::Int(None)
}
]),
collation: Default::default(),
options: Default::default()
},
ColumnDef {
@ -733,6 +695,7 @@ fn test_duckdb_union_datatype() {
field_type: DataType::Int(None)
}])
}]),
collation: Default::default(),
options: Default::default()
}
],
@ -744,13 +707,19 @@ fn test_duckdb_union_datatype() {
storage: Default::default(),
location: Default::default()
}),
table_properties: Default::default(),
with_options: Default::default(),
file_format: Default::default(),
location: Default::default(),
query: Default::default(),
without_rowid: Default::default(),
like: Default::default(),
clone: Default::default(),
engine: Default::default(),
comment: Default::default(),
auto_increment_offset: Default::default(),
default_charset: Default::default(),
collation: Default::default(),
on_commit: Default::default(),
on_cluster: Default::default(),
primary_key: Default::default(),
@ -758,7 +727,7 @@ fn test_duckdb_union_datatype() {
partition_by: Default::default(),
cluster_by: Default::default(),
clustered_by: Default::default(),
inherits: Default::default(),
options: Default::default(),
strict: Default::default(),
copy_grants: Default::default(),
enable_schema_evolution: Default::default(),
@ -768,13 +737,7 @@ fn test_duckdb_union_datatype() {
default_ddl_collation: Default::default(),
with_aggregation_policy: Default::default(),
with_row_access_policy: Default::default(),
with_tags: Default::default(),
base_location: Default::default(),
external_volume: Default::default(),
catalog: Default::default(),
catalog_sync: Default::default(),
storage_serialization_policy: Default::default(),
table_options: CreateTableOptions::None
with_tags: Default::default()
}),
stmt
);
@ -795,16 +758,16 @@ fn parse_use() {
for object_name in &valid_object_names {
// Test single identifier without quotes
assert_eq!(
duckdb().verified_stmt(&format!("USE {object_name}")),
Statement::Use(Use::Object(ObjectName::from(vec![Ident::new(
duckdb().verified_stmt(&format!("USE {}", object_name)),
Statement::Use(Use::Object(ObjectName(vec![Ident::new(
object_name.to_string()
)])))
);
for &quote in &quote_styles {
// Test single identifier with different types of quotes
assert_eq!(
duckdb().verified_stmt(&format!("USE {quote}{object_name}{quote}")),
Statement::Use(Use::Object(ObjectName::from(vec![Ident::with_quote(
duckdb().verified_stmt(&format!("USE {0}{1}{0}", quote, object_name)),
Statement::Use(Use::Object(ObjectName(vec![Ident::with_quote(
quote,
object_name.to_string(),
)])))
@ -815,10 +778,8 @@ fn parse_use() {
for &quote in &quote_styles {
// Test double identifier with different types of quotes
assert_eq!(
duckdb().verified_stmt(&format!(
"USE {quote}CATALOG{quote}.{quote}my_schema{quote}"
)),
Statement::Use(Use::Object(ObjectName::from(vec![
duckdb().verified_stmt(&format!("USE {0}CATALOG{0}.{0}my_schema{0}", quote)),
Statement::Use(Use::Object(ObjectName(vec![
Ident::with_quote(quote, "CATALOG"),
Ident::with_quote(quote, "my_schema")
])))
@ -827,38 +788,9 @@ fn parse_use() {
// Test double identifier without quotes
assert_eq!(
duckdb().verified_stmt("USE mydb.my_schema"),
Statement::Use(Use::Object(ObjectName::from(vec![
Statement::Use(Use::Object(ObjectName(vec![
Ident::new("mydb"),
Ident::new("my_schema")
])))
);
}
#[test]
fn test_duckdb_trim() {
let real_sql = r#"SELECT customer_id, TRIM(item_price_id, '"', "a") AS item_price_id FROM models_staging.subscriptions"#;
assert_eq!(duckdb().verified_stmt(real_sql).to_string(), real_sql);
let sql_only_select = "SELECT TRIM('xyz', 'a')";
let select = duckdb().verified_only_select(sql_only_select);
assert_eq!(
&Expr::Trim {
expr: Box::new(Expr::Value(
Value::SingleQuotedString("xyz".to_owned()).with_empty_span()
)),
trim_where: None,
trim_what: None,
trim_characters: Some(vec![Expr::Value(
Value::SingleQuotedString("a".to_owned()).with_empty_span()
)]),
},
expr_from_projection(only(&select.projection))
);
// missing comma separation
let error_sql = "SELECT TRIM('xyz' 'a')";
assert_eq!(
ParserError::ParserError("Expected: ), found: 'a'".to_owned()),
duckdb().parse_sql_statements(error_sql).unwrap_err()
);
}
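
A round-trip sketch for DuckDB's multi-argument TRIM tested above, again via the public parser entry point.

use sqlparser::dialect::DuckDbDialect;
use sqlparser::parser::Parser;

fn main() {
    // The second argument lands in Expr::Trim { trim_characters, .. }.
    let sql = "SELECT TRIM('xyz', 'a')";
    let ast = Parser::parse_sql(&DuckDbDialect {}, sql).unwrap();
    assert_eq!(ast[0].to_string(), sql);
}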

View file

@ -22,10 +22,11 @@
use sqlparser::ast::{
ClusteredBy, CommentDef, CreateFunction, CreateFunctionBody, CreateFunctionUsing, CreateTable,
Expr, Function, FunctionArgumentList, FunctionArguments, Ident, ObjectName, OrderByExpr,
OrderByOptions, SelectItem, Set, Statement, TableFactor, UnaryOperator, Use, Value,
Expr, Function, FunctionArgumentList, FunctionArguments, Ident, ObjectName,
OneOrManyWithParens, OrderByExpr, SelectItem, Statement, TableFactor, UnaryOperator, Use,
Value,
};
use sqlparser::dialect::{AnsiDialect, GenericDialect, HiveDialect};
use sqlparser::dialect::{GenericDialect, HiveDialect, MsSqlDialect};
use sqlparser::parser::ParserError;
use sqlparser::test_utils::*;
@ -91,7 +92,7 @@ fn parse_msck() {
}
#[test]
fn parse_set_hivevar() {
fn parse_set() {
let set = "SET HIVEVAR:name = a, b, c_d";
hive().verified_stmt(set);
}
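
A sketch of the Set shape this diff migrates to (Statement::Set with Set::SingleAssignment), reusing the SQL from set_statement_with_minus below.

use sqlparser::ast::{Set, Statement};
use sqlparser::dialect::HiveDialect;
use sqlparser::parser::Parser;

fn main() {
    let sql = "SET hive.tez.java.opts = -Xmx4g";
    let stmt = Parser::parse_sql(&HiveDialect {}, sql).unwrap().remove(0);
    // The dotted name arrives as one ObjectName; values holds the assigned exprs.
    if let Statement::Set(Set::SingleAssignment {
        variable, values, ..
    }) = stmt
    {
        assert_eq!(variable.to_string(), "hive.tez.java.opts");
        assert_eq!(values.len(), 1);
    }
}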
@ -133,7 +134,9 @@ fn create_table_with_comment() {
Statement::CreateTable(CreateTable { comment, .. }) => {
assert_eq!(
comment,
Some(CommentDef::WithoutEq("table comment".to_string()))
Some(CommentDef::AfterColumnDefsWithoutEq(
"table comment".to_string()
))
)
}
_ => unreachable!(),
@ -168,18 +171,14 @@ fn create_table_with_clustered_by() {
sorted_by: Some(vec![
OrderByExpr {
expr: Expr::Identifier(Ident::new("a")),
options: OrderByOptions {
asc: Some(true),
nulls_first: None,
},
asc: Some(true),
nulls_first: None,
with_fill: None,
},
OrderByExpr {
expr: Expr::Identifier(Ident::new("b")),
options: OrderByOptions {
asc: Some(false),
nulls_first: None,
},
asc: Some(false),
nulls_first: None,
with_fill: None,
},
]),
@ -341,9 +340,6 @@ fn lateral_view() {
fn sort_by() {
let sort_by = "SELECT * FROM db.table SORT BY a";
hive().verified_stmt(sort_by);
let sort_by_with_direction = "SELECT * FROM db.table SORT BY a, b DESC";
hive().verified_stmt(sort_by_with_direction);
}
#[test]
@ -369,20 +365,20 @@ fn from_cte() {
fn set_statement_with_minus() {
assert_eq!(
hive().verified_stmt("SET hive.tez.java.opts = -Xmx4g"),
Statement::Set(Set::SingleAssignment {
scope: None,
Statement::SetVariable {
local: false,
hivevar: false,
variable: ObjectName::from(vec![
variables: OneOrManyWithParens::One(ObjectName(vec![
Ident::new("hive"),
Ident::new("tez"),
Ident::new("java"),
Ident::new("opts")
]),
values: vec![Expr::UnaryOp {
])),
value: vec![Expr::UnaryOp {
op: UnaryOperator::Minus,
expr: Box::new(Expr::Identifier(Ident::new("Xmx4g")))
}],
})
}
);
assert_eq!(
@ -409,8 +405,7 @@ fn parse_create_function() {
assert_eq!(
function_body,
Some(CreateFunctionBody::AsBeforeOptions(Expr::Value(
(Value::SingleQuotedString("org.random.class.Name".to_string()))
.with_empty_span()
Value::SingleQuotedString("org.random.class.Name".to_string())
)))
);
assert_eq!(
@ -424,7 +419,7 @@ fn parse_create_function() {
}
// Test error in dialect that doesn't support parsing CREATE FUNCTION
let unsupported_dialects = TestedDialects::new(vec![Box::new(AnsiDialect {})]);
let unsupported_dialects = TestedDialects::new(vec![Box::new(MsSqlDialect {})]);
assert_eq!(
unsupported_dialects.parse_sql_statements(sql).unwrap_err(),
@ -465,12 +460,8 @@ fn parse_delimited_identifiers() {
partitions: _,
json_path: _,
sample: _,
index_hints: _,
} => {
assert_eq!(
ObjectName::from(vec![Ident::with_quote('"', "a table")]),
name
);
assert_eq!(vec![Ident::with_quote('"', "a table")], name.0);
assert_eq!(Ident::with_quote('"', "alias"), alias.unwrap().name);
assert!(args.is_none());
assert!(with_hints.is_empty());
@ -489,7 +480,7 @@ fn parse_delimited_identifiers() {
);
assert_eq!(
&Expr::Function(Function {
name: ObjectName::from(vec![Ident::with_quote('"', "myfun")]),
name: ObjectName(vec![Ident::with_quote('"', "myfun")]),
uses_odbc_syntax: false,
parameters: FunctionArguments::None,
args: FunctionArguments::List(FunctionArgumentList {
@ -524,16 +515,16 @@ fn parse_use() {
for object_name in &valid_object_names {
// Test single identifier without quotes
assert_eq!(
hive().verified_stmt(&format!("USE {object_name}")),
Statement::Use(Use::Object(ObjectName::from(vec![Ident::new(
hive().verified_stmt(&format!("USE {}", object_name)),
Statement::Use(Use::Object(ObjectName(vec![Ident::new(
object_name.to_string()
)])))
);
for &quote in &quote_styles {
// Test single identifier with different types of quotes
assert_eq!(
hive().verified_stmt(&format!("USE {quote}{object_name}{quote}")),
Statement::Use(Use::Object(ObjectName::from(vec![Ident::with_quote(
hive().verified_stmt(&format!("USE {}{}{}", quote, object_name, quote)),
Statement::Use(Use::Object(ObjectName(vec![Ident::with_quote(
quote,
object_name.to_string(),
)])))

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -39,7 +39,7 @@ fn test_square_brackets_over_db_schema_table_name() {
assert_eq!(
select.from[0],
TableWithJoins {
relation: table_from_name(ObjectName::from(vec![
relation: table_from_name(ObjectName(vec![
Ident {
value: "test_schema".to_string(),
quote_style: Some('['),
@ -81,7 +81,7 @@ fn test_double_quotes_over_db_schema_table_name() {
assert_eq!(
select.from[0],
TableWithJoins {
relation: table_from_name(ObjectName::from(vec![
relation: table_from_name(ObjectName(vec![
Ident {
value: "test_schema".to_string(),
quote_style: Some('"'),
@ -114,10 +114,7 @@ fn parse_delimited_identifiers() {
version,
..
} => {
assert_eq!(
ObjectName::from(vec![Ident::with_quote('"', "a table")]),
name
);
assert_eq!(vec![Ident::with_quote('"', "a table")], name.0);
assert_eq!(Ident::with_quote('"', "alias"), alias.unwrap().name);
assert!(args.is_none());
assert!(with_hints.is_empty());
@ -136,7 +133,7 @@ fn parse_delimited_identifiers() {
);
assert_eq!(
&Expr::Function(Function {
name: ObjectName::from(vec![Ident::with_quote('"', "myfun")]),
name: ObjectName(vec![Ident::with_quote('"', "myfun")]),
uses_odbc_syntax: false,
parameters: FunctionArguments::None,
args: FunctionArguments::List(FunctionArgumentList {
@ -208,7 +205,7 @@ fn test_redshift_json_path() {
path: JsonPath {
path: vec![
JsonPathElem::Bracket {
key: Expr::value(number("0"))
key: Expr::Value(number("0"))
},
JsonPathElem::Dot {
key: "o_orderkey".to_string(),
@ -231,12 +228,10 @@ fn test_redshift_json_path() {
path: JsonPath {
path: vec![
JsonPathElem::Bracket {
key: Expr::value(number("0"))
key: Expr::Value(number("0"))
},
JsonPathElem::Bracket {
key: Expr::Value(
(Value::SingleQuotedString("id".to_owned())).with_empty_span()
)
key: Expr::Value(Value::SingleQuotedString("id".to_owned()))
}
]
}
@ -257,12 +252,10 @@ fn test_redshift_json_path() {
path: JsonPath {
path: vec![
JsonPathElem::Bracket {
key: Expr::value(number("0"))
key: Expr::Value(number("0"))
},
JsonPathElem::Bracket {
key: Expr::Value(
(Value::SingleQuotedString("id".to_owned())).with_empty_span()
)
key: Expr::Value(Value::SingleQuotedString("id".to_owned()))
}
]
}
@ -283,7 +276,7 @@ fn test_redshift_json_path() {
path: JsonPath {
path: vec![
JsonPathElem::Bracket {
key: Expr::value(number("0"))
key: Expr::Value(number("0"))
},
JsonPathElem::Dot {
key: "id".to_string(),
@ -304,13 +297,13 @@ fn test_parse_json_path_from() {
TableFactor::Table {
name, json_path, ..
} => {
assert_eq!(name, &ObjectName::from(vec![Ident::new("src")]));
assert_eq!(name, &ObjectName(vec![Ident::new("src")]));
assert_eq!(
json_path,
&Some(JsonPath {
path: vec![
JsonPathElem::Bracket {
key: Expr::value(number("0"))
key: Expr::Value(number("0"))
},
JsonPathElem::Dot {
key: "a".to_string(),
@ -328,22 +321,20 @@ fn test_parse_json_path_from() {
TableFactor::Table {
name, json_path, ..
} => {
assert_eq!(name, &ObjectName::from(vec![Ident::new("src")]));
assert_eq!(name, &ObjectName(vec![Ident::new("src")]));
assert_eq!(
json_path,
&Some(JsonPath {
path: vec![
JsonPathElem::Bracket {
key: Expr::value(number("0"))
key: Expr::Value(number("0"))
},
JsonPathElem::Dot {
key: "a".to_string(),
quoted: false
},
JsonPathElem::Bracket {
key: Expr::Value(
(Value::Number("1".parse().unwrap(), false)).with_empty_span()
)
key: Expr::Value(Value::Number("1".parse().unwrap(), false))
},
JsonPathElem::Dot {
key: "b".to_string(),
@ -363,7 +354,7 @@ fn test_parse_json_path_from() {
} => {
assert_eq!(
name,
&ObjectName::from(vec![Ident::new("src"), Ident::new("a"), Ident::new("b")])
&ObjectName(vec![Ident::new("src"), Ident::new("a"), Ident::new("b")])
);
assert_eq!(json_path, &None);
}
@ -391,19 +382,3 @@ fn test_parse_nested_quoted_identifier() {
.parse_sql_statements(r#"SELECT 1 AS ["1]"#)
.is_err());
}
#[test]
fn parse_extract_single_quotes() {
let sql = "SELECT EXTRACT('month' FROM my_timestamp) FROM my_table";
redshift().verified_stmt(sql);
}
#[test]
fn parse_string_literal_backslash_escape() {
redshift().one_statement_parses_to(r#"SELECT 'l\'auto'"#, "SELECT 'l''auto'");
}
#[test]
fn parse_utf8_multibyte_idents() {
redshift().verified_stmt("SELECT 🚀.city AS 🎸 FROM customers AS 🚀");
}
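
A sketch of the backslash-escape normalization tested above, assuming sqlparser::dialect::RedshiftSqlDialect.

use sqlparser::dialect::RedshiftSqlDialect;
use sqlparser::parser::Parser;

fn main() {
    // The backslash-escaped quote re-serializes in the doubled-quote form.
    let ast = Parser::parse_sql(&RedshiftSqlDialect {}, r#"SELECT 'l\'auto'"#).unwrap();
    assert_eq!(ast[0].to_string(), "SELECT 'l''auto'");
}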

File diff suppressed because it is too large

View file

@ -214,6 +214,7 @@ fn parse_create_table_auto_increment() {
vec![ColumnDef {
name: "bar".into(),
data_type: DataType::Int(None),
collation: None,
options: vec![
ColumnOptionDef {
name: None,
@ -242,6 +243,7 @@ fn parse_create_table_primary_key_asc_desc() {
let expected_column_def = |kind| ColumnDef {
name: "bar".into(),
data_type: DataType::Int(None),
collation: None,
options: vec![
ColumnOptionDef {
name: None,
@ -284,11 +286,13 @@ fn parse_create_sqlite_quote() {
ColumnDef {
name: Ident::with_quote('"', "KEY"),
data_type: DataType::Int(None),
collation: None,
options: vec![],
},
ColumnDef {
name: Ident::with_quote('[', "INDEX"),
data_type: DataType::Int(None),
collation: None,
options: vec![],
},
],
@ -324,7 +328,7 @@ fn parse_create_table_on_conflict_col() {
Keyword::IGNORE,
Keyword::REPLACE,
] {
let sql = format!("CREATE TABLE t1 (a INT, b INT ON CONFLICT {keyword:?})");
let sql = format!("CREATE TABLE t1 (a INT, b INT ON CONFLICT {:?})", keyword);
match sqlite_and_generic().verified_stmt(&sql) {
Statement::CreateTable(CreateTable { columns, .. }) => {
assert_eq!(
@ -369,9 +373,7 @@ fn test_placeholder() {
let ast = sqlite().verified_only_select(sql);
assert_eq!(
ast.projection[0],
UnnamedExpr(Expr::Value(
(Value::Placeholder("@xxx".into())).with_empty_span()
)),
UnnamedExpr(Expr::Value(Value::Placeholder("@xxx".into()))),
);
}
@ -410,13 +412,13 @@ fn parse_window_function_with_filter() {
"count",
"user_defined_function",
] {
let sql = format!("SELECT {func_name}(x) FILTER (WHERE y) OVER () FROM t");
let sql = format!("SELECT {}(x) FILTER (WHERE y) OVER () FROM t", func_name);
let select = sqlite().verified_only_select(&sql);
assert_eq!(select.to_string(), sql);
assert_eq!(
select.projection,
vec![SelectItem::UnnamedExpr(Expr::Function(Function {
name: ObjectName::from(vec![Ident::new(func_name)]),
name: ObjectName(vec![Ident::new(func_name)]),
uses_odbc_syntax: false,
parameters: FunctionArguments::None,
args: FunctionArguments::List(FunctionArgumentList {
@ -444,15 +446,11 @@ fn parse_window_function_with_filter() {
fn parse_attach_database() {
let sql = "ATTACH DATABASE 'test.db' AS test";
let verified_stmt = sqlite().verified_stmt(sql);
assert_eq!(sql, format!("{verified_stmt}"));
assert_eq!(sql, format!("{}", verified_stmt));
match verified_stmt {
Statement::AttachDatabase {
schema_name,
database_file_name:
Expr::Value(ValueWithSpan {
value: Value::SingleQuotedString(literal_name),
span: _,
}),
database_file_name: Expr::Value(Value::SingleQuotedString(literal_name)),
database: true,
} => {
assert_eq!(schema_name.value, "test");
@ -471,17 +469,17 @@ fn parse_update_tuple_row_values() {
or: None,
assignments: vec![Assignment {
target: AssignmentTarget::Tuple(vec![
ObjectName::from(vec![Ident::new("a"),]),
ObjectName::from(vec![Ident::new("b"),]),
ObjectName(vec![Ident::new("a"),]),
ObjectName(vec![Ident::new("b"),]),
]),
value: Expr::Tuple(vec![
Expr::Value((Value::Number("1".parse().unwrap(), false)).with_empty_span()),
Expr::Value((Value::Number("2".parse().unwrap(), false)).with_empty_span())
Expr::Value(Value::Number("1".parse().unwrap(), false)),
Expr::Value(Value::Number("2".parse().unwrap(), false))
])
}],
selection: None,
table: TableWithJoins {
relation: table_from_name(ObjectName::from(vec![Ident::new("x")])),
relation: table_from_name(ObjectName(vec![Ident::new("x")])),
joins: vec![],
},
from: None,
@ -524,6 +522,23 @@ fn parse_start_transaction_with_modifier() {
sqlite_and_generic().verified_stmt("BEGIN DEFERRED");
sqlite_and_generic().verified_stmt("BEGIN IMMEDIATE");
sqlite_and_generic().verified_stmt("BEGIN EXCLUSIVE");
let unsupported_dialects = all_dialects_except(|d| d.supports_start_transaction_modifier());
let res = unsupported_dialects.parse_sql_statements("BEGIN DEFERRED");
assert_eq!(
ParserError::ParserError("Expected: end of statement, found: DEFERRED".to_string()),
res.unwrap_err(),
);
let res = unsupported_dialects.parse_sql_statements("BEGIN IMMEDIATE");
assert_eq!(
ParserError::ParserError("Expected: end of statement, found: IMMEDIATE".to_string()),
res.unwrap_err(),
);
let res = unsupported_dialects.parse_sql_statements("BEGIN EXCLUSIVE");
assert_eq!(
ParserError::ParserError("Expected: end of statement, found: EXCLUSIVE".to_string()),
res.unwrap_err(),
);
}
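
A sketch of the dialect gate tested above; it assumes AnsiDialect is among the dialects for which supports_start_transaction_modifier() is false.

use sqlparser::dialect::{AnsiDialect, SQLiteDialect};
use sqlparser::parser::Parser;

fn main() {
    // Accepted where the dialect supports transaction modifiers...
    assert!(Parser::parse_sql(&SQLiteDialect {}, "BEGIN DEFERRED").is_ok());
    // ...rejected where it does not.
    assert!(Parser::parse_sql(&AnsiDialect {}, "BEGIN DEFERRED").is_err());
}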
#[test]
@ -536,12 +551,7 @@ fn test_dollar_identifier_as_placeholder() {
Expr::BinaryOp { op, left, right } => {
assert_eq!(op, BinaryOperator::Eq);
assert_eq!(left, Box::new(Expr::Identifier(Ident::new("id"))));
assert_eq!(
right,
Box::new(Expr::Value(
(Placeholder("$id".to_string())).with_empty_span()
))
);
assert_eq!(right, Box::new(Expr::Value(Placeholder("$id".to_string()))));
}
_ => unreachable!(),
}
@ -551,47 +561,12 @@ fn test_dollar_identifier_as_placeholder() {
Expr::BinaryOp { op, left, right } => {
assert_eq!(op, BinaryOperator::Eq);
assert_eq!(left, Box::new(Expr::Identifier(Ident::new("id"))));
assert_eq!(
right,
Box::new(Expr::Value(
(Placeholder("$$".to_string())).with_empty_span()
))
);
assert_eq!(right, Box::new(Expr::Value(Placeholder("$$".to_string()))));
}
_ => unreachable!(),
}
}
#[test]
fn test_match_operator() {
assert_eq!(
sqlite().verified_expr("col MATCH 'pattern'"),
Expr::BinaryOp {
op: BinaryOperator::Match,
left: Box::new(Expr::Identifier(Ident::new("col"))),
right: Box::new(Expr::Value(
(Value::SingleQuotedString("pattern".to_string())).with_empty_span()
))
}
);
sqlite().verified_only_select("SELECT * FROM email WHERE email MATCH 'fts5'");
}
#[test]
fn test_regexp_operator() {
assert_eq!(
sqlite().verified_expr("col REGEXP 'pattern'"),
Expr::BinaryOp {
op: BinaryOperator::Regexp,
left: Box::new(Expr::Identifier(Ident::new("col"))),
right: Box::new(Expr::Value(
(Value::SingleQuotedString("pattern".to_string())).with_empty_span()
))
}
);
sqlite().verified_only_select(r#"SELECT count(*) FROM messages WHERE msg_text REGEXP '\d+'"#);
}
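
A sketch of pulling the new MATCH operator out of a bare expression, assuming Parser::try_with_sql and the public parse_expr method.

use sqlparser::ast::{BinaryOperator, Expr};
use sqlparser::dialect::SQLiteDialect;
use sqlparser::parser::Parser;

fn main() {
    let dialect = SQLiteDialect {};
    let expr = Parser::new(&dialect)
        .try_with_sql("col MATCH 'pattern'")
        .unwrap()
        .parse_expr()
        .unwrap();
    // MATCH (and likewise REGEXP) surfaces as an ordinary binary operator.
    if let Expr::BinaryOp { op, .. } = expr {
        assert_eq!(op, BinaryOperator::Match);
    }
}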
fn sqlite() -> TestedDialects {
TestedDialects::new(vec![Box::new(SQLiteDialect {})])
}