snowflake: Fix handling of /~% in the stage name (#1009)

Lukasz Stefaniak 2023-10-26 21:23:57 +02:00 committed by GitHub
parent 2f437db2a6
commit 7b3cc18229
3 changed files with 72 additions and 5 deletions
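A minimal sketch (not part of the commit) of the regression this fixes, using the crate's public parsing entry point; the statement and its canonical form are the same ones exercised by the new parse_division_correctly test below, while the main wrapper is illustrative only. With '/' counted as an identifier character, `field/1000` lexed as a single word; after this change it parses as a division:

    use sqlparser::dialect::SnowflakeDialect;
    use sqlparser::parser::Parser;

    fn main() -> Result<(), sqlparser::parser::ParserError> {
        // Previously the Snowflake dialect treated '@', '%', '~' and '/' as
        // identifier characters so that stage names would lex; the side effect
        // was that `field/1000` came out as one word instead of a division.
        let ast = Parser::parse_sql(&SnowflakeDialect {}, "SELECT field/1000 FROM tbl1")?;
        assert_eq!(ast[0].to_string(), "SELECT field / 1000 FROM tbl1");
        Ok(())
    }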


@@ -35,7 +35,7 @@ pub struct SnowflakeDialect;
 impl Dialect for SnowflakeDialect {
     // see https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html
     fn is_identifier_start(&self, ch: char) -> bool {
-        ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch == '_' || ch == '@' || ch == '%'
+        ch.is_ascii_lowercase() || ch.is_ascii_uppercase() || ch == '_'
     }

     fn is_identifier_part(&self, ch: char) -> bool {
@@ -44,8 +44,6 @@ impl Dialect for SnowflakeDialect {
             || ch.is_ascii_digit()
             || ch == '$'
             || ch == '_'
-            || ch == '/'
-            || ch == '~'
     }

     fn supports_within_after_array_aggregation(&self) -> bool {
@@ -148,8 +146,48 @@ pub fn parse_create_stage(
     })
 }

+pub fn parse_stage_name_identifier(parser: &mut Parser) -> Result<Ident, ParserError> {
+    let mut ident = String::new();
+    while let Some(next_token) = parser.next_token_no_skip() {
+        match &next_token.token {
+            Token::Whitespace(_) => break,
+            Token::Period => {
+                parser.prev_token();
+                break;
+            }
+            Token::AtSign => ident.push('@'),
+            Token::Tilde => ident.push('~'),
+            Token::Mod => ident.push('%'),
+            Token::Div => ident.push('/'),
+            Token::Word(w) => ident.push_str(&w.value),
+            _ => return parser.expected("stage name identifier", parser.peek_token()),
+        }
+    }
+    Ok(Ident::new(ident))
+}
+
+pub fn parse_snowflake_stage_name(parser: &mut Parser) -> Result<ObjectName, ParserError> {
+    match parser.next_token().token {
+        Token::AtSign => {
+            parser.prev_token();
+            let mut idents = vec![];
+            loop {
+                idents.push(parse_stage_name_identifier(parser)?);
+                if !parser.consume_token(&Token::Period) {
+                    break;
+                }
+            }
+            Ok(ObjectName(idents))
+        }
+        _ => {
+            parser.prev_token();
+            Ok(parser.parse_object_name()?)
+        }
+    }
+}
+
 pub fn parse_copy_into(parser: &mut Parser) -> Result<Statement, ParserError> {
-    let into: ObjectName = parser.parse_object_name()?;
+    let into: ObjectName = parse_snowflake_stage_name(parser)?;
     let mut files: Vec<String> = vec![];
     let mut from_transformations: Option<Vec<StageLoadSelectItem>> = None;
     let from_stage_alias;
@@ -165,7 +203,7 @@ pub fn parse_copy_into(parser: &mut Parser) -> Result<Statement, ParserError> {
            from_transformations = parse_select_items_for_data_load(parser)?;

            parser.expect_keyword(Keyword::FROM)?;
-           from_stage = parser.parse_object_name()?;
+           from_stage = parse_snowflake_stage_name(parser)?;
            stage_params = parse_stage_params(parser)?;

            // as
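A rough usage sketch of the new stage-name handling, again via the public API; the COPY INTO statements are illustrative and not taken from the commit's tests. parse_snowflake_stage_name reads tokens up to the next whitespace, assembling period-separated segments, so '@', '~', '%' and '/' stay inside the stage name:

    use sqlparser::dialect::SnowflakeDialect;
    use sqlparser::parser::Parser;

    fn main() {
        // `@namespace.%table_name/path` should split on '.' into the segments
        // "@namespace" and "%table_name/path"; `@~/staged_files` is the user stage.
        for sql in [
            "COPY INTO my_table FROM @namespace.%table_name/path",
            "COPY INTO my_table FROM @~/staged_files",
        ] {
            let ast = Parser::parse_sql(&SnowflakeDialect {}, sql);
            println!("{} -> {:?}", sql, ast);
        }
    }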


@@ -2001,6 +2001,19 @@ mod tests {
         compare(expected, tokens);
     }

+    #[test]
+    fn tokenize_snowflake_div() {
+        let sql = r#"field/1000"#;
+        let dialect = SnowflakeDialect {};
+        let tokens = Tokenizer::new(&dialect, sql).tokenize().unwrap();
+        let expected = vec![
+            Token::make_word(r#"field"#, None),
+            Token::Div,
+            Token::Number("1000".to_string(), false),
+        ];
+        compare(expected, tokens);
+    }
+
     #[test]
     fn tokenize_quoted_identifier_with_no_escape() {
         let sql = r#" "a "" b" "a """ "c """"" "#;
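For completeness, a small sketch of the tokenizer-level effect; the input string is illustrative. Because '@', '%', '~' and '/' are no longer identifier characters in SnowflakeDialect, they now surface as standalone tokens and are only recombined while a stage name is being parsed:

    use sqlparser::dialect::SnowflakeDialect;
    use sqlparser::tokenizer::Tokenizer;

    fn main() {
        let dialect = SnowflakeDialect {};
        // Expected roughly: AtSign, Tilde, Div, Word("staged"), Whitespace, Mod, Word("tbl")
        let tokens = Tokenizer::new(&dialect, "@~/staged %tbl").tokenize().unwrap();
        println!("{:?}", tokens);
    }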


@@ -26,6 +26,9 @@ use test_utils::*;
 #[macro_use]
 mod test_utils;

+#[cfg(test)]
+use pretty_assertions::assert_eq;
+
 #[test]
 fn test_snowflake_create_table() {
     let sql = "CREATE TABLE _my_$table (am00unt number)";
@@ -1118,3 +1121,16 @@ fn parse_subquery_function_argument() {
     // the function.
     snowflake().one_statement_parses_to("SELECT func(SELECT 1, 2)", "SELECT func((SELECT 1, 2))");
 }
+
+#[test]
+fn parse_division_correctly() {
+    snowflake_and_generic().one_statement_parses_to(
+        "SELECT field/1000 FROM tbl1",
+        "SELECT field / 1000 FROM tbl1",
+    );
+
+    snowflake_and_generic().one_statement_parses_to(
+        "SELECT tbl1.field/tbl2.field FROM tbl1 JOIN tbl2 ON tbl1.id = tbl2.entity_id",
+        "SELECT tbl1.field / tbl2.field FROM tbl1 JOIN tbl2 ON tbl1.id = tbl2.entity_id",
+    );
+}