Merge remote-tracking branch 'upstream/main' into expr-iif

This commit is contained in:
Alex Miller 2024-12-11 16:00:20 -08:00
commit c4d4569dc9
30 changed files with 1038 additions and 256 deletions

View file

@ -1,5 +1,19 @@
# Changelog
## Unreleased
### Added
* Add support for the last_insert_rowid() function (Krishna Vishal)
* Add support for JOIN USING and NATURAL JOIN (Jussi Saurio)
* Add support for more scalar functions (Kacper Kołodziej)
* Add support for `HAVING` clause (Jussi Saurio)
* Add `get()` and `iterate()` to JavaScript/Wasm API (Jean Arhancet)
## 0.0.8 - 2024-11-20
### Added

View file

@ -2,11 +2,16 @@
This document describes the SQLite compatibility status of Limbo:
* [Limitations](#limitations)
* [SQL statements](#sql-statements)
* [SQL functions](#sql-functions)
* [SQLite API](#sqlite-api)
* [SQLite VDBE opcodes](#sqlite-vdbe-opcodes)
- [SQLite Compatibility](#sqlite-compatibility)
- [Limitations](#limitations)
- [SQL statements](#sql-statements)
- [SQL functions](#sql-functions)
- [Scalar functions](#scalar-functions)
- [Aggregate functions](#aggregate-functions)
- [Date and time functions](#date-and-time-functions)
- [JSON functions](#json-functions)
- [SQLite API](#sqlite-api)
- [SQLite VDBE opcodes](#sqlite-vdbe-opcodes)
## Limitations
@ -51,15 +56,45 @@ This document describes the SQLite compatibility status of Limbo:
| SELECT ... LIMIT | Yes | |
| SELECT ... ORDER BY | Partial | |
| SELECT ... GROUP BY | Partial | |
| SELECT ... HAVING | Partial | |
| SELECT ... JOIN | Partial | |
| SELECT ... CROSS JOIN | Partial | |
| SELECT ... INNER JOIN | Partial | |
| SELECT ... OUTER JOIN | Partial | |
| SELECT ... JOIN USING | Yes | |
| SELECT ... NATURAL JOIN | Yes | |
| UPDATE | No | |
| UPSERT | No | |
| VACUUM | No | |
| WITH clause | No | |
### SELECT Expressions
Feature support for [SQLite expression syntax](https://www.sqlite.org/lang_expr.html).
| Syntax | Status | Comment |
|------------------------------|---------|---------|
| literals | Yes | |
| schema.table.column | Partial | Schemas aren't supported |
| unary operator               | Partial | `-` is supported, `+` and `~` aren't |
| binary operator | Partial | Only `%`, `!<`, and `!>` are unsupported |
| agg() FILTER (WHERE ...) | No | Is incorrectly ignored |
| ... OVER (...) | No | Is incorrectly ignored |
| (expr) | Yes | |
| CAST (expr AS type) | Yes | |
| COLLATE | No | |
| (NOT) LIKE | No | |
| (NOT) GLOB | No | |
| (NOT) REGEXP | No | |
| (NOT) MATCH | No | |
| IS (NOT) | No | |
| IS (NOT) DISTINCT FROM | No | |
| (NOT) BETWEEN ... AND ... | No | |
| (NOT) IN (subquery) | No | |
| (NOT) EXISTS (subquery) | No | |
| CASE WHEN THEN ELSE END | Yes | |
| RAISE | No | |
## SQL functions
### Scalar functions
@ -78,7 +113,7 @@ This document describes the SQLite compatibility status of Limbo:
| ifnull(X,Y) | Yes | |
| iif(X,Y,Z) | Yes | |
| instr(X,Y) | Yes | |
| last_insert_rowid() | No | |
| last_insert_rowid() | Yes | |
| length(X) | Yes | |
| like(X,Y) | No | |
| like(X,Y,Z) | No | |
@ -139,7 +174,6 @@ This document describes the SQLite compatibility status of Limbo:
| sum(X) | Yes | |
| total(X) | Yes | |
### Date and time functions
| Function | Status | Comment |

Cargo.lock generated
View file

@ -40,10 +40,19 @@ dependencies = [
]
[[package]]
name = "allocator-api2"
version = "0.2.20"
name = "aligned-vec"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9"
checksum = "7e0966165eaf052580bd70eb1b32cb3d6245774c0104d1b2793e9650bf83b52a"
dependencies = [
"equator",
]
[[package]]
name = "allocator-api2"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
[[package]]
name = "anarchist-readable-name-generator-lib"
@ -126,9 +135,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.93"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"
checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7"
[[package]]
name = "arrayvec"
@ -186,9 +195,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytemuck"
version = "1.19.0"
version = "1.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d"
checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a"
[[package]]
name = "byteorder"
@ -216,16 +225,16 @@ dependencies = [
"quote",
"serde",
"serde_json",
"syn 2.0.87",
"syn 2.0.90",
"tempfile",
"toml",
]
[[package]]
name = "cc"
version = "1.2.1"
version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47"
checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d"
dependencies = [
"shlex",
]
@ -250,9 +259,9 @@ checksum = "18758054972164c3264f7c8386f5fc6da6114cb46b619fd365d4e3b2dc3ae487"
[[package]]
name = "chrono"
version = "0.4.38"
version = "0.4.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825"
dependencies = [
"android-tzdata",
"iana-time-zone",
@ -291,9 +300,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.21"
version = "4.5.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f"
checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84"
dependencies = [
"clap_builder",
"clap_derive",
@ -301,9 +310,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.21"
version = "4.5.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec"
checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838"
dependencies = [
"anstream",
"anstyle",
@ -320,14 +329,14 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
"syn 2.0.87",
"syn 2.0.90",
]
[[package]]
name = "clap_lex"
version = "0.7.3"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7"
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
[[package]]
name = "cli-table"
@ -421,9 +430,9 @@ dependencies = [
[[package]]
name = "cpufeatures"
version = "0.2.15"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6"
checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3"
dependencies = [
"libc",
]
@ -627,6 +636,26 @@ dependencies = [
"log",
]
[[package]]
name = "equator"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c35da53b5a021d2484a7cc49b2ac7f2d840f8236a286f84202369bd338d761ea"
dependencies = [
"equator-macro",
]
[[package]]
name = "equator-macro"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.90",
]
[[package]]
name = "equivalent"
version = "1.0.1"
@ -635,12 +664,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "errno"
version = "0.3.9"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
dependencies = [
"libc",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
@ -679,9 +708,9 @@ checksum = "f8eb564c5c7423d25c886fb561d1e4ee69f72354d16918afa32c08811f6b6a55"
[[package]]
name = "fastrand"
version = "2.2.0"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "fd-lock"
@ -768,7 +797,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.87",
"syn 2.0.90",
]
[[package]]
@ -864,9 +893,9 @@ dependencies = [
[[package]]
name = "hashbrown"
version = "0.15.1"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3"
checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
[[package]]
name = "hashlink"
@ -941,12 +970,12 @@ dependencies = [
[[package]]
name = "indexmap"
version = "2.6.0"
version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f"
dependencies = [
"equivalent",
"hashbrown 0.15.1",
"hashbrown 0.15.2",
"serde",
]
@ -1012,16 +1041,17 @@ dependencies = [
[[package]]
name = "itoa"
version = "1.0.11"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
[[package]]
name = "js-sys"
version = "0.3.72"
version = "0.3.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9"
checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7"
dependencies = [
"once_cell",
"wasm-bindgen",
]
@ -1058,9 +1088,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
version = "0.2.162"
version = "0.2.168"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398"
checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d"
[[package]]
name = "libmimalloc-sys"
@ -1149,7 +1179,7 @@ dependencies = [
"sieve-cache",
"sqlite3-parser",
"tempfile",
"thiserror",
"thiserror 1.0.69",
]
[[package]]
@ -1247,9 +1277,9 @@ dependencies = [
[[package]]
name = "mockall"
version = "0.13.0"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a"
checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2"
dependencies = [
"cfg-if",
"downcast",
@ -1261,14 +1291,14 @@ dependencies = [
[[package]]
name = "mockall_derive"
version = "0.13.0"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020"
checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898"
dependencies = [
"cfg-if",
"proc-macro2",
"quote",
"syn 2.0.87",
"syn 2.0.90",
]
[[package]]
@ -1393,20 +1423,20 @@ dependencies = [
[[package]]
name = "pest"
version = "2.7.14"
version = "2.7.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442"
checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc"
dependencies = [
"memchr",
"thiserror",
"thiserror 2.0.6",
"ucd-trie",
]
[[package]]
name = "pest_derive"
version = "2.7.14"
version = "2.7.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd"
checksum = "816518421cfc6887a0d62bf441b6ffb4536fcc926395a69e1a85852d4363f57e"
dependencies = [
"pest",
"pest_generator",
@ -1414,22 +1444,22 @@ dependencies = [
[[package]]
name = "pest_generator"
version = "2.7.14"
version = "2.7.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e"
checksum = "7d1396fd3a870fc7838768d171b4616d5c91f6cc25e377b673d714567d99377b"
dependencies = [
"pest",
"pest_meta",
"proc-macro2",
"quote",
"syn 2.0.87",
"syn 2.0.90",
]
[[package]]
name = "pest_meta"
version = "2.7.14"
version = "2.7.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d"
checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea"
dependencies = [
"once_cell",
"pest",
@ -1538,16 +1568,17 @@ dependencies = [
[[package]]
name = "portable-atomic"
version = "1.9.0"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2"
checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6"
[[package]]
name = "pprof"
version = "0.12.1"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978385d59daf9269189d052ca8a84c1acfd0715c0599a5d5188d4acc078ca46a"
checksum = "ebbe2f8898beba44815fdc9e5a4ae9c929e21c5dc29b0c774a15555f7f58d6d0"
dependencies = [
"aligned-vec",
"backtrace",
"cfg-if",
"criterion",
@ -1561,7 +1592,7 @@ dependencies = [
"smallvec",
"symbolic-demangle",
"tempfile",
"thiserror",
"thiserror 1.0.69",
]
[[package]]
@ -1601,9 +1632,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.89"
version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e"
checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
dependencies = [
"unicode-ident",
]
@ -1667,7 +1698,7 @@ dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn 2.0.87",
"syn 2.0.90",
]
[[package]]
@ -1680,7 +1711,7 @@ dependencies = [
"proc-macro2",
"pyo3-build-config",
"quote",
"syn 2.0.87",
"syn 2.0.90",
]
[[package]]
@ -1778,7 +1809,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43"
dependencies = [
"getrandom",
"libredox",
"thiserror",
"thiserror 1.0.69",
]
[[package]]
@ -1850,7 +1881,7 @@ dependencies = [
"regex",
"relative-path",
"rustc_version",
"syn 2.0.87",
"syn 2.0.90",
"unicode-ident",
]
@ -1885,15 +1916,15 @@ dependencies = [
[[package]]
name = "rustix"
version = "0.38.40"
version = "0.38.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0"
checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85"
dependencies = [
"bitflags 2.6.0",
"errno",
"libc",
"linux-raw-sys",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
@ -1948,29 +1979,29 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b"
[[package]]
name = "serde"
version = "1.0.215"
version = "1.0.216"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f"
checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.215"
version = "1.0.216"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0"
checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.87",
"syn 2.0.90",
]
[[package]]
name = "serde_json"
version = "1.0.132"
version = "1.0.133"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
dependencies = [
"indexmap",
"itoa",
@ -2075,9 +2106,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "symbolic-common"
version = "12.12.1"
version = "12.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d4d73159efebfb389d819fd479afb2dbd57dcb3e3f4b7fcfa0e675f5a46c1cb"
checksum = "e5ba5365997a4e375660bed52f5b42766475d5bc8ceb1bb13fea09c469ea0f49"
dependencies = [
"debugid",
"memmap2",
@ -2087,9 +2118,9 @@ dependencies = [
[[package]]
name = "symbolic-demangle"
version = "12.12.1"
version = "12.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a767859f6549c665011970874c3f541838b4835d5aaaa493d3ee383918be9f10"
checksum = "beff338b2788519120f38c59ff4bb15174f52a183e547bac3d6072c2c0aa48aa"
dependencies = [
"cpp_demangle",
"rustc-demangle",
@ -2109,9 +2140,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.87"
version = "2.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d"
checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31"
dependencies = [
"proc-macro2",
"quote",
@ -2158,7 +2189,16 @@ version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
"thiserror-impl 1.0.69",
]
[[package]]
name = "thiserror"
version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47"
dependencies = [
"thiserror-impl 2.0.6",
]
[[package]]
@ -2169,7 +2209,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.87",
"syn 2.0.90",
]
[[package]]
name = "thiserror-impl"
version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.90",
]
[[package]]
@ -2218,9 +2269,9 @@ dependencies = [
[[package]]
name = "tracing"
version = "0.1.40"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
"pin-project-lite",
"tracing-core",
@ -2228,9 +2279,9 @@ dependencies = [
[[package]]
name = "tracing-core"
version = "0.1.32"
version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
[[package]]
name = "typenum"
@ -2255,9 +2306,9 @@ dependencies = [
[[package]]
name = "unicode-ident"
version = "1.0.13"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"
checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
[[package]]
name = "unicode-segmentation"
@ -2319,9 +2370,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e"
checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396"
dependencies = [
"cfg-if",
"once_cell",
@ -2330,24 +2381,23 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358"
checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.87",
"syn 2.0.90",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56"
checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@ -2355,28 +2405,28 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68"
checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.87",
"syn 2.0.90",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.95"
version = "0.2.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6"
[[package]]
name = "web-sys"
version = "0.3.72"
version = "0.3.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112"
checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc"
dependencies = [
"js-sys",
"wasm-bindgen",
@ -2597,5 +2647,5 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.87",
"syn 2.0.90",
]

View file

@ -37,7 +37,7 @@
### CLI
Instal `limbo` with:
Install `limbo` with:
```
curl --proto '=https' --tlsv1.2 -LsSf \

View file

@ -30,6 +30,40 @@ test.serial("Statement.raw().all()", async (t) => {
t.deepEqual(stmt.raw().all(), expected);
});
test.serial("Statement.raw().get()", async (t) => {
const db = t.context.db;
const stmt = db.prepare("SELECT * FROM users");
const expected = [
1, "Alice", "alice@example.org"
];
t.deepEqual(stmt.raw().get(), expected);
const emptyStmt = db.prepare("SELECT * FROM users WHERE id = -1");
t.is(emptyStmt.raw().get(), undefined);
});
test.serial("Statement.raw().iterate()", async (t) => {
const db = t.context.db;
const stmt = db.prepare("SELECT * FROM users");
const expected = [
{ done: false, value: [1, "Alice", "alice@example.org"] },
{ done: false, value: [2, "Bob", "bob@example.com"] },
{ done: true, value: undefined },
];
let iter = stmt.raw().iterate();
t.is(typeof iter[Symbol.iterator], 'function');
t.deepEqual(iter.next(), expected[0])
t.deepEqual(iter.next(), expected[1])
t.deepEqual(iter.next(), expected[2])
const emptyStmt = db.prepare("SELECT * FROM users WHERE id = -1");
t.is(typeof emptyStmt[Symbol.iterator], 'undefined');
t.throws(() => emptyStmt.next(), { instanceOf: TypeError });
});
const connect = async (path_opt) => {
const path = path_opt ?? "hello.db";
const provider = process.env.PROVIDER;

View file

@ -61,6 +61,21 @@ impl Statement {
self
}
pub fn get(&self) -> JsValue {
match self.inner.borrow_mut().step() {
Ok(limbo_core::RowResult::Row(row)) => {
let row_array = js_sys::Array::new();
for value in row.values {
let value = to_js_value(value);
row_array.push(&value);
}
JsValue::from(row_array)
}
Ok(limbo_core::RowResult::IO) | Ok(limbo_core::RowResult::Done) => JsValue::UNDEFINED,
Err(e) => panic!("Error: {:?}", e),
}
}
pub fn all(&self) -> js_sys::Array {
let array = js_sys::Array::new();
loop {
@ -80,6 +95,18 @@ impl Statement {
}
array
}
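// Note: iterate() below is backed by all(), so every remaining row is
// materialized into a JS array up front; the returned object is that
// array's own iterator rather than a lazily streaming one.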
pub fn iterate(&self) -> JsValue {
let all = self.all();
let iterator_fn = js_sys::Reflect::get(&all, &js_sys::Symbol::iterator())
.expect("Failed to get iterator function")
.dyn_into::<js_sys::Function>()
.expect("Symbol.iterator is not a function");
iterator_fn
.call0(&all)
.expect("Failed to call iterator function")
}
}
fn to_js_value(value: limbo_core::Value) -> JsValue {

View file

@ -20,10 +20,10 @@ path = "main.rs"
[dependencies]
anyhow = "1.0.75"
clap = { version = "4.4.0", features = ["derive"] }
clap = { version = "4.5", features = ["derive"] }
cli-table = "0.4.7"
dirs = "5.0.1"
env_logger = "0.10.1"
limbo_core = { path = "../core" }
rustyline = "12.0.0"
ctrlc = "3.4.4"
ctrlc = "3.4.4"

View file

@ -54,7 +54,7 @@ pest_derive = { version = "2.0", optional = true }
rand = "0.8.5"
[target.'cfg(not(target_family = "windows"))'.dev-dependencies]
pprof = { version = "0.12.1", features = ["criterion", "flamegraph"] }
pprof = { version = "0.14.0", features = ["criterion", "flamegraph"] }
[dev-dependencies]
criterion = { version = "0.5", features = [

View file

@ -69,6 +69,7 @@ pub enum ScalarFunc {
RTrim,
Round,
Length,
OctetLength,
Min,
Max,
Nullif,
@ -85,6 +86,7 @@ pub enum ScalarFunc {
Hex,
Unhex,
ZeroBlob,
LastInsertRowid,
}
impl Display for ScalarFunc {
@ -110,6 +112,7 @@ impl Display for ScalarFunc {
ScalarFunc::RTrim => "rtrim".to_string(),
ScalarFunc::Round => "round".to_string(),
ScalarFunc::Length => "length".to_string(),
ScalarFunc::OctetLength => "octet_length".to_string(),
ScalarFunc::Min => "min".to_string(),
ScalarFunc::Max => "max".to_string(),
ScalarFunc::Nullif => "nullif".to_string(),
@ -126,6 +129,7 @@ impl Display for ScalarFunc {
ScalarFunc::Hex => "hex".to_string(),
ScalarFunc::Unhex => "unhex".to_string(),
ScalarFunc::ZeroBlob => "zeroblob".to_string(),
ScalarFunc::LastInsertRowid => "last_insert_rowid".to_string(),
};
write!(f, "{}", str)
}
@ -189,12 +193,14 @@ impl Func {
"rtrim" => Ok(Func::Scalar(ScalarFunc::RTrim)),
"round" => Ok(Func::Scalar(ScalarFunc::Round)),
"length" => Ok(Func::Scalar(ScalarFunc::Length)),
"octet_length" => Ok(Func::Scalar(ScalarFunc::OctetLength)),
"sign" => Ok(Func::Scalar(ScalarFunc::Sign)),
"substr" => Ok(Func::Scalar(ScalarFunc::Substr)),
"substring" => Ok(Func::Scalar(ScalarFunc::Substring)),
"date" => Ok(Func::Scalar(ScalarFunc::Date)),
"time" => Ok(Func::Scalar(ScalarFunc::Time)),
"typeof" => Ok(Func::Scalar(ScalarFunc::Typeof)),
"last_insert_rowid" => Ok(Func::Scalar(ScalarFunc::LastInsertRowid)),
"unicode" => Ok(Func::Scalar(ScalarFunc::Unicode)),
"quote" => Ok(Func::Scalar(ScalarFunc::Quote)),
"sqlite_version" => Ok(Func::Scalar(ScalarFunc::SqliteVersion)),

View file

@ -20,6 +20,7 @@ use log::trace;
use schema::Schema;
use sqlite3_parser::ast;
use sqlite3_parser::{ast::Cmd, lexer::sql::Parser};
use std::cell::Cell;
use std::rc::Weak;
use std::sync::{Arc, OnceLock};
use std::{cell::RefCell, rc::Rc};
@ -105,6 +106,7 @@ impl Database {
schema: bootstrap_schema.clone(),
header: db_header.clone(),
db: Weak::new(),
last_insert_rowid: Cell::new(0),
});
let mut schema = Schema::new();
let rows = conn.query("SELECT * FROM sqlite_schema")?;
@ -125,6 +127,7 @@ impl Database {
schema: self.schema.clone(),
header: self.header.clone(),
db: Rc::downgrade(self),
last_insert_rowid: Cell::new(0),
})
}
}
@ -175,6 +178,7 @@ pub struct Connection {
schema: Rc<RefCell<Schema>>,
header: Rc<RefCell<DatabaseHeader>>,
db: Weak<Database>, // backpointer to the database holding this connection
last_insert_rowid: Cell<u64>,
}
impl Connection {
@ -310,6 +314,14 @@ impl Connection {
};
}
}
pub fn last_insert_rowid(&self) -> u64 {
self.last_insert_rowid.get()
}
fn update_last_rowid(&self, rowid: u64) {
self.last_insert_rowid.set(rowid);
}
}
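The counter added here lives in a `Cell<u64>` so the rowid can be updated through a shared `&self` (the VDBE only holds a `Weak` backpointer to the connection). A minimal standalone sketch of the same pattern, with illustrative names rather than the actual Limbo API:
```rust
use std::cell::Cell;

// Illustrative stand-in for a connection that tracks the last inserted rowid.
struct Conn {
    last_insert_rowid: Cell<u64>,
}

impl Conn {
    fn new() -> Self {
        Self { last_insert_rowid: Cell::new(0) }
    }

    // Read access, mirroring Connection::last_insert_rowid().
    fn last_insert_rowid(&self) -> u64 {
        self.last_insert_rowid.get()
    }

    // Called after a successful insert, mirroring Connection::update_last_rowid().
    fn update_last_rowid(&self, rowid: u64) {
        // Cell allows mutation through &self, so callers holding only a
        // shared reference can still bump the counter.
        self.last_insert_rowid.set(rowid);
    }
}

fn main() {
    let conn = Conn::new();
    conn.update_last_rowid(42);
    assert_eq!(conn.last_insert_rowid(), 42);
}
```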
pub struct Statement {

View file

@ -23,6 +23,10 @@ impl Cursor for PseudoCursor {
self.current.borrow().is_none()
}
fn root_page(&self) -> usize {
unreachable!()
}
fn rewind(&mut self) -> Result<CursorResult<()>> {
*self.current.borrow_mut() = None;
Ok(CursorResult::Ok(()))

View file

@ -1703,6 +1703,10 @@ impl Cursor for BTreeCursor {
self.record.borrow().is_none()
}
fn root_page(&self) -> usize {
self.root_page
}
fn rewind(&mut self) -> Result<CursorResult<()>> {
self.move_to_root();
@ -1772,6 +1776,7 @@ impl Cursor for BTreeCursor {
}
return_if_io!(self.insert_into_page(key, _record));
self.rowid.replace(Some(*int_key as u64));
Ok(CursorResult::Ok(()))
}

View file

@ -20,7 +20,7 @@ use super::expr::{
ConditionMetadata,
};
use super::optimizer::Optimizable;
use super::plan::{Aggregate, BTreeTableReference, Direction, Plan};
use super::plan::{Aggregate, BTreeTableReference, Direction, GroupBy, Plan};
use super::plan::{ResultSetColumn, SourceOperator};
// Metadata for handling LEFT JOIN operations
@ -282,7 +282,7 @@ fn init_order_by(
/// Initialize resources needed for GROUP BY processing
fn init_group_by(
program: &mut ProgramBuilder,
group_by: &Vec<ast::Expr>,
group_by: &GroupBy,
aggregates: &Vec<Aggregate>,
metadata: &mut Metadata,
) -> Result<()> {
@ -294,8 +294,8 @@ fn init_group_by(
let abort_flag_register = program.alloc_register();
let data_in_accumulator_indicator_register = program.alloc_register();
let group_exprs_comparison_register = program.alloc_registers(group_by.len());
let group_exprs_accumulator_register = program.alloc_registers(group_by.len());
let group_exprs_comparison_register = program.alloc_registers(group_by.exprs.len());
let group_exprs_accumulator_register = program.alloc_registers(group_by.exprs.len());
let agg_exprs_start_reg = program.alloc_registers(num_aggs);
let sorter_key_register = program.alloc_register();
@ -304,12 +304,12 @@ fn init_group_by(
let mut order = Vec::new();
const ASCENDING: i64 = 0;
for _ in group_by.iter() {
for _ in group_by.exprs.iter() {
order.push(OwnedValue::Integer(ASCENDING));
}
program.emit_insn(Insn::SorterOpen {
cursor_id: sort_cursor,
columns: aggregates.len() + group_by.len(),
columns: aggregates.len() + group_by.exprs.len(),
order: OwnedRecord::new(order),
});
@ -325,8 +325,8 @@ fn init_group_by(
);
program.emit_insn(Insn::Null {
dest: group_exprs_comparison_register,
dest_end: if group_by.len() > 1 {
Some(group_exprs_comparison_register + group_by.len() - 1)
dest_end: if group_by.exprs.len() > 1 {
Some(group_exprs_comparison_register + group_by.exprs.len() - 1)
} else {
None
},
@ -778,7 +778,7 @@ fn open_loop(
/// - a ResultRow (there is none of the above, so the loop emits a result row directly)
pub enum InnerLoopEmitTarget<'a> {
GroupBySorter {
group_by: &'a Vec<ast::Expr>,
group_by: &'a GroupBy,
aggregates: &'a Vec<Aggregate>,
},
OrderBySorter {
@ -874,7 +874,7 @@ fn inner_loop_source_emit(
group_by,
aggregates,
} => {
let sort_keys_count = group_by.len();
let sort_keys_count = group_by.exprs.len();
let aggregate_arguments_count =
aggregates.iter().map(|agg| agg.args.len()).sum::<usize>();
let column_count = sort_keys_count + aggregate_arguments_count;
@ -882,7 +882,7 @@ fn inner_loop_source_emit(
let mut cur_reg = start_reg;
// The group by sorter rows will contain the grouping keys first. They are also the sort keys.
for expr in group_by.iter() {
for expr in group_by.exprs.iter() {
let key_reg = cur_reg;
cur_reg += 1;
translate_expr(program, Some(referenced_tables), expr, key_reg, None)?;
@ -1124,7 +1124,7 @@ fn close_loop(
fn group_by_emit(
program: &mut ProgramBuilder,
result_columns: &Vec<ResultSetColumn>,
group_by: &Vec<ast::Expr>,
group_by: &GroupBy,
order_by: Option<&Vec<(ast::Expr, Direction)>>,
aggregates: &Vec<Aggregate>,
limit: Option<usize>,
@ -1153,7 +1153,7 @@ fn group_by_emit(
// all group by columns and all arguments of agg functions are in the sorter.
// the sort keys are the group by columns (the aggregation within groups is done based on how long the sort keys remain the same)
let sorter_column_count =
group_by.len() + aggregates.iter().map(|agg| agg.args.len()).sum::<usize>();
group_by.exprs.len() + aggregates.iter().map(|agg| agg.args.len()).sum::<usize>();
// sorter column names do not matter
let pseudo_columns = (0..sorter_column_count)
.map(|i| Column {
@ -1194,8 +1194,8 @@ fn group_by_emit(
});
// Read the group by columns from the pseudo cursor
let groups_start_reg = program.alloc_registers(group_by.len());
for i in 0..group_by.len() {
let groups_start_reg = program.alloc_registers(group_by.exprs.len());
for i in 0..group_by.exprs.len() {
let sorter_column_index = i;
let group_reg = groups_start_reg + i;
program.emit_insn(Insn::Column {
@ -1209,7 +1209,7 @@ fn group_by_emit(
program.emit_insn(Insn::Compare {
start_reg_a: comparison_register,
start_reg_b: groups_start_reg,
count: group_by.len(),
count: group_by.exprs.len(),
});
let agg_step_label = program.allocate_label();
@ -1232,7 +1232,7 @@ fn group_by_emit(
program.emit_insn(Insn::Move {
source_reg: groups_start_reg,
dest_reg: comparison_register,
count: group_by.len(),
count: group_by.exprs.len(),
});
program.add_comment(
@ -1269,7 +1269,7 @@ fn group_by_emit(
// Accumulate the values into the aggregations
program.resolve_label(agg_step_label, program.offset());
let start_reg = metadata.aggregation_start_register.unwrap();
let mut cursor_index = group_by.len();
let mut cursor_index = group_by.exprs.len();
for (i, agg) in aggregates.iter().enumerate() {
let agg_result_reg = start_reg + i;
translate_aggregation_groupby(
@ -1298,7 +1298,7 @@ fn group_by_emit(
);
// Read the group by columns for a finished group
for i in 0..group_by.len() {
for i in 0..group_by.exprs.len() {
let key_reg = group_exprs_start_register + i;
let sorter_column_index = i;
program.emit_insn(Insn::Column {
@ -1366,6 +1366,11 @@ fn group_by_emit(
},
termination_label,
);
let group_by_end_without_emitting_row_label = program.allocate_label();
program.defer_label_resolution(
group_by_end_without_emitting_row_label,
program.offset() as usize,
);
program.emit_insn(Insn::Return {
return_reg: group_by_metadata.subroutine_accumulator_output_return_offset_register,
});
@ -1387,14 +1392,31 @@ fn group_by_emit(
// and the agg results in (agg_start_reg..agg_start_reg + aggregates.len() - 1)
// we need to call translate_expr on each result column, but replace the expr with a register copy in case any part of the
// result column expression matches a) a group by column or b) an aggregation result.
let mut precomputed_exprs_to_register = Vec::with_capacity(aggregates.len() + group_by.len());
for (i, expr) in group_by.iter().enumerate() {
let mut precomputed_exprs_to_register =
Vec::with_capacity(aggregates.len() + group_by.exprs.len());
for (i, expr) in group_by.exprs.iter().enumerate() {
precomputed_exprs_to_register.push((expr, group_exprs_start_register + i));
}
for (i, agg) in aggregates.iter().enumerate() {
precomputed_exprs_to_register.push((&agg.original_expr, agg_start_reg + i));
}
if let Some(having) = &group_by.having {
for expr in having.iter() {
translate_condition_expr(
program,
referenced_tables,
expr,
ConditionMetadata {
jump_if_condition_is_true: false,
jump_target_when_false: group_by_end_without_emitting_row_label,
jump_target_when_true: i64::MAX, // unused
},
Some(&precomputed_exprs_to_register),
)?;
}
}
match order_by {
None => {
emit_select_result(
@ -1430,7 +1452,7 @@ fn group_by_emit(
let start_reg = group_by_metadata.group_exprs_accumulator_register;
program.emit_insn(Insn::Null {
dest: start_reg,
dest_end: Some(start_reg + group_by.len() + aggregates.len() - 1),
dest_end: Some(start_reg + group_by.exprs.len() + aggregates.len() - 1),
});
program.emit_insn(Insn::Integer {

View file

@ -698,7 +698,102 @@ pub fn translate_expr(
}
Ok(target_register)
}
ast::Expr::Case { .. } => todo!(),
ast::Expr::Case {
base,
when_then_pairs,
else_expr,
} => {
// There are two forms of CASE: one checks a base expression for equality
// against the WHEN values, and returns the corresponding THEN value if it matches:
// CASE 2 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'many' END
// And one which evaluates a series of boolean predicates:
// CASE WHEN is_good THEN 'good' WHEN is_bad THEN 'bad' ELSE 'okay' END
// This just changes which sort of branching instruction to issue, after we
// generate the expression if needed.
let return_label = program.allocate_label();
let mut next_case_label = program.allocate_label();
// Only allocate a reg to hold the base expression if one was provided.
// And base_reg then becomes the flag we check to see which sort of
// case statement we're processing.
let base_reg = base.as_ref().map(|_| program.alloc_register());
let expr_reg = program.alloc_register();
if let Some(base_expr) = base {
translate_expr(
program,
referenced_tables,
base_expr,
base_reg.unwrap(),
precomputed_exprs_to_registers,
)?;
};
for (when_expr, then_expr) in when_then_pairs {
translate_expr(
program,
referenced_tables,
when_expr,
expr_reg,
precomputed_exprs_to_registers,
)?;
match base_reg {
// CASE 1 WHEN 0 THEN 0 ELSE 1 becomes 1==0, Ne branch to next clause
Some(base_reg) => program.emit_insn_with_label_dependency(
Insn::Ne {
lhs: base_reg,
rhs: expr_reg,
target_pc: next_case_label,
},
next_case_label,
),
// CASE WHEN 0 THEN 0 ELSE 1 becomes ifnot 0 branch to next clause
None => program.emit_insn_with_label_dependency(
Insn::IfNot {
reg: expr_reg,
target_pc: next_case_label,
null_reg: 1,
},
next_case_label,
),
};
// THEN...
translate_expr(
program,
referenced_tables,
then_expr,
target_register,
precomputed_exprs_to_registers,
)?;
program.emit_insn_with_label_dependency(
Insn::Goto {
target_pc: return_label,
},
return_label,
);
// This becomes either the next WHEN, or in the last WHEN/THEN, we're
// assured to have at least one instruction corresponding to the ELSE immediately follow.
program.preassign_label_to_next_insn(next_case_label);
next_case_label = program.allocate_label();
}
match else_expr {
Some(expr) => {
translate_expr(
program,
referenced_tables,
expr,
target_register,
precomputed_exprs_to_registers,
)?;
}
// If ELSE isn't specified, it means ELSE null.
None => {
program.emit_insn(Insn::Null {
dest: target_register,
dest_end: None,
});
}
};
program.resolve_label(return_label, program.offset());
Ok(target_register)
}
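The branches above encode ordinary first-match CASE semantics: with a base expression each WHEN arm is an equality test (the `Ne` jump), without one it is a truthiness test (the `IfNot` jump), and a missing ELSE yields NULL. A self-contained Rust sketch of that evaluation order, for illustration only (not Limbo's runtime code):
```rust
// A minimal model of the branch structure emitted above: the first matching
// WHEN arm's THEN value is returned, otherwise ELSE, otherwise NULL (None).
fn eval_case(base: Option<i64>, when_then: &[(i64, i64)], else_val: Option<i64>) -> Option<i64> {
    for &(when, then) in when_then {
        let take_this_arm = match base {
            Some(b) => b == when, // CASE base WHEN v THEN ... (Ne -> next arm)
            None => when != 0,    // CASE WHEN predicate THEN ... (IfNot -> next arm)
        };
        if take_this_arm {
            return Some(then);
        }
    }
    else_val // None models the implicit ELSE NULL
}

fn main() {
    // CASE 2 WHEN 1 THEN 10 WHEN 2 THEN 20 ELSE 99 END  ==> 20
    assert_eq!(eval_case(Some(2), &[(1, 10), (2, 20)], Some(99)), Some(20));
    // CASE WHEN 0 THEN 10 END  ==> NULL
    assert_eq!(eval_case(None, &[(0, 10)], None), None);
}
```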
ast::Expr::Cast { expr, type_name } => {
let type_name = type_name.as_ref().unwrap(); // TODO: why is this optional?
let reg_expr = program.alloc_register();
@ -858,6 +953,16 @@ pub fn translate_expr(
Ok(target_register)
}
ScalarFunc::LastInsertRowid => {
let regs = program.alloc_register();
program.emit_insn(Insn::Function {
constant_mask: 0,
start_reg: regs,
dest: target_register,
func: func_ctx,
});
Ok(target_register)
}
ScalarFunc::Concat => {
let args = if let Some(args) = args {
args
@ -871,7 +976,6 @@ pub fn translate_expr(
for arg in args.iter() {
let reg = program.alloc_register();
start_reg = Some(start_reg.unwrap_or(reg));
translate_expr(
program,
referenced_tables,
@ -1059,6 +1163,7 @@ pub fn translate_expr(
| ScalarFunc::Lower
| ScalarFunc::Upper
| ScalarFunc::Length
| ScalarFunc::OctetLength
| ScalarFunc::Typeof
| ScalarFunc::Unicode
| ScalarFunc::Quote

View file

@ -260,16 +260,24 @@ fn eliminate_constants(
/**
Recursively pushes predicates down the tree, as far as possible.
Where a predicate is pushed determines at which loop level it will be evaluated.
For example, in SELECT * FROM t1 JOIN t2 JOIN t3 WHERE t1.a = t2.a AND t2.b = t3.b AND t1.c = 1
the predicate t1.c = 1 can be pushed to t1 and will be evaluated in the first (outermost) loop,
the predicate t1.a = t2.a can be pushed to t2 and will be evaluated in the second loop
while t2.b = t3.b will be evaluated in the third loop.
*/
fn push_predicates(
operator: &mut SourceOperator,
where_clause: &mut Option<Vec<ast::Expr>>,
referenced_tables: &Vec<BTreeTableReference>,
) -> Result<()> {
// First try to push down any predicates from the WHERE clause
if let Some(predicates) = where_clause {
let mut i = 0;
while i < predicates.len() {
// Take ownership of predicate to try pushing it down
let predicate = predicates[i].take_ownership();
// If predicate was successfully pushed (None returned), remove it from WHERE
let Some(predicate) = push_predicate(operator, predicate, referenced_tables)? else {
predicates.remove(i);
continue;
@ -277,10 +285,12 @@ fn push_predicates(
predicates[i] = predicate;
i += 1;
}
// Clean up empty WHERE clause
if predicates.is_empty() {
*where_clause = None;
}
}
match operator {
SourceOperator::Join {
left,
@ -289,6 +299,7 @@ fn push_predicates(
outer,
..
} => {
// Recursively push predicates down both sides of join
push_predicates(left, where_clause, referenced_tables)?;
push_predicates(right, where_clause, referenced_tables)?;
@ -300,34 +311,41 @@ fn push_predicates(
let mut i = 0;
while i < predicates.len() {
// try to push the predicate to the left side first, then to the right side
// temporarily take ownership of the predicate
let predicate_owned = predicates[i].take_ownership();
// left join predicates cant be pushed to the left side
// For a join like SELECT * FROM left INNER JOIN right ON left.id = right.id AND left.name = 'foo'
// the predicate left.name = 'foo' can already be evaluated in the outer loop (left side of join)
// because the row can immediately be skipped if left.name != 'foo'.
// But for a LEFT JOIN, we can't do this since we need to ensure that all rows from the left table are included,
// even if there are no matching rows from the right table. This is why we can't push LEFT JOIN predicates to the left side.
let push_result = if *outer {
Some(predicate_owned)
} else {
push_predicate(left, predicate_owned, referenced_tables)?
};
// if the predicate was pushed to a child, remove it from the list
// Try pushing to left side first (see comment above for reasoning)
let Some(predicate) = push_result else {
predicates.remove(i);
continue;
};
// otherwise try to push it to the right side
// if it was pushed to the right side, remove it from the list
// Then try right side
let Some(predicate) = push_predicate(right, predicate, referenced_tables)? else {
predicates.remove(i);
continue;
};
// otherwise keep the predicate in the list
// If neither side could take it, keep in join predicates (not sure if this actually happens in practice)
// this is effectively the same as pushing to the right side, so maybe it could be removed and assert here
// that we don't reach this code
predicates[i] = predicate;
i += 1;
}
Ok(())
}
// Base cases - nowhere else to push to
SourceOperator::Scan { .. } => Ok(()),
SourceOperator::Search { .. } => Ok(()),
SourceOperator::Nothing => Ok(()),
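The pushability rule applied by push_predicate in the next hunk boils down to a single bitmask comparison: a predicate may be evaluated at a table's loop only if it references no table to that table's right in the join order. A standalone sketch of that check, with illustrative names:
```rust
// Illustrative only: a self-contained model of the bitmask test used below.
// Table positions follow join order: t1 -> bit 0, t2 -> bit 1, t3 -> bit 2.
fn can_push_to(table_index: usize, predicate_bitmask: u32) -> bool {
    // Pushable if no bit at or above (table_index + 1) is set, i.e. the
    // predicate only references this table and tables to its left.
    let next_table_on_the_right = 1u32 << (table_index + 1);
    predicate_bitmask < next_table_on_the_right
}

fn main() {
    let (t1, t2, t3) = (0b001u32, 0b010u32, 0b100u32);
    // t1.c = 1 references only t1 -> evaluable in the outermost loop (t1).
    assert!(can_push_to(0, t1));
    // t1.a = t2.a references t1 and t2 -> evaluable at t2, but not at t1.
    assert!(!can_push_to(0, t1 | t2));
    assert!(can_push_to(1, t1 | t2));
    // t2.b = t3.b references t2 and t3 -> only evaluable at t3.
    assert!(!can_push_to(1, t2 | t3));
    assert!(can_push_to(2, t2 | t3));
}
```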
@ -349,24 +367,29 @@ fn push_predicate(
table_reference,
..
} => {
// Find position of this table in referenced_tables array
let table_index = referenced_tables
.iter()
.position(|t| t.table_identifier == table_reference.table_identifier)
.unwrap();
// Get bitmask showing which tables this predicate references
let predicate_bitmask =
get_table_ref_bitmask_for_ast_expr(referenced_tables, &predicate)?;
// the expression is allowed to refer to tables on its left, i.e. the righter bits in the mask
// e.g. if this table is 0010, and the table on its right in the join is 0100:
// if predicate_bitmask is 0011, the predicate can be pushed (refers to this table and the table on its left)
// if predicate_bitmask is 0001, the predicate can be pushed (refers to the table on its left)
// if predicate_bitmask is 0101, the predicate can't be pushed (refers to this table and a table on its right)
// Each table has a bit position based on join order from left to right
// e.g. in SELECT * FROM t1 JOIN t2 JOIN t3
// t1 is position 0 (001), t2 is position 1 (010), t3 is position 2 (100)
// To push a predicate to a given table, it can only reference that table and tables to its left
// Example: For table t2 at position 1 (bit 010):
// - Can push: 011 (t2 + t1), 001 (just t1), 010 (just t2)
// - Can't push: 110 (t2 + t3)
let next_table_on_the_right_in_join_bitmask = 1 << (table_index + 1);
if predicate_bitmask >= next_table_on_the_right_in_join_bitmask {
return Ok(Some(predicate));
}
// Add predicate to this table's filters
if predicates.is_none() {
predicates.replace(vec![predicate]);
} else {
@ -375,7 +398,8 @@ fn push_predicate(
Ok(None)
}
SourceOperator::Search { .. } => Ok(Some(predicate)),
// Search nodes don't exist yet at this point; Scans are transformed to Search in use_indexes()
SourceOperator::Search { .. } => unreachable!(),
SourceOperator::Join {
left,
right,
@ -383,31 +407,36 @@ fn push_predicate(
outer,
..
} => {
// Try pushing to left side first
let push_result_left = push_predicate(left, predicate, referenced_tables)?;
if push_result_left.is_none() {
return Ok(None);
}
// Then try right side
let push_result_right =
push_predicate(right, push_result_left.unwrap(), referenced_tables)?;
if push_result_right.is_none() {
return Ok(None);
}
// For LEFT JOIN, predicates must stay at join level
if *outer {
return Ok(Some(push_result_right.unwrap()));
}
let pred = push_result_right.unwrap();
// Get bitmasks for tables referenced in predicate and both sides of join
let table_refs_bitmask = get_table_ref_bitmask_for_ast_expr(referenced_tables, &pred)?;
let left_bitmask = get_table_ref_bitmask_for_operator(referenced_tables, left)?;
let right_bitmask = get_table_ref_bitmask_for_operator(referenced_tables, right)?;
// If predicate doesn't reference tables from both sides, it can't be a join condition
if table_refs_bitmask & left_bitmask == 0 || table_refs_bitmask & right_bitmask == 0 {
return Ok(Some(pred));
}
// Add as join predicate since it references both sides
if join_on_preds.is_none() {
join_on_preds.replace(vec![pred]);
} else {

View file

@ -8,7 +8,7 @@ use sqlite3_parser::ast;
use crate::{
function::AggFunc,
schema::{BTreeTable, Index},
schema::{BTreeTable, Column, Index},
Result,
};
@ -19,6 +19,13 @@ pub struct ResultSetColumn {
pub contains_aggregates: bool,
}
#[derive(Debug)]
pub struct GroupBy {
pub exprs: Vec<ast::Expr>,
/// having clause split into a vec at 'AND' boundaries.
pub having: Option<Vec<ast::Expr>>,
}
#[derive(Debug)]
pub struct Plan {
/// A tree of sources (tables).
@ -28,7 +35,7 @@ pub struct Plan {
/// where clause split into a vec at 'AND' boundaries.
pub where_clause: Option<Vec<ast::Expr>>,
/// group by clause
pub group_by: Option<Vec<ast::Expr>>,
pub group_by: Option<GroupBy>,
/// order by clause
pub order_by: Option<Vec<(ast::Expr, Direction)>>,
/// all the aggregates collected from the result columns, order by, and (TODO) having clauses
@ -53,6 +60,64 @@ pub enum IterationDirection {
Backwards,
}
impl SourceOperator {
pub fn select_star(&self, out_columns: &mut Vec<ResultSetColumn>) {
for (table_ref, col, idx) in self.select_star_helper() {
out_columns.push(ResultSetColumn {
expr: ast::Expr::Column {
database: None,
table: table_ref.table_index,
column: idx,
is_rowid_alias: col.primary_key,
},
contains_aggregates: false,
});
}
}
/// All this ceremony is required to deduplicate columns when joining with USING
fn select_star_helper(&self) -> Vec<(&BTreeTableReference, &Column, usize)> {
match self {
SourceOperator::Join {
left, right, using, ..
} => {
let mut columns = left.select_star_helper();
// Join columns are filtered out from the right side
// in the case of a USING join.
if let Some(using_cols) = using {
let right_columns = right.select_star_helper();
for (table_ref, col, idx) in right_columns {
if !using_cols
.iter()
.any(|using_col| col.name.eq_ignore_ascii_case(&using_col.0))
{
columns.push((table_ref, col, idx));
}
}
} else {
columns.extend(right.select_star_helper());
}
columns
}
SourceOperator::Scan {
table_reference, ..
}
| SourceOperator::Search {
table_reference, ..
} => table_reference
.table
.columns
.iter()
.enumerate()
.map(|(i, col)| (table_reference, col, i))
.collect(),
SourceOperator::Nothing => Vec::new(),
}
}
}
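The USING deduplication above amounts to: keep every left-hand column, and keep a right-hand column only if its name is not one of the USING columns (compared case-insensitively). A standalone sketch of that filter, with illustrative names:
```rust
// Returns the SELECT * column names for `left JOIN right USING (using_cols)`:
// all left columns, plus right columns whose names are not in the USING list.
fn select_star_columns(left: &[&str], right: &[&str], using_cols: &[&str]) -> Vec<String> {
    let mut out: Vec<String> = left.iter().map(|c| c.to_string()).collect();
    for col in right {
        let shared = using_cols.iter().any(|u| u.eq_ignore_ascii_case(col));
        if !shared {
            out.push(col.to_string());
        }
    }
    out
}

fn main() {
    // t1(id, name) JOIN t2(id, email) USING (id) -> id, name, email
    assert_eq!(
        select_star_columns(&["id", "name"], &["id", "email"], &["id"]),
        vec!["id", "name", "email"]
    );
}
```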
/**
A SourceOperator is a Node in the query plan that reads data from a table.
*/
@ -68,6 +133,7 @@ pub enum SourceOperator {
right: Box<SourceOperator>,
predicates: Option<Vec<ast::Expr>>,
outer: bool,
using: Option<ast::DistinctNames>,
},
// Scan operator
// This operator is used to scan a table.
@ -299,7 +365,7 @@ pub fn get_table_ref_bitmask_for_operator<'a>(
table_refs_mask |= 1
<< tables
.iter()
.position(|t| Rc::ptr_eq(&t.table, &table_reference.table))
.position(|t| &t.table_identifier == &table_reference.table_identifier)
.unwrap();
}
SourceOperator::Search {
@ -308,7 +374,7 @@ pub fn get_table_ref_bitmask_for_operator<'a>(
table_refs_mask |= 1
<< tables
.iter()
.position(|t| Rc::ptr_eq(&t.table, &table_reference.table))
.position(|t| &t.table_identifier == &table_reference.table_identifier)
.unwrap();
}
SourceOperator::Nothing => {}

View file

@ -1,5 +1,5 @@
use super::plan::{
Aggregate, BTreeTableReference, Direction, Plan, ResultSetColumn, SourceOperator,
Aggregate, BTreeTableReference, Direction, GroupBy, Plan, ResultSetColumn, SourceOperator,
};
use crate::{function::Func, schema::Schema, util::normalize_ident, Result};
use sqlite3_parser::ast::{self, FromClause, JoinType, ResultColumn};
@ -19,9 +19,9 @@ impl OperatorIdCounter {
}
}
fn resolve_aggregates(expr: &ast::Expr, aggs: &mut Vec<Aggregate>) {
fn resolve_aggregates(expr: &ast::Expr, aggs: &mut Vec<Aggregate>) -> bool {
if aggs.iter().any(|a| a.original_expr == *expr) {
return;
return true;
}
match expr {
ast::Expr::FunctionCall { name, args, .. } => {
@ -31,17 +31,22 @@ fn resolve_aggregates(expr: &ast::Expr, aggs: &mut Vec<Aggregate>) {
0
};
match Func::resolve_function(normalize_ident(name.0.as_str()).as_str(), args_count) {
Ok(Func::Agg(f)) => aggs.push(Aggregate {
func: f,
args: args.clone().unwrap_or_default(),
original_expr: expr.clone(),
}),
Ok(Func::Agg(f)) => {
aggs.push(Aggregate {
func: f,
args: args.clone().unwrap_or_default(),
original_expr: expr.clone(),
});
true
}
_ => {
let mut contains_aggregates = false;
if let Some(args) = args {
for arg in args.iter() {
resolve_aggregates(arg, aggs);
contains_aggregates |= resolve_aggregates(arg, aggs);
}
}
contains_aggregates
}
}
}
@ -53,15 +58,20 @@ fn resolve_aggregates(expr: &ast::Expr, aggs: &mut Vec<Aggregate>) {
func: f,
args: vec![],
original_expr: expr.clone(),
})
});
true
} else {
false
}
}
ast::Expr::Binary(lhs, _, rhs) => {
resolve_aggregates(lhs, aggs);
resolve_aggregates(rhs, aggs);
let mut contains_aggregates = false;
contains_aggregates |= resolve_aggregates(lhs, aggs);
contains_aggregates |= resolve_aggregates(rhs, aggs);
contains_aggregates
}
// TODO: handle other expressions that may contain aggregates
_ => {}
_ => false,
}
}
@ -271,19 +281,7 @@ pub fn prepare_select_plan<'a>(schema: &Schema, select: ast::Select) -> Result<P
for column in columns.clone() {
match column {
ast::ResultColumn::Star => {
for table_reference in plan.referenced_tables.iter() {
for (idx, col) in table_reference.table.columns.iter().enumerate() {
plan.result_columns.push(ResultSetColumn {
expr: ast::Expr::Column {
database: None, // TODO: support different databases
table: table_reference.table_index,
column: idx,
is_rowid_alias: col.primary_key,
},
contains_aggregates: false,
});
}
}
plan.source.select_star(&mut plan.result_columns);
}
ast::ResultColumn::TableStar(name) => {
let name_normalized = normalize_ident(name.0.as_str());
@ -340,10 +338,8 @@ pub fn prepare_select_plan<'a>(schema: &Schema, select: ast::Select) -> Result<P
});
}
Ok(_) => {
let cur_agg_count = aggregate_expressions.len();
resolve_aggregates(&expr, &mut aggregate_expressions);
let contains_aggregates =
cur_agg_count != aggregate_expressions.len();
resolve_aggregates(&expr, &mut aggregate_expressions);
plan.result_columns.push(ResultSetColumn {
expr: expr.clone(),
contains_aggregates,
@ -380,10 +376,8 @@ pub fn prepare_select_plan<'a>(schema: &Schema, select: ast::Select) -> Result<P
}
}
expr => {
let cur_agg_count = aggregate_expressions.len();
resolve_aggregates(expr, &mut aggregate_expressions);
let contains_aggregates =
cur_agg_count != aggregate_expressions.len();
resolve_aggregates(expr, &mut aggregate_expressions);
plan.result_columns.push(ResultSetColumn {
expr: expr.clone(),
contains_aggregates,
@ -393,18 +387,37 @@ pub fn prepare_select_plan<'a>(schema: &Schema, select: ast::Select) -> Result<P
}
}
}
if let Some(group_by) = group_by.as_mut() {
if let Some(mut group_by) = group_by {
for expr in group_by.exprs.iter_mut() {
bind_column_references(expr, &plan.referenced_tables)?;
}
if aggregate_expressions.is_empty() {
crate::bail_parse_error!(
"GROUP BY clause without aggregate functions is not allowed"
);
}
plan.group_by = Some(GroupBy {
exprs: group_by.exprs,
having: if let Some(having) = group_by.having {
let mut predicates = vec![];
break_predicate_at_and_boundaries(having, &mut predicates);
for expr in predicates.iter_mut() {
bind_column_references(expr, &plan.referenced_tables)?;
let contains_aggregates =
resolve_aggregates(expr, &mut aggregate_expressions);
if !contains_aggregates {
// TODO: sqlite allows HAVING clauses with non aggregate expressions like
// HAVING id = 5. We should support this too eventually (I guess).
// sqlite3-parser does not support HAVING without group by though, so we'll
// need to either make a PR or add it to our vendored version.
crate::bail_parse_error!(
"HAVING clause must contain an aggregate function"
);
}
}
Some(predicates)
} else {
None
},
});
}
plan.group_by = group_by.map(|g| g.exprs);
plan.aggregates = if aggregate_expressions.is_empty() {
None
} else {
@ -513,13 +526,14 @@ fn parse_from(
let mut table_index = 1;
for join in from.joins.unwrap_or_default().into_iter() {
let (right, outer, predicates) =
let (right, outer, using, predicates) =
parse_join(schema, join, operator_id_counter, &mut tables, table_index)?;
operator = SourceOperator::Join {
left: Box::new(operator),
right: Box::new(right),
predicates,
outer,
using,
id: operator_id_counter.get_next_id(),
};
table_index += 1;
@ -534,7 +548,12 @@ fn parse_join(
operator_id_counter: &mut OperatorIdCounter,
tables: &mut Vec<BTreeTableReference>,
table_index: usize,
) -> Result<(SourceOperator, bool, Option<Vec<ast::Expr>>)> {
) -> Result<(
SourceOperator,
bool,
Option<ast::DistinctNames>,
Option<Vec<ast::Expr>>,
)> {
let ast::JoinedSelectTable {
operator,
table,
@ -563,18 +582,62 @@ fn parse_join(
tables.push(table.clone());
let outer = match operator {
let (outer, natural) = match operator {
ast::JoinOperator::TypedJoin(Some(join_type)) => {
if join_type == JoinType::LEFT | JoinType::OUTER {
true
} else {
join_type == JoinType::RIGHT | JoinType::OUTER
}
let is_outer = join_type.contains(JoinType::OUTER);
let is_natural = join_type.contains(JoinType::NATURAL);
(is_outer, is_natural)
}
_ => false,
_ => (false, false),
};
let mut using = None;
let mut predicates = None;
if natural && constraint.is_some() {
crate::bail_parse_error!("NATURAL JOIN cannot be combined with ON or USING clause");
}
let constraint = if natural {
// NATURAL JOIN is first transformed into a USING join with the common columns
let left_tables = &tables[..table_index];
assert!(!left_tables.is_empty());
let right_table = &tables[table_index];
let right_cols = &right_table.table.columns;
let mut distinct_names = None;
// TODO: O(n^2) maybe not great for large tables or big multiway joins
for right_col in right_cols.iter() {
let mut found_match = false;
for left_table in left_tables.iter() {
for left_col in left_table.table.columns.iter() {
if left_col.name == right_col.name {
if distinct_names.is_none() {
distinct_names =
Some(ast::DistinctNames::new(ast::Name(left_col.name.clone())));
} else {
distinct_names
.as_mut()
.unwrap()
.insert(ast::Name(left_col.name.clone()))
.unwrap();
}
found_match = true;
break;
}
}
if found_match {
break;
}
}
}
if distinct_names.is_none() {
crate::bail_parse_error!("No columns found to NATURAL join on");
}
Some(ast::JoinConstraint::Using(distinct_names.unwrap()))
} else {
constraint
};
if let Some(constraint) = constraint {
match constraint {
ast::JoinConstraint::On(expr) => {
@ -585,7 +648,66 @@ fn parse_join(
}
predicates = Some(preds);
}
ast::JoinConstraint::Using(_) => todo!("USING joins not supported yet"),
ast::JoinConstraint::Using(distinct_names) => {
// USING join is replaced with a list of equality predicates
let mut using_predicates = vec![];
for distinct_name in distinct_names.iter() {
let name_normalized = normalize_ident(distinct_name.0.as_str());
let left_tables = &tables[..table_index];
assert!(!left_tables.is_empty());
let right_table = &tables[table_index];
let mut left_col = None;
for (left_table_idx, left_table) in left_tables.iter().enumerate() {
left_col = left_table
.table
.columns
.iter()
.enumerate()
.find(|(_, col)| col.name == name_normalized)
.map(|(idx, col)| (left_table_idx, idx, col));
if left_col.is_some() {
break;
}
}
if left_col.is_none() {
crate::bail_parse_error!(
"cannot join using column {} - column not present in all tables",
distinct_name.0
);
}
let right_col = right_table
.table
.columns
.iter()
.enumerate()
.find(|(_, col)| col.name == name_normalized);
if right_col.is_none() {
crate::bail_parse_error!(
"cannot join using column {} - column not present in all tables",
distinct_name.0
);
}
let (left_table_idx, left_col_idx, left_col) = left_col.unwrap();
let (right_col_idx, right_col) = right_col.unwrap();
using_predicates.push(ast::Expr::Binary(
Box::new(ast::Expr::Column {
database: None,
table: left_table_idx,
column: left_col_idx,
is_rowid_alias: left_col.primary_key,
}),
ast::Operator::Equals,
Box::new(ast::Expr::Column {
database: None,
table: right_table.table_index,
column: right_col_idx,
is_rowid_alias: right_col.primary_key,
}),
));
}
predicates = Some(using_predicates);
using = Some(distinct_names);
}
}
}
@ -597,6 +719,7 @@ fn parse_join(
iter_dir: None,
},
outer,
using,
predicates,
))
}

View file

@ -124,7 +124,9 @@ impl PartialOrd<OwnedValue> for OwnedValue {
(OwnedValue::Null, _) => Some(std::cmp::Ordering::Less),
(_, OwnedValue::Null) => Some(std::cmp::Ordering::Greater),
(OwnedValue::Agg(a), OwnedValue::Agg(b)) => a.partial_cmp(b),
_ => None,
(OwnedValue::Agg(a), other) => a.final_value().partial_cmp(other),
(other, OwnedValue::Agg(b)) => other.partial_cmp(b.final_value()),
other => todo!("{:?}", other),
}
}
}
@ -425,6 +427,7 @@ pub enum SeekKey<'a> {
pub trait Cursor {
fn is_empty(&self) -> bool;
fn root_page(&self) -> usize;
fn rewind(&mut self) -> Result<CursorResult<()>>;
fn last(&mut self) -> Result<CursorResult<()>>;
fn next(&mut self) -> Result<CursorResult<()>>;

View file

@ -599,6 +599,7 @@ impl ProgramState {
}
}
#[derive(Debug)]
pub struct Program {
pub max_registers: usize,
pub insns: Vec<Insn>,
@ -2112,6 +2113,14 @@ impl Program {
let result = exec_instr(reg_value, pattern_value);
state.registers[*dest] = result;
}
ScalarFunc::LastInsertRowid => {
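// last_insert_rowid() reports the rowid recorded on this connection by the most
// recent completed INSERT, or NULL if the connection handle is no longer alive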
if let Some(conn) = self.connection.upgrade() {
state.registers[*dest] =
OwnedValue::Integer(conn.last_insert_rowid() as i64);
} else {
state.registers[*dest] = OwnedValue::Null;
}
}
ScalarFunc::Like => {
let pattern = &state.registers[*start_reg];
let text = &state.registers[*start_reg + 1];
@ -2134,6 +2143,7 @@ impl Program {
| ScalarFunc::Lower
| ScalarFunc::Upper
| ScalarFunc::Length
| ScalarFunc::OctetLength
| ScalarFunc::Typeof
| ScalarFunc::Unicode
| ScalarFunc::Quote
@ -2147,6 +2157,7 @@ impl Program {
ScalarFunc::Lower => exec_lower(reg_value),
ScalarFunc::Upper => exec_upper(reg_value),
ScalarFunc::Length => Some(exec_length(reg_value)),
ScalarFunc::OctetLength => Some(exec_octet_length(reg_value)),
ScalarFunc::Typeof => Some(exec_typeof(reg_value)),
ScalarFunc::Unicode => Some(exec_unicode(reg_value)),
ScalarFunc::Quote => Some(exec_quote(reg_value)),
@ -2190,7 +2201,12 @@ impl Program {
}
ScalarFunc::Round => {
let reg_value = state.registers[*start_reg].clone();
let precision_value = state.registers.get(*start_reg + 1).cloned();
assert!(arg_count == 1 || arg_count == 2);
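// round(X) with one argument uses precision 0; round(X, Y) reads the precision from the next register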
let precision_value = if arg_count > 1 {
Some(state.registers[*start_reg + 1].clone())
} else {
None
};
let result = exec_round(&reg_value, precision_value);
state.registers[*dest] = result;
}
@ -2315,6 +2331,14 @@ impl Program {
Insn::InsertAwait { cursor_id } => {
let cursor = cursors.get_mut(cursor_id).unwrap();
cursor.wait_for_completion()?;
// Only update last_insert_rowid for regular table inserts, not schema modifications
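// (root page 1 holds the sqlite_schema table, so inserts there are schema writes, not user rows)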
if cursor.root_page() != 1 {
if let Some(rowid) = cursor.rowid()? {
if let Some(conn) = self.connection.upgrade() {
conn.update_last_rowid(rowid);
}
}
}
state.pc += 1;
}
Insn::NewRowid {
@ -2533,7 +2557,7 @@ fn exec_lower(reg: &OwnedValue) -> Option<OwnedValue> {
fn exec_length(reg: &OwnedValue) -> OwnedValue {
match reg {
OwnedValue::Text(_) | OwnedValue::Integer(_) | OwnedValue::Float(_) => {
OwnedValue::Integer(reg.to_string().len() as i64)
OwnedValue::Integer(reg.to_string().chars().count() as i64)
}
OwnedValue::Blob(blob) => OwnedValue::Integer(blob.len() as i64),
OwnedValue::Agg(aggctx) => exec_length(aggctx.final_value()),
@ -2541,6 +2565,17 @@ fn exec_length(reg: &OwnedValue) -> OwnedValue {
}
}
fn exec_octet_length(reg: &OwnedValue) -> OwnedValue {
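// octet_length() counts bytes in the value's text representation (UTF-8 here),
// unlike length() above, which counts characters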
match reg {
OwnedValue::Text(_) | OwnedValue::Integer(_) | OwnedValue::Float(_) => {
OwnedValue::Integer(reg.to_string().into_bytes().len() as i64)
}
OwnedValue::Blob(blob) => OwnedValue::Integer(blob.len() as i64),
OwnedValue::Agg(aggctx) => exec_octet_length(aggctx.final_value()),
_ => reg.to_owned(),
}
}
fn exec_upper(reg: &OwnedValue) -> Option<OwnedValue> {
match reg {
OwnedValue::Text(t) => Some(OwnedValue::Text(Rc::new(t.to_uppercase()))),
@ -2555,7 +2590,10 @@ fn exec_concat(registers: &[OwnedValue]) -> OwnedValue {
OwnedValue::Text(text) => result.push_str(text),
OwnedValue::Integer(i) => result.push_str(&i.to_string()),
OwnedValue::Float(f) => result.push_str(&f.to_string()),
_ => continue,
OwnedValue::Agg(aggctx) => result.push_str(&aggctx.final_value().to_string()),
OwnedValue::Null => continue,
OwnedValue::Blob(_) => todo!("TODO concat blob"),
OwnedValue::Record(_) => unreachable!(),
}
}
OwnedValue::Text(Rc::new(result))
@ -2910,20 +2948,27 @@ fn exec_unicode(reg: &OwnedValue) -> OwnedValue {
}
}
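// Best-effort numeric coercion used by exec_round(): text parses as f64 (0.0 on failure),
// integers and floats convert directly, and anything else becomes 0.0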
fn _to_float(reg: &OwnedValue) -> f64 {
match reg {
OwnedValue::Text(x) => x.parse().unwrap_or(0.0),
OwnedValue::Integer(x) => *x as f64,
OwnedValue::Float(x) => *x,
_ => 0.0,
}
}
fn exec_round(reg: &OwnedValue, precision: Option<OwnedValue>) -> OwnedValue {
let precision = match precision {
Some(OwnedValue::Text(x)) => x.parse().unwrap_or(0.0),
Some(OwnedValue::Integer(x)) => x as f64,
Some(OwnedValue::Float(x)) => x,
None => 0.0,
_ => return OwnedValue::Null,
Some(OwnedValue::Null) => return OwnedValue::Null,
_ => 0.0,
};
let reg = match reg {
OwnedValue::Text(x) => x.parse().unwrap_or(0.0),
OwnedValue::Integer(x) => *x as f64,
OwnedValue::Float(x) => *x,
_ => return reg.to_owned(),
OwnedValue::Agg(ctx) => _to_float(ctx.final_value()),
_ => _to_float(reg),
};
let precision = if precision < 1.0 { 0.0 } else { precision };
@ -3228,6 +3273,10 @@ mod tests {
}
impl Cursor for MockCursor {
fn root_page(&self) -> usize {
unreachable!()
}
fn seek_to_last(&mut self) -> Result<CursorResult<()>> {
self.seek_to_last()
}
@ -3764,6 +3813,14 @@ mod tests {
let precision_val = OwnedValue::Integer(1);
let expected_val = OwnedValue::Float(123.0);
assert_eq!(exec_round(&input_val, Some(precision_val)), expected_val);
let input_val = OwnedValue::Float(100.123);
let expected_val = OwnedValue::Float(100.0);
assert_eq!(exec_round(&input_val, None), expected_val);
let input_val = OwnedValue::Float(100.123);
let expected_val = OwnedValue::Null;
assert_eq!(exec_round(&input_val, Some(OwnedValue::Null)), expected_val);
}
#[test]

View file

@ -26,6 +26,10 @@ impl Cursor for Sorter {
self.current.borrow().is_none()
}
fn root_page(&self) -> usize {
unreachable!()
}
// We do the sorting here since this is what is called by the SorterSort instruction
fn rewind(&mut self) -> Result<CursorResult<()>> {
self.records.sort_by(|a, b| {

View file

@ -4,7 +4,7 @@ version = "0.1.0"
edition = "2021"
[dependencies]
clap = { version = "4.4.2", features = ["derive"] }
clap = { version = "4.5", features = ["derive"] }
env_logger = "0.11.0"
hdrhistogram = "7.5.2"
limbo_core = { path = "../../../core" }

View file

@ -4,7 +4,7 @@ version = "0.1.0"
edition = "2021"
[dependencies]
clap = { version = "4.4.2", features = ["derive"] }
clap = { version = "4.5", features = ["derive"] }
hdrhistogram = "7.5.2"
rusqlite = "0.29.0"

View file

@ -72,6 +72,7 @@ def get_pr_info(g, repo, pr_number):
'title': pr.title,
'author': author_name,
'head': pr.head.ref,
'head_sha': pr.head.sha,
'body': pr.body.strip() if pr.body else '',
'reviewed_by': reviewed_by
}
@ -123,35 +124,34 @@ def merge_pr(pr_number):
temp_file.write(commit_message)
temp_file_path = temp_file.name
# Fetch the PR branch
cmd = f"git fetch origin pull/{pr_number}/head:{pr_info['head']}"
output, error, returncode = run_command(cmd)
if returncode != 0:
print(f"Error fetching PR branch: {error}")
try:
# Instead of fetching to a branch, fetch the specific commit
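# (merging the SHA directly also avoids creating or clobbering a local branch named after the PR head)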
cmd = f"git fetch origin pull/{pr_number}/head"
output, error, returncode = run_command(cmd)
if returncode != 0:
print(f"Error fetching PR: {error}")
sys.exit(1)
# Checkout main branch
cmd = "git checkout main"
output, error, returncode = run_command(cmd)
if returncode != 0:
print(f"Error checking out main branch: {error}")
sys.exit(1)
# Merge using the commit SHA instead of branch name
cmd = f"git merge --no-ff {pr_info['head_sha']} -F {temp_file_path}"
output, error, returncode = run_command(cmd)
if returncode != 0:
print(f"Error merging PR: {error}")
sys.exit(1)
print("Pull request merged successfully!")
print(f"Merge commit message:\n{commit_message}")
finally:
# Clean up the temporary file
os.unlink(temp_file_path)
sys.exit(1)
# Checkout main branch
cmd = "git checkout main"
output, error, returncode = run_command(cmd)
if returncode != 0:
print(f"Error checking out main branch: {error}")
os.unlink(temp_file_path)
sys.exit(1)
# Merge the PR
cmd = f"git merge --no-ff {pr_info['head']} -F {temp_file_path}"
output, error, returncode = run_command(cmd)
if returncode != 0:
print(f"Error merging PR: {error}")
os.unlink(temp_file_path)
sys.exit(1)
# Clean up the temporary file
os.unlink(temp_file_path)
print("Pull request merged successfully!")
print(f"Merge commit message:\n{commit_message}")
if __name__ == "__main__":
if len(sys.argv) != 2:

View file

@ -14,7 +14,7 @@ path = "src/lib.rs"
[dependencies]
anyhow = "1.0.75"
clap = { version = "4.4.0", features = ["derive"] }
clap = { version = "4.5", features = ["derive"] }
dirs = "5.0.1"
env_logger = "0.10.1"
limbo_core = { path = "../core" }

View file

@ -410,4 +410,83 @@ mod tests {
}
Ok(())
}
#[test]
fn test_last_insert_rowid_basic() -> anyhow::Result<()> {
let _ = env_logger::try_init();
let tmp_db =
TempDatabase::new("CREATE TABLE test_rowid (id INTEGER PRIMARY KEY, val TEXT);");
let conn = tmp_db.connect_limbo();
// Simple insert
let mut insert_query =
conn.query("INSERT INTO test_rowid (id, val) VALUES (NULL, 'test1')")?;
if let Some(ref mut rows) = insert_query {
loop {
match rows.next_row()? {
RowResult::IO => {
tmp_db.io.run_once()?;
}
RowResult::Done => break,
_ => unreachable!(),
}
}
}
// Check last_insert_rowid separately
let mut select_query = conn.query("SELECT last_insert_rowid()")?;
if let Some(ref mut rows) = select_query {
loop {
match rows.next_row()? {
RowResult::Row(row) => {
if let Value::Integer(id) = row.values[0] {
assert_eq!(id, 1, "First insert should have rowid 1");
}
}
RowResult::IO => {
tmp_db.io.run_once()?;
}
RowResult::Done => break,
}
}
}
// Test explicit rowid
match conn.query("INSERT INTO test_rowid (id, val) VALUES (5, 'test2')") {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
RowResult::IO => {
tmp_db.io.run_once()?;
}
RowResult::Done => break,
_ => unreachable!(),
}
},
Ok(None) => {}
Err(err) => eprintln!("{}", err),
};
// Check last_insert_rowid after explicit id
let mut last_id = 0;
match conn.query("SELECT last_insert_rowid()") {
Ok(Some(ref mut rows)) => loop {
match rows.next_row()? {
RowResult::Row(row) => {
if let Value::Integer(id) = row.values[0] {
last_id = id;
}
}
RowResult::IO => {
tmp_db.io.run_once()?;
}
RowResult::Done => break,
}
},
Ok(None) => {}
Err(err) => eprintln!("{}", err),
};
assert_eq!(last_id, 5, "Explicit insert should have rowid 5");
do_flush(&conn, &tmp_db)?;
Ok(())
}
}

View file

@ -130,4 +130,35 @@ do_execsql_test group_by_function_expression_ridiculous {
do_execsql_test group_by_count_star {
select u.first_name, count(*) from users u group by u.first_name limit 1;
} {Aaron|41}
do_execsql_test having {
select u.first_name, round(avg(u.age)) from users u group by u.first_name having avg(u.age) > 97 order by avg(u.age) desc limit 5;
} {Nina|100.0
Kurt|99.0
Selena|98.0}
do_execsql_test having_with_binary_cond {
select u.first_name, sum(u.age) from users u group by u.first_name having sum(u.age) + 1000 = 9109;
} {Robert|8109}
do_execsql_test having_with_scalar_fn_over_aggregate {
select u.first_name, concat(count(1), ' people with this name') from users u group by u.first_name having count(1) > 50 order by count(1) asc limit 5;
} {"Angela|51 people with this name
Justin|51 people with this name
Rachel|52 people with this name
Susan|52 people with this name
Jeffrey|54 people with this name"}
do_execsql_test having_with_multiple_conditions {
select u.first_name, count(*), round(avg(u.age)) as avg_age
from users u
group by u.first_name
having count(*) > 40 and avg(u.age) > 40
order by count(*) desc, avg(u.age) desc
limit 5;
} {Michael|228|49.0
David|165|53.0
Robert|159|51.0
Jennifer|151|51.0
John|145|50.0}

View file

@ -1,4 +1,3 @@
#!/usr/bin/env tclsh
set testdir [file dirname $argv0]
source $testdir/tester.tcl

View file

@ -212,4 +212,38 @@ do_execsql_test join-utilizing-both-seekrowid-and-secondary-index {
select u.first_name, p.name from users u join products p on u.id = p.id and u.age > 70;
} {Matthew|boots
Nicholas|shorts
Jamie|hat}
# An important difference between a regular SELECT * join and a SELECT * USING join is that the join key columns
# are deduplicated from the result in the USING case.
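# e.g. in the query below the shared "id" column appears once in the output, not once per joined table.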
do_execsql_test join-using {
select * from users join products using (id) limit 3;
} {"1|Jamie|Foster|dylan00@example.com|496-522-9493|62375 Johnson Rest Suite 322|West Lauriestad|IL|35865|94|hat|79.0
2|Cindy|Salazar|williamsrebecca@example.com|287-934-1135|75615 Stacey Shore|South Stephanie|NC|85181|37|cap|82.0
3|Tommy|Perry|warechristopher@example.org|001-288-554-8139x0276|2896 Paul Fall Apt. 972|Michaelborough|VA|15691|18|shirt|18.0"}
do_execsql_test join-using-multiple {
select u.first_name, u.last_name, p.name from users u join users u2 using(id) join products p using(id) limit 3;
} {"Jamie|Foster|hat
Cindy|Salazar|cap
Tommy|Perry|shirt"}
# NATURAL JOIN desugars to JOIN USING (common_column1, common_column2...)
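# users and products share only the "id" column here, so this returns the same rows as the join-using test above.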
do_execsql_test natural-join {
select * from users natural join products limit 3;
} {"1|Jamie|Foster|dylan00@example.com|496-522-9493|62375 Johnson Rest Suite 322|West Lauriestad|IL|35865|94|hat|79.0
2|Cindy|Salazar|williamsrebecca@example.com|287-934-1135|75615 Stacey Shore|South Stephanie|NC|85181|37|cap|82.0
3|Tommy|Perry|warechristopher@example.org|001-288-554-8139x0276|2896 Paul Fall Apt. 972|Michaelborough|VA|15691|18|shirt|18.0"}
do_execsql_test natural-join-multiple {
select u.first_name, u2.last_name, p.name from users u natural join users u2 natural join products p limit 3;
} {"Jamie|Foster|hat
Cindy|Salazar|cap
Tommy|Perry|shirt"}
# the planner must also be able to join the 1st table directly with the 3rd table
do_execsql_test natural-join-and-using-join {
select u.id, u2.id, p.id from users u natural join products p join users u2 using (first_name) limit 3;
} {"1|1|1
1|1204|1
1|1261|1"}

View file

@ -351,6 +351,10 @@ do_execsql_test length-text {
SELECT length('limbo');
} {5}
do_execsql_test length-text-utf8-chars {
SELECT length('ąłóżźć');
} {6}
do_execsql_test length-integer {
SELECT length(12345);
} {5}
@ -367,8 +371,32 @@ do_execsql_test length-empty-text {
SELECT length('');
} {0}
do_execsql_test length-date-binary-expr {
select length(date('now')) = 10;
} {1}
do_execsql_test octet-length-text {
SELECT octet_length('limbo');
} {5}
do_execsql_test octet-length-text-utf8-chars {
SELECT octet_length('ąłóżźć');
} {12}
do_execsql_test octet-length-integer {
SELECT octet_length(12345);
} {5}
do_execsql_test octet-length-float {
SELECT octet_length(123.456);
} {7}
do_execsql_test octet-length-null {
SELECT octet_length(NULL);
} {}
do_execsql_test octet-length-empty-text {
SELECT octet_length('');
} {0}
do_execsql_test octet-length-date-binary-expr {
select octet_length(date('now')) = 10;
} {1}
do_execsql_test min-number {

View file

@ -57,4 +57,20 @@ do_execsql_test seekrowid {
do_execsql_test select_parenthesized {
select (price + 100) from products limit 1;
} {179.0}
do_execsql_test select_case_base_else {
select case when 0 then 'false' when 1 then 'true' else 'null' end;
} {true}
do_execsql_test select_case_noelse_null {
select case when 0 then 0 end;
} {}
do_execsql_test select_base_case_else {
select case 1 when 0 then 'zero' when 1 then 'one' else 'two' end;
} {one}
do_execsql_test select_base_case_noelse_null {
select case 'null else' when 0 then 0 when 1 then 1 end;
} {}